path: root/app/openssl/crypto/aes/asm
author     Parménides GV <parmegv@sdf.org>    2015-06-04 19:20:15 +0200
committer  Parménides GV <parmegv@sdf.org>    2015-06-04 19:20:15 +0200
commit     27594eeae6f40a402bc3110f06d57975168e74e3 (patch)
tree       cdabf6571e6f4ff07205fd6921d8095539a1fcdc /app/openssl/crypto/aes/asm
parent     8dc4f58d96892fbfd83094fb85b1d17656035290 (diff)
ics-openvpn as a submodule! beautiful
ics-openvpn is now officially on GitHub, and they track openssl and openvpn as submodules, so it's easier to update everything: just a git submodule update --recursive. I've also set up soft links in app to the native modules from ics-openvpn, so that we don't copy files in Gradle (copying was causing problems because the submodules' .git* files were not being copied). That makes the repo cleaner.
Diffstat (limited to 'app/openssl/crypto/aes/asm')
-rw-r--r--  app/openssl/crypto/aes/asm/aes-586.S             3239
-rw-r--r--  app/openssl/crypto/aes/asm/aes-586.pl            2980
-rw-r--r--  app/openssl/crypto/aes/asm/aes-armv4.S           1177
-rw-r--r--  app/openssl/crypto/aes/asm/aes-armv4.pl          1217
-rw-r--r--  app/openssl/crypto/aes/asm/aes-ia64.S            1123
-rw-r--r--  app/openssl/crypto/aes/asm/aes-mips.S            1337
-rw-r--r--  app/openssl/crypto/aes/asm/aes-mips.pl           1611
-rw-r--r--  app/openssl/crypto/aes/asm/aes-parisc.pl         1022
-rw-r--r--  app/openssl/crypto/aes/asm/aes-ppc.pl            1365
-rw-r--r--  app/openssl/crypto/aes/asm/aes-s390x.pl          2237
-rwxr-xr-x  app/openssl/crypto/aes/asm/aes-sparcv9.pl        1182
-rw-r--r--  app/openssl/crypto/aes/asm/aes-x86_64.S          2541
-rwxr-xr-x  app/openssl/crypto/aes/asm/aes-x86_64.pl         2819
-rw-r--r--  app/openssl/crypto/aes/asm/aesni-sha1-x86_64.S   1396
-rw-r--r--  app/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl  1250
-rw-r--r--  app/openssl/crypto/aes/asm/aesni-x86.S           2143
-rw-r--r--  app/openssl/crypto/aes/asm/aesni-x86.pl          2189
-rw-r--r--  app/openssl/crypto/aes/asm/aesni-x86_64.S        2535
-rw-r--r--  app/openssl/crypto/aes/asm/aesni-x86_64.pl       3071
-rw-r--r--  app/openssl/crypto/aes/asm/aesv8-armx-64.S        761
-rw-r--r--  app/openssl/crypto/aes/asm/aesv8-armx.S           767
-rw-r--r--  app/openssl/crypto/aes/asm/aesv8-armx.pl          980
-rw-r--r--  app/openssl/crypto/aes/asm/bsaes-armv7.S         2544
-rw-r--r--  app/openssl/crypto/aes/asm/bsaes-armv7.pl        2467
-rw-r--r--  app/openssl/crypto/aes/asm/bsaes-x86_64.S        2498
-rw-r--r--  app/openssl/crypto/aes/asm/bsaes-x86_64.pl       3108
-rw-r--r--  app/openssl/crypto/aes/asm/vpaes-x86.S            661
-rw-r--r--  app/openssl/crypto/aes/asm/vpaes-x86.pl           903
-rw-r--r--  app/openssl/crypto/aes/asm/vpaes-x86_64.S         828
-rw-r--r--  app/openssl/crypto/aes/asm/vpaes-x86_64.pl       1207
30 files changed, 0 insertions, 53158 deletions
diff --git a/app/openssl/crypto/aes/asm/aes-586.S b/app/openssl/crypto/aes/asm/aes-586.S
deleted file mode 100644
index 20c0238f..00000000
--- a/app/openssl/crypto/aes/asm/aes-586.S
+++ /dev/null
@@ -1,3239 +0,0 @@
-.file "aes-586.s"
-.text
-.type _x86_AES_encrypt_compact,@function
-.align 16
-_x86_AES_encrypt_compact:
- movl %edi,20(%esp)
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- movl 240(%edi),%esi
- leal -2(%esi,%esi,1),%esi
- leal (%edi,%esi,8),%esi
- movl %esi,24(%esp)
- movl -128(%ebp),%edi
- movl -96(%ebp),%esi
- movl -64(%ebp),%edi
- movl -32(%ebp),%esi
- movl (%ebp),%edi
- movl 32(%ebp),%esi
- movl 64(%ebp),%edi
- movl 96(%ebp),%esi
-.align 16
-.L000loop:
- movl %eax,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %bh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,4(%esp)
-
- movl %ebx,%esi
- andl $255,%esi
- shrl $16,%ebx
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %ch,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,8(%esp)
-
- movl %ecx,%esi
- andl $255,%esi
- shrl $24,%ecx
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %dh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edx
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movzbl %bh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
-
- andl $255,%edx
- movzbl -128(%ebp,%edx,1),%edx
- movzbl %ah,%eax
- movzbl -128(%ebp,%eax,1),%eax
- shll $8,%eax
- xorl %eax,%edx
- movl 4(%esp),%eax
- andl $255,%ebx
- movzbl -128(%ebp,%ebx,1),%ebx
- shll $16,%ebx
- xorl %ebx,%edx
- movl 8(%esp),%ebx
- movzbl -128(%ebp,%ecx,1),%ecx
- shll $24,%ecx
- xorl %ecx,%edx
- movl %esi,%ecx
-
- movl %ecx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ecx,%ecx,1),%edi
- subl %ebp,%esi
- andl $4278124286,%edi
- andl $454761243,%esi
- movl %ecx,%ebp
- xorl %edi,%esi
- xorl %esi,%ecx
- roll $24,%ecx
- xorl %esi,%ecx
- rorl $16,%ebp
- xorl %ebp,%ecx
- rorl $8,%ebp
- xorl %ebp,%ecx
- movl %edx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%edx,%edx,1),%edi
- subl %ebp,%esi
- andl $4278124286,%edi
- andl $454761243,%esi
- movl %edx,%ebp
- xorl %edi,%esi
- xorl %esi,%edx
- roll $24,%edx
- xorl %esi,%edx
- rorl $16,%ebp
- xorl %ebp,%edx
- rorl $8,%ebp
- xorl %ebp,%edx
- movl %eax,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%eax,%eax,1),%edi
- subl %ebp,%esi
- andl $4278124286,%edi
- andl $454761243,%esi
- movl %eax,%ebp
- xorl %edi,%esi
- xorl %esi,%eax
- roll $24,%eax
- xorl %esi,%eax
- rorl $16,%ebp
- xorl %ebp,%eax
- rorl $8,%ebp
- xorl %ebp,%eax
- movl %ebx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ebx,%ebx,1),%edi
- subl %ebp,%esi
- andl $4278124286,%edi
- andl $454761243,%esi
- movl %ebx,%ebp
- xorl %edi,%esi
- xorl %esi,%ebx
- roll $24,%ebx
- xorl %esi,%ebx
- rorl $16,%ebp
- xorl %ebp,%ebx
- rorl $8,%ebp
- xorl %ebp,%ebx
- movl 20(%esp),%edi
- movl 28(%esp),%ebp
- addl $16,%edi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- cmpl 24(%esp),%edi
- movl %edi,20(%esp)
- jb .L000loop
- movl %eax,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %bh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,4(%esp)
-
- movl %ebx,%esi
- andl $255,%esi
- shrl $16,%ebx
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %ch,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,8(%esp)
-
- movl %ecx,%esi
- andl $255,%esi
- shrl $24,%ecx
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %dh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edx
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movzbl %bh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
-
- movl 20(%esp),%edi
- andl $255,%edx
- movzbl -128(%ebp,%edx,1),%edx
- movzbl %ah,%eax
- movzbl -128(%ebp,%eax,1),%eax
- shll $8,%eax
- xorl %eax,%edx
- movl 4(%esp),%eax
- andl $255,%ebx
- movzbl -128(%ebp,%ebx,1),%ebx
- shll $16,%ebx
- xorl %ebx,%edx
- movl 8(%esp),%ebx
- movzbl -128(%ebp,%ecx,1),%ecx
- shll $24,%ecx
- xorl %ecx,%edx
- movl %esi,%ecx
-
- xorl 16(%edi),%eax
- xorl 20(%edi),%ebx
- xorl 24(%edi),%ecx
- xorl 28(%edi),%edx
- ret
-.size _x86_AES_encrypt_compact,.-_x86_AES_encrypt_compact
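The magic constants threaded through the blocks above, 2155905152 (0x80808080), 4278124286 (0xFEFEFEFE) and 454761243 (0x1B1B1B1B), implement a word-parallel GF(2^8) doubling, the xtime() step of MixColumns, on four state bytes packed in one 32-bit register; the rol/ror pairs that follow combine the doubled and plain copies into the MixColumns output. A minimal C sketch of the doubling trick (the helper name xtime4 is ours, not OpenSSL's):

    #include <stdint.h>

    /* Double each of the four GF(2^8) bytes packed in x, modulo the AES
     * polynomial x^8 + x^4 + x^3 + x + 1 (0x11B): word-parallel xtime(),
     * mirroring the andl/leal/subl/xorl sequence in the loop above. */
    static uint32_t xtime4(uint32_t x)
    {
        uint32_t hi = x & 0x80808080u;       /* high bit of every byte */
        uint32_t r  = (x + x) & 0xFEFEFEFEu; /* shift left, drop bits that
                                                cross a byte boundary */
        r ^= (hi - (hi >> 7)) & 0x1B1B1B1Bu; /* per byte: 0x80 - 0x01 = 0x7F,
                                                masked to the reduction 0x1B */
        return r;
    }

The subl %ebp,%esi in the assembly is that same hi - (hi >> 7) step: it turns every 0x80 into 0x7F so the 0x1B1B1B1B mask leaves the reduction byte exactly where an overflow occurred.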
-.type _sse_AES_encrypt_compact,@function
-.align 16
-_sse_AES_encrypt_compact:
- pxor (%edi),%mm0
- pxor 8(%edi),%mm4
- movl 240(%edi),%esi
- leal -2(%esi,%esi,1),%esi
- leal (%edi,%esi,8),%esi
- movl %esi,24(%esp)
- movl $454761243,%eax
- movl %eax,8(%esp)
- movl %eax,12(%esp)
- movl -128(%ebp),%eax
- movl -96(%ebp),%ebx
- movl -64(%ebp),%ecx
- movl -32(%ebp),%edx
- movl (%ebp),%eax
- movl 32(%ebp),%ebx
- movl 64(%ebp),%ecx
- movl 96(%ebp),%edx
-.align 16
-.L001loop:
- pshufw $8,%mm0,%mm1
- pshufw $13,%mm4,%mm5
- movd %mm1,%eax
- movd %mm5,%ebx
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%ecx
- pshufw $13,%mm0,%mm2
- movzbl %ah,%edx
- movzbl -128(%ebp,%edx,1),%edx
- shll $8,%edx
- shrl $16,%eax
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
- orl %esi,%ecx
- pshufw $8,%mm4,%mm6
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
- orl %esi,%edx
- shrl $16,%ebx
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $8,%esi
- orl %esi,%ecx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
- orl %esi,%ecx
- movd %ecx,%mm0
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%ecx
- movd %mm2,%eax
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
- orl %esi,%ecx
- movd %mm6,%ebx
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
- orl %esi,%ecx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $8,%esi
- orl %esi,%ecx
- movd %ecx,%mm1
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%ecx
- shrl $16,%ebx
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
- orl %esi,%ecx
- shrl $16,%eax
- punpckldq %mm1,%mm0
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
- orl %esi,%ecx
- andl $255,%eax
- movzbl -128(%ebp,%eax,1),%eax
- shll $16,%eax
- orl %eax,%edx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $8,%esi
- orl %esi,%ecx
- movd %ecx,%mm4
- andl $255,%ebx
- movzbl -128(%ebp,%ebx,1),%ebx
- orl %ebx,%edx
- movd %edx,%mm5
- punpckldq %mm5,%mm4
- addl $16,%edi
- cmpl 24(%esp),%edi
- ja .L002out
- movq 8(%esp),%mm2
- pxor %mm3,%mm3
- pxor %mm7,%mm7
- movq %mm0,%mm1
- movq %mm4,%mm5
- pcmpgtb %mm0,%mm3
- pcmpgtb %mm4,%mm7
- pand %mm2,%mm3
- pand %mm2,%mm7
- pshufw $177,%mm0,%mm2
- pshufw $177,%mm4,%mm6
- paddb %mm0,%mm0
- paddb %mm4,%mm4
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- pshufw $177,%mm2,%mm3
- pshufw $177,%mm6,%mm7
- pxor %mm0,%mm1
- pxor %mm4,%mm5
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- movq %mm3,%mm2
- movq %mm7,%mm6
- pslld $8,%mm3
- pslld $8,%mm7
- psrld $24,%mm2
- psrld $24,%mm6
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- movq %mm1,%mm3
- movq %mm5,%mm7
- movq (%edi),%mm2
- movq 8(%edi),%mm6
- psrld $8,%mm1
- psrld $8,%mm5
- movl -128(%ebp),%eax
- pslld $24,%mm3
- pslld $24,%mm7
- movl -64(%ebp),%ebx
- pxor %mm1,%mm0
- pxor %mm5,%mm4
- movl (%ebp),%ecx
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- movl 64(%ebp),%edx
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- jmp .L001loop
-.align 16
-.L002out:
- pxor (%edi),%mm0
- pxor 8(%edi),%mm4
- ret
-.size _sse_AES_encrypt_compact,.-_sse_AES_encrypt_compact
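The MMX routine reaches the same doubling through a different idiom: pcmpgtb against an all-zero register writes 0xFF into every byte whose sign bit is set, pand narrows that to the reduction constant (the movl $454761243 parked at 8(%esp)), paddb doubles all eight bytes at once (packed byte adds cannot carry across byte boundaries, so no 0xFEFEFEFE mask is needed), and pxor folds the reduction back in. Per byte the sequence computes (helper name ours):

    #include <stdint.h>

    /* Per-byte view of the pcmpgtb/pand/paddb/pxor sequence above. */
    static uint8_t xtime_byte(uint8_t b)
    {
        uint8_t mask = ((int8_t)b < 0) ? 0x1B : 0x00; /* pcmpgtb + pand */
        return (uint8_t)((b << 1) ^ mask);            /* paddb + pxor   */
    }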
-.type _x86_AES_encrypt,@function
-.align 16
-_x86_AES_encrypt:
- movl %edi,20(%esp)
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- movl 240(%edi),%esi
- leal -2(%esi,%esi,1),%esi
- leal (%edi,%esi,8),%esi
- movl %esi,24(%esp)
-.align 16
-.L003loop:
- movl %eax,%esi
- andl $255,%esi
- movl (%ebp,%esi,8),%esi
- movzbl %bh,%edi
- xorl 3(%ebp,%edi,8),%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- xorl 2(%ebp,%edi,8),%esi
- movl %edx,%edi
- shrl $24,%edi
- xorl 1(%ebp,%edi,8),%esi
- movl %esi,4(%esp)
-
- movl %ebx,%esi
- andl $255,%esi
- shrl $16,%ebx
- movl (%ebp,%esi,8),%esi
- movzbl %ch,%edi
- xorl 3(%ebp,%edi,8),%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- xorl 2(%ebp,%edi,8),%esi
- movl %eax,%edi
- shrl $24,%edi
- xorl 1(%ebp,%edi,8),%esi
- movl %esi,8(%esp)
-
- movl %ecx,%esi
- andl $255,%esi
- shrl $24,%ecx
- movl (%ebp,%esi,8),%esi
- movzbl %dh,%edi
- xorl 3(%ebp,%edi,8),%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edx
- andl $255,%edi
- xorl 2(%ebp,%edi,8),%esi
- movzbl %bh,%edi
- xorl 1(%ebp,%edi,8),%esi
-
- movl 20(%esp),%edi
- movl (%ebp,%edx,8),%edx
- movzbl %ah,%eax
- xorl 3(%ebp,%eax,8),%edx
- movl 4(%esp),%eax
- andl $255,%ebx
- xorl 2(%ebp,%ebx,8),%edx
- movl 8(%esp),%ebx
- xorl 1(%ebp,%ecx,8),%edx
- movl %esi,%ecx
-
- addl $16,%edi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- cmpl 24(%esp),%edi
- movl %edi,20(%esp)
- jb .L003loop
- movl %eax,%esi
- andl $255,%esi
- movl 2(%ebp,%esi,8),%esi
- andl $255,%esi
- movzbl %bh,%edi
- movl (%ebp,%edi,8),%edi
- andl $65280,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- movl (%ebp,%edi,8),%edi
- andl $16711680,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $24,%edi
- movl 2(%ebp,%edi,8),%edi
- andl $4278190080,%edi
- xorl %edi,%esi
- movl %esi,4(%esp)
- movl %ebx,%esi
- andl $255,%esi
- shrl $16,%ebx
- movl 2(%ebp,%esi,8),%esi
- andl $255,%esi
- movzbl %ch,%edi
- movl (%ebp,%edi,8),%edi
- andl $65280,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- movl (%ebp,%edi,8),%edi
- andl $16711680,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $24,%edi
- movl 2(%ebp,%edi,8),%edi
- andl $4278190080,%edi
- xorl %edi,%esi
- movl %esi,8(%esp)
- movl %ecx,%esi
- andl $255,%esi
- shrl $24,%ecx
- movl 2(%ebp,%esi,8),%esi
- andl $255,%esi
- movzbl %dh,%edi
- movl (%ebp,%edi,8),%edi
- andl $65280,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edx
- andl $255,%edi
- movl (%ebp,%edi,8),%edi
- andl $16711680,%edi
- xorl %edi,%esi
- movzbl %bh,%edi
- movl 2(%ebp,%edi,8),%edi
- andl $4278190080,%edi
- xorl %edi,%esi
- movl 20(%esp),%edi
- andl $255,%edx
- movl 2(%ebp,%edx,8),%edx
- andl $255,%edx
- movzbl %ah,%eax
- movl (%ebp,%eax,8),%eax
- andl $65280,%eax
- xorl %eax,%edx
- movl 4(%esp),%eax
- andl $255,%ebx
- movl (%ebp,%ebx,8),%ebx
- andl $16711680,%ebx
- xorl %ebx,%edx
- movl 8(%esp),%ebx
- movl 2(%ebp,%ecx,8),%ecx
- andl $4278190080,%ecx
- xorl %ecx,%edx
- movl %esi,%ecx
- addl $16,%edi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- ret
-.align 64
-.LAES_Te:
-.long 2774754246,2774754246
-.long 2222750968,2222750968
-.long 2574743534,2574743534
-.long 2373680118,2373680118
-.long 234025727,234025727
-.long 3177933782,3177933782
-.long 2976870366,2976870366
-.long 1422247313,1422247313
-.long 1345335392,1345335392
-.long 50397442,50397442
-.long 2842126286,2842126286
-.long 2099981142,2099981142
-.long 436141799,436141799
-.long 1658312629,1658312629
-.long 3870010189,3870010189
-.long 2591454956,2591454956
-.long 1170918031,1170918031
-.long 2642575903,2642575903
-.long 1086966153,1086966153
-.long 2273148410,2273148410
-.long 368769775,368769775
-.long 3948501426,3948501426
-.long 3376891790,3376891790
-.long 200339707,200339707
-.long 3970805057,3970805057
-.long 1742001331,1742001331
-.long 4255294047,4255294047
-.long 3937382213,3937382213
-.long 3214711843,3214711843
-.long 4154762323,4154762323
-.long 2524082916,2524082916
-.long 1539358875,1539358875
-.long 3266819957,3266819957
-.long 486407649,486407649
-.long 2928907069,2928907069
-.long 1780885068,1780885068
-.long 1513502316,1513502316
-.long 1094664062,1094664062
-.long 49805301,49805301
-.long 1338821763,1338821763
-.long 1546925160,1546925160
-.long 4104496465,4104496465
-.long 887481809,887481809
-.long 150073849,150073849
-.long 2473685474,2473685474
-.long 1943591083,1943591083
-.long 1395732834,1395732834
-.long 1058346282,1058346282
-.long 201589768,201589768
-.long 1388824469,1388824469
-.long 1696801606,1696801606
-.long 1589887901,1589887901
-.long 672667696,672667696
-.long 2711000631,2711000631
-.long 251987210,251987210
-.long 3046808111,3046808111
-.long 151455502,151455502
-.long 907153956,907153956
-.long 2608889883,2608889883
-.long 1038279391,1038279391
-.long 652995533,652995533
-.long 1764173646,1764173646
-.long 3451040383,3451040383
-.long 2675275242,2675275242
-.long 453576978,453576978
-.long 2659418909,2659418909
-.long 1949051992,1949051992
-.long 773462580,773462580
-.long 756751158,756751158
-.long 2993581788,2993581788
-.long 3998898868,3998898868
-.long 4221608027,4221608027
-.long 4132590244,4132590244
-.long 1295727478,1295727478
-.long 1641469623,1641469623
-.long 3467883389,3467883389
-.long 2066295122,2066295122
-.long 1055122397,1055122397
-.long 1898917726,1898917726
-.long 2542044179,2542044179
-.long 4115878822,4115878822
-.long 1758581177,1758581177
-.long 0,0
-.long 753790401,753790401
-.long 1612718144,1612718144
-.long 536673507,536673507
-.long 3367088505,3367088505
-.long 3982187446,3982187446
-.long 3194645204,3194645204
-.long 1187761037,1187761037
-.long 3653156455,3653156455
-.long 1262041458,1262041458
-.long 3729410708,3729410708
-.long 3561770136,3561770136
-.long 3898103984,3898103984
-.long 1255133061,1255133061
-.long 1808847035,1808847035
-.long 720367557,720367557
-.long 3853167183,3853167183
-.long 385612781,385612781
-.long 3309519750,3309519750
-.long 3612167578,3612167578
-.long 1429418854,1429418854
-.long 2491778321,2491778321
-.long 3477423498,3477423498
-.long 284817897,284817897
-.long 100794884,100794884
-.long 2172616702,2172616702
-.long 4031795360,4031795360
-.long 1144798328,1144798328
-.long 3131023141,3131023141
-.long 3819481163,3819481163
-.long 4082192802,4082192802
-.long 4272137053,4272137053
-.long 3225436288,3225436288
-.long 2324664069,2324664069
-.long 2912064063,2912064063
-.long 3164445985,3164445985
-.long 1211644016,1211644016
-.long 83228145,83228145
-.long 3753688163,3753688163
-.long 3249976951,3249976951
-.long 1977277103,1977277103
-.long 1663115586,1663115586
-.long 806359072,806359072
-.long 452984805,452984805
-.long 250868733,250868733
-.long 1842533055,1842533055
-.long 1288555905,1288555905
-.long 336333848,336333848
-.long 890442534,890442534
-.long 804056259,804056259
-.long 3781124030,3781124030
-.long 2727843637,2727843637
-.long 3427026056,3427026056
-.long 957814574,957814574
-.long 1472513171,1472513171
-.long 4071073621,4071073621
-.long 2189328124,2189328124
-.long 1195195770,1195195770
-.long 2892260552,2892260552
-.long 3881655738,3881655738
-.long 723065138,723065138
-.long 2507371494,2507371494
-.long 2690670784,2690670784
-.long 2558624025,2558624025
-.long 3511635870,3511635870
-.long 2145180835,2145180835
-.long 1713513028,1713513028
-.long 2116692564,2116692564
-.long 2878378043,2878378043
-.long 2206763019,2206763019
-.long 3393603212,3393603212
-.long 703524551,703524551
-.long 3552098411,3552098411
-.long 1007948840,1007948840
-.long 2044649127,2044649127
-.long 3797835452,3797835452
-.long 487262998,487262998
-.long 1994120109,1994120109
-.long 1004593371,1004593371
-.long 1446130276,1446130276
-.long 1312438900,1312438900
-.long 503974420,503974420
-.long 3679013266,3679013266
-.long 168166924,168166924
-.long 1814307912,1814307912
-.long 3831258296,3831258296
-.long 1573044895,1573044895
-.long 1859376061,1859376061
-.long 4021070915,4021070915
-.long 2791465668,2791465668
-.long 2828112185,2828112185
-.long 2761266481,2761266481
-.long 937747667,937747667
-.long 2339994098,2339994098
-.long 854058965,854058965
-.long 1137232011,1137232011
-.long 1496790894,1496790894
-.long 3077402074,3077402074
-.long 2358086913,2358086913
-.long 1691735473,1691735473
-.long 3528347292,3528347292
-.long 3769215305,3769215305
-.long 3027004632,3027004632
-.long 4199962284,4199962284
-.long 133494003,133494003
-.long 636152527,636152527
-.long 2942657994,2942657994
-.long 2390391540,2390391540
-.long 3920539207,3920539207
-.long 403179536,403179536
-.long 3585784431,3585784431
-.long 2289596656,2289596656
-.long 1864705354,1864705354
-.long 1915629148,1915629148
-.long 605822008,605822008
-.long 4054230615,4054230615
-.long 3350508659,3350508659
-.long 1371981463,1371981463
-.long 602466507,602466507
-.long 2094914977,2094914977
-.long 2624877800,2624877800
-.long 555687742,555687742
-.long 3712699286,3712699286
-.long 3703422305,3703422305
-.long 2257292045,2257292045
-.long 2240449039,2240449039
-.long 2423288032,2423288032
-.long 1111375484,1111375484
-.long 3300242801,3300242801
-.long 2858837708,2858837708
-.long 3628615824,3628615824
-.long 84083462,84083462
-.long 32962295,32962295
-.long 302911004,302911004
-.long 2741068226,2741068226
-.long 1597322602,1597322602
-.long 4183250862,4183250862
-.long 3501832553,3501832553
-.long 2441512471,2441512471
-.long 1489093017,1489093017
-.long 656219450,656219450
-.long 3114180135,3114180135
-.long 954327513,954327513
-.long 335083755,335083755
-.long 3013122091,3013122091
-.long 856756514,856756514
-.long 3144247762,3144247762
-.long 1893325225,1893325225
-.long 2307821063,2307821063
-.long 2811532339,2811532339
-.long 3063651117,3063651117
-.long 572399164,572399164
-.long 2458355477,2458355477
-.long 552200649,552200649
-.long 1238290055,1238290055
-.long 4283782570,4283782570
-.long 2015897680,2015897680
-.long 2061492133,2061492133
-.long 2408352771,2408352771
-.long 4171342169,4171342169
-.long 2156497161,2156497161
-.long 386731290,386731290
-.long 3669999461,3669999461
-.long 837215959,837215959
-.long 3326231172,3326231172
-.long 3093850320,3093850320
-.long 3275833730,3275833730
-.long 2962856233,2962856233
-.long 1999449434,1999449434
-.long 286199582,286199582
-.long 3417354363,3417354363
-.long 4233385128,4233385128
-.long 3602627437,3602627437
-.long 974525996,974525996
-.byte 99,124,119,123,242,107,111,197
-.byte 48,1,103,43,254,215,171,118
-.byte 202,130,201,125,250,89,71,240
-.byte 173,212,162,175,156,164,114,192
-.byte 183,253,147,38,54,63,247,204
-.byte 52,165,229,241,113,216,49,21
-.byte 4,199,35,195,24,150,5,154
-.byte 7,18,128,226,235,39,178,117
-.byte 9,131,44,26,27,110,90,160
-.byte 82,59,214,179,41,227,47,132
-.byte 83,209,0,237,32,252,177,91
-.byte 106,203,190,57,74,76,88,207
-.byte 208,239,170,251,67,77,51,133
-.byte 69,249,2,127,80,60,159,168
-.byte 81,163,64,143,146,157,56,245
-.byte 188,182,218,33,16,255,243,210
-.byte 205,12,19,236,95,151,68,23
-.byte 196,167,126,61,100,93,25,115
-.byte 96,129,79,220,34,42,144,136
-.byte 70,238,184,20,222,94,11,219
-.byte 224,50,58,10,73,6,36,92
-.byte 194,211,172,98,145,149,228,121
-.byte 231,200,55,109,141,213,78,169
-.byte 108,86,244,234,101,122,174,8
-.byte 186,120,37,46,28,166,180,198
-.byte 232,221,116,31,75,189,139,138
-.byte 112,62,181,102,72,3,246,14
-.byte 97,53,87,185,134,193,29,158
-.byte 225,248,152,17,105,217,142,148
-.byte 155,30,135,233,206,85,40,223
-.byte 140,161,137,13,191,230,66,104
-.byte 65,153,45,15,176,84,187,22
-.byte 99,124,119,123,242,107,111,197
-.byte 48,1,103,43,254,215,171,118
-.byte 202,130,201,125,250,89,71,240
-.byte 173,212,162,175,156,164,114,192
-.byte 183,253,147,38,54,63,247,204
-.byte 52,165,229,241,113,216,49,21
-.byte 4,199,35,195,24,150,5,154
-.byte 7,18,128,226,235,39,178,117
-.byte 9,131,44,26,27,110,90,160
-.byte 82,59,214,179,41,227,47,132
-.byte 83,209,0,237,32,252,177,91
-.byte 106,203,190,57,74,76,88,207
-.byte 208,239,170,251,67,77,51,133
-.byte 69,249,2,127,80,60,159,168
-.byte 81,163,64,143,146,157,56,245
-.byte 188,182,218,33,16,255,243,210
-.byte 205,12,19,236,95,151,68,23
-.byte 196,167,126,61,100,93,25,115
-.byte 96,129,79,220,34,42,144,136
-.byte 70,238,184,20,222,94,11,219
-.byte 224,50,58,10,73,6,36,92
-.byte 194,211,172,98,145,149,228,121
-.byte 231,200,55,109,141,213,78,169
-.byte 108,86,244,234,101,122,174,8
-.byte 186,120,37,46,28,166,180,198
-.byte 232,221,116,31,75,189,139,138
-.byte 112,62,181,102,72,3,246,14
-.byte 97,53,87,185,134,193,29,158
-.byte 225,248,152,17,105,217,142,148
-.byte 155,30,135,233,206,85,40,223
-.byte 140,161,137,13,191,230,66,104
-.byte 65,153,45,15,176,84,187,22
-.byte 99,124,119,123,242,107,111,197
-.byte 48,1,103,43,254,215,171,118
-.byte 202,130,201,125,250,89,71,240
-.byte 173,212,162,175,156,164,114,192
-.byte 183,253,147,38,54,63,247,204
-.byte 52,165,229,241,113,216,49,21
-.byte 4,199,35,195,24,150,5,154
-.byte 7,18,128,226,235,39,178,117
-.byte 9,131,44,26,27,110,90,160
-.byte 82,59,214,179,41,227,47,132
-.byte 83,209,0,237,32,252,177,91
-.byte 106,203,190,57,74,76,88,207
-.byte 208,239,170,251,67,77,51,133
-.byte 69,249,2,127,80,60,159,168
-.byte 81,163,64,143,146,157,56,245
-.byte 188,182,218,33,16,255,243,210
-.byte 205,12,19,236,95,151,68,23
-.byte 196,167,126,61,100,93,25,115
-.byte 96,129,79,220,34,42,144,136
-.byte 70,238,184,20,222,94,11,219
-.byte 224,50,58,10,73,6,36,92
-.byte 194,211,172,98,145,149,228,121
-.byte 231,200,55,109,141,213,78,169
-.byte 108,86,244,234,101,122,174,8
-.byte 186,120,37,46,28,166,180,198
-.byte 232,221,116,31,75,189,139,138
-.byte 112,62,181,102,72,3,246,14
-.byte 97,53,87,185,134,193,29,158
-.byte 225,248,152,17,105,217,142,148
-.byte 155,30,135,233,206,85,40,223
-.byte 140,161,137,13,191,230,66,104
-.byte 65,153,45,15,176,84,187,22
-.byte 99,124,119,123,242,107,111,197
-.byte 48,1,103,43,254,215,171,118
-.byte 202,130,201,125,250,89,71,240
-.byte 173,212,162,175,156,164,114,192
-.byte 183,253,147,38,54,63,247,204
-.byte 52,165,229,241,113,216,49,21
-.byte 4,199,35,195,24,150,5,154
-.byte 7,18,128,226,235,39,178,117
-.byte 9,131,44,26,27,110,90,160
-.byte 82,59,214,179,41,227,47,132
-.byte 83,209,0,237,32,252,177,91
-.byte 106,203,190,57,74,76,88,207
-.byte 208,239,170,251,67,77,51,133
-.byte 69,249,2,127,80,60,159,168
-.byte 81,163,64,143,146,157,56,245
-.byte 188,182,218,33,16,255,243,210
-.byte 205,12,19,236,95,151,68,23
-.byte 196,167,126,61,100,93,25,115
-.byte 96,129,79,220,34,42,144,136
-.byte 70,238,184,20,222,94,11,219
-.byte 224,50,58,10,73,6,36,92
-.byte 194,211,172,98,145,149,228,121
-.byte 231,200,55,109,141,213,78,169
-.byte 108,86,244,234,101,122,174,8
-.byte 186,120,37,46,28,166,180,198
-.byte 232,221,116,31,75,189,139,138
-.byte 112,62,181,102,72,3,246,14
-.byte 97,53,87,185,134,193,29,158
-.byte 225,248,152,17,105,217,142,148
-.byte 155,30,135,233,206,85,40,223
-.byte 140,161,137,13,191,230,66,104
-.byte 65,153,45,15,176,84,187,22
-.long 1,2,4,8
-.long 16,32,64,128
-.long 27,54,0,0
-.long 0,0,0,0
-.size _x86_AES_encrypt,.-_x86_AES_encrypt
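_x86_AES_encrypt above is the large-table variant, and the .LAES_Te layout explains the odd addressing: every 32-bit entry is stored twice (.long v,v), so dword loads at byte offsets 0, 1, 2 and 3 into the 8-byte slot return the entry in the four byte-rotations that the classic tables Te0..Te3 would otherwise hold, for half the cache footprint. Roughly, in portable C (big-endian column packing and helper names of our choosing; the x86 code computes the byte-swapped little-endian equivalent):

    #include <stdint.h>

    static uint32_t rotr32(uint32_t v, int n)
    {
        return (v >> n) | (v << (32 - n));
    }

    /* Te0[x] packs the MixColumns products of one S-box output,
     * {2*S[x], S[x], S[x], 3*S[x]} from MSB to LSB. */
    static void build_Te0(const uint8_t S[256], uint32_t Te0[256])
    {
        for (int i = 0; i < 256; i++) {
            uint8_t s  = S[i];
            uint8_t s2 = (uint8_t)((s << 1) ^ ((s & 0x80) ? 0x1B : 0));
            Te0[i] = ((uint32_t)s2 << 24) | ((uint32_t)s << 16)
                   | ((uint32_t)s  << 8) | (uint32_t)(s2 ^ s); /* 3*s */
        }
    }

    /* One inner round: SubBytes, ShiftRows and MixColumns collapse into
     * four table lookups per column, the rotations standing in for the
     * +1/+2/+3 byte-offset loads in the assembly. */
    static void aes_enc_round(uint32_t t[4], const uint32_t s[4],
                              const uint32_t Te0[256], const uint32_t rk[4])
    {
        for (int i = 0; i < 4; i++)
            t[i] = Te0[s[i] >> 24]
                 ^ rotr32(Te0[(s[(i + 1) & 3] >> 16) & 0xFF], 8)
                 ^ rotr32(Te0[(s[(i + 2) & 3] >> 8)  & 0xFF], 16)
                 ^ rotr32(Te0[s[(i + 3) & 3] & 0xFF], 24)
                 ^ rk[i];
    }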
-.globl AES_encrypt
-.type AES_encrypt,@function
-.align 16
-AES_encrypt:
-.L_AES_encrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl 28(%esp),%edi
- movl %esp,%eax
- subl $36,%esp
- andl $-64,%esp
- leal -127(%edi),%ebx
- subl %esp,%ebx
- negl %ebx
- andl $960,%ebx
- subl %ebx,%esp
- addl $4,%esp
- movl %eax,28(%esp)
- call .L004pic_point
-.L004pic_point:
- popl %ebp
- leal _GLOBAL_OFFSET_TABLE_+[.-.L004pic_point](%ebp),%eax
- movl OPENSSL_ia32cap_P@GOT(%eax),%eax
- leal .LAES_Te-.L004pic_point(%ebp),%ebp
- leal 764(%esp),%ebx
- subl %ebp,%ebx
- andl $768,%ebx
- leal 2176(%ebp,%ebx,1),%ebp
- btl $25,(%eax)
- jnc .L005x86
- movq (%esi),%mm0
- movq 8(%esi),%mm4
- call _sse_AES_encrypt_compact
- movl 28(%esp),%esp
- movl 24(%esp),%esi
- movq %mm0,(%esi)
- movq %mm4,8(%esi)
- emms
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.align 16
-.L005x86:
- movl %ebp,24(%esp)
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- call _x86_AES_encrypt_compact
- movl 28(%esp),%esp
- movl 24(%esp),%esi
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size AES_encrypt,.-.L_AES_encrypt_begin
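Before dispatching, the AES_encrypt wrapper aligns its scratch frame to a 64-byte boundary, resolves OPENSSL_ia32cap_P through the GOT (the call .L004pic_point / popl %ebp pair is the standard 32-bit PIC trick for obtaining EIP), and tests bit 25 of the cached CPUID feature word, the SSE flag, to choose _sse_AES_encrypt_compact over the plain-x86 path. The feature test amounts to (a sketch using the GCC/Clang <cpuid.h> helper instead of OpenSSL's cached word):

    #include <cpuid.h>

    /* Bit 25 of CPUID.1:EDX is SSE, the same bit "btl $25" probes in
     * OPENSSL_ia32cap_P above. */
    static int have_sse(void)
    {
        unsigned int a, b, c, d;
        if (!__get_cpuid(1, &a, &b, &c, &d))
            return 0;
        return (d >> 25) & 1;
    }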
-.type _x86_AES_decrypt_compact,@function
-.align 16
-_x86_AES_decrypt_compact:
- movl %edi,20(%esp)
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- movl 240(%edi),%esi
- leal -2(%esi,%esi,1),%esi
- leal (%edi,%esi,8),%esi
- movl %esi,24(%esp)
- movl -128(%ebp),%edi
- movl -96(%ebp),%esi
- movl -64(%ebp),%edi
- movl -32(%ebp),%esi
- movl (%ebp),%edi
- movl 32(%ebp),%esi
- movl 64(%ebp),%edi
- movl 96(%ebp),%esi
-.align 16
-.L006loop:
- movl %eax,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %dh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %ebx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,4(%esp)
- movl %ebx,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %ah,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,8(%esp)
- movl %ecx,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %bh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- andl $255,%edx
- movzbl -128(%ebp,%edx,1),%edx
- movzbl %ch,%ecx
- movzbl -128(%ebp,%ecx,1),%ecx
- shll $8,%ecx
- xorl %ecx,%edx
- movl %esi,%ecx
- shrl $16,%ebx
- andl $255,%ebx
- movzbl -128(%ebp,%ebx,1),%ebx
- shll $16,%ebx
- xorl %ebx,%edx
- shrl $24,%eax
- movzbl -128(%ebp,%eax,1),%eax
- shll $24,%eax
- xorl %eax,%edx
- movl %ecx,%esi
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%ecx,%ecx,1),%eax
- subl %edi,%esi
- andl $4278124286,%eax
- andl $454761243,%esi
- xorl %eax,%esi
- movl %esi,%eax
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%eax,%eax,1),%ebx
- subl %edi,%esi
- andl $4278124286,%ebx
- andl $454761243,%esi
- xorl %ecx,%eax
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%ebx,%ebx,1),%ebp
- subl %edi,%esi
- andl $4278124286,%ebp
- andl $454761243,%esi
- xorl %ecx,%ebx
- roll $8,%ecx
- xorl %esi,%ebp
- xorl %eax,%ecx
- xorl %ebp,%eax
- roll $24,%eax
- xorl %ebx,%ecx
- xorl %ebp,%ebx
- roll $16,%ebx
- xorl %ebp,%ecx
- roll $8,%ebp
- xorl %eax,%ecx
- xorl %ebx,%ecx
- movl 4(%esp),%eax
- xorl %ebp,%ecx
- movl %ecx,12(%esp)
- movl %edx,%esi
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%edx,%edx,1),%ebx
- subl %edi,%esi
- andl $4278124286,%ebx
- andl $454761243,%esi
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%ebx,%ebx,1),%ecx
- subl %edi,%esi
- andl $4278124286,%ecx
- andl $454761243,%esi
- xorl %edx,%ebx
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%ecx,%ecx,1),%ebp
- subl %edi,%esi
- andl $4278124286,%ebp
- andl $454761243,%esi
- xorl %edx,%ecx
- roll $8,%edx
- xorl %esi,%ebp
- xorl %ebx,%edx
- xorl %ebp,%ebx
- roll $24,%ebx
- xorl %ecx,%edx
- xorl %ebp,%ecx
- roll $16,%ecx
- xorl %ebp,%edx
- roll $8,%ebp
- xorl %ebx,%edx
- xorl %ecx,%edx
- movl 8(%esp),%ebx
- xorl %ebp,%edx
- movl %edx,16(%esp)
- movl %eax,%esi
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%eax,%eax,1),%ecx
- subl %edi,%esi
- andl $4278124286,%ecx
- andl $454761243,%esi
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%ecx,%ecx,1),%edx
- subl %edi,%esi
- andl $4278124286,%edx
- andl $454761243,%esi
- xorl %eax,%ecx
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%edx,%edx,1),%ebp
- subl %edi,%esi
- andl $4278124286,%ebp
- andl $454761243,%esi
- xorl %eax,%edx
- roll $8,%eax
- xorl %esi,%ebp
- xorl %ecx,%eax
- xorl %ebp,%ecx
- roll $24,%ecx
- xorl %edx,%eax
- xorl %ebp,%edx
- roll $16,%edx
- xorl %ebp,%eax
- roll $8,%ebp
- xorl %ecx,%eax
- xorl %edx,%eax
- xorl %ebp,%eax
- movl %ebx,%esi
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%ebx,%ebx,1),%ecx
- subl %edi,%esi
- andl $4278124286,%ecx
- andl $454761243,%esi
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%ecx,%ecx,1),%edx
- subl %edi,%esi
- andl $4278124286,%edx
- andl $454761243,%esi
- xorl %ebx,%ecx
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%edi
- shrl $7,%edi
- leal (%edx,%edx,1),%ebp
- subl %edi,%esi
- andl $4278124286,%ebp
- andl $454761243,%esi
- xorl %ebx,%edx
- roll $8,%ebx
- xorl %esi,%ebp
- xorl %ecx,%ebx
- xorl %ebp,%ecx
- roll $24,%ecx
- xorl %edx,%ebx
- xorl %ebp,%edx
- roll $16,%edx
- xorl %ebp,%ebx
- roll $8,%ebp
- xorl %ecx,%ebx
- xorl %edx,%ebx
- movl 12(%esp),%ecx
- xorl %ebp,%ebx
- movl 16(%esp),%edx
- movl 20(%esp),%edi
- movl 28(%esp),%ebp
- addl $16,%edi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- cmpl 24(%esp),%edi
- movl %edi,20(%esp)
- jb .L006loop
- movl %eax,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %dh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %ebx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,4(%esp)
- movl %ebx,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %ah,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,8(%esp)
- movl %ecx,%esi
- andl $255,%esi
- movzbl -128(%ebp,%esi,1),%esi
- movzbl %bh,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $24,%edi
- movzbl -128(%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl 20(%esp),%edi
- andl $255,%edx
- movzbl -128(%ebp,%edx,1),%edx
- movzbl %ch,%ecx
- movzbl -128(%ebp,%ecx,1),%ecx
- shll $8,%ecx
- xorl %ecx,%edx
- movl %esi,%ecx
- shrl $16,%ebx
- andl $255,%ebx
- movzbl -128(%ebp,%ebx,1),%ebx
- shll $16,%ebx
- xorl %ebx,%edx
- movl 8(%esp),%ebx
- shrl $24,%eax
- movzbl -128(%ebp,%eax,1),%eax
- shll $24,%eax
- xorl %eax,%edx
- movl 4(%esp),%eax
- xorl 16(%edi),%eax
- xorl 20(%edi),%ebx
- xorl 24(%edi),%ecx
- xorl 28(%edi),%edx
- ret
-.size _x86_AES_decrypt_compact,.-_x86_AES_decrypt_compact
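The decrypt-compact rounds run the doubling trick three times per column because InvMixColumns needs the multipliers 9, 11, 13 and 14; each repeated andl $2155905152 / $4278124286 / $454761243 block above is one step of the chain x, 2x, 4x, 8x, from which the four multiples are assembled and rotated into place. Word-parallel in C, reusing xtime4() and rotr32() from the sketches above (little-endian column packing assumed; helper name ours):

    /* InvMixColumns on one packed column:
     * 9 = 8^1, 11 = 8^2^1, 13 = 8^4^1, 14 = 8^4^2 over GF(2^8). */
    static uint32_t inv_mix_column(uint32_t a)
    {
        uint32_t a2 = xtime4(a), a4 = xtime4(a2), a8 = xtime4(a4);
        uint32_t t9 = a8 ^ a;       /*  9*a */
        uint32_t tb = a8 ^ a2 ^ a;  /* 11*a */
        uint32_t td = a8 ^ a4 ^ a;  /* 13*a */
        uint32_t te = a8 ^ a4 ^ a2; /* 14*a */
        /* rotate each multiple onto the byte it multiplies */
        return te ^ rotr32(tb, 8) ^ rotr32(td, 16) ^ rotr32(t9, 24);
    }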
-.type _sse_AES_decrypt_compact,@function
-.align 16
-_sse_AES_decrypt_compact:
- pxor (%edi),%mm0
- pxor 8(%edi),%mm4
- movl 240(%edi),%esi
- leal -2(%esi,%esi,1),%esi
- leal (%edi,%esi,8),%esi
- movl %esi,24(%esp)
- movl $454761243,%eax
- movl %eax,8(%esp)
- movl %eax,12(%esp)
- movl -128(%ebp),%eax
- movl -96(%ebp),%ebx
- movl -64(%ebp),%ecx
- movl -32(%ebp),%edx
- movl (%ebp),%eax
- movl 32(%ebp),%ebx
- movl 64(%ebp),%ecx
- movl 96(%ebp),%edx
-.align 16
-.L007loop:
- pshufw $12,%mm0,%mm1
- movd %mm1,%eax
- pshufw $9,%mm4,%mm5
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%ecx
- movd %mm5,%ebx
- movzbl %ah,%edx
- movzbl -128(%ebp,%edx,1),%edx
- shll $8,%edx
- pshufw $6,%mm0,%mm2
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
- orl %esi,%ecx
- shrl $16,%eax
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
- orl %esi,%edx
- shrl $16,%ebx
- pshufw $3,%mm4,%mm6
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
- orl %esi,%ecx
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $8,%esi
- orl %esi,%ecx
- movd %ecx,%mm0
- movzbl %al,%esi
- movd %mm2,%eax
- movzbl -128(%ebp,%esi,1),%ecx
- shll $16,%ecx
- movzbl %bl,%esi
- movd %mm6,%ebx
- movzbl -128(%ebp,%esi,1),%esi
- orl %esi,%ecx
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%esi
- orl %esi,%edx
- movzbl %bl,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
- orl %esi,%edx
- movd %edx,%mm1
- movzbl %ah,%esi
- movzbl -128(%ebp,%esi,1),%edx
- shll $8,%edx
- movzbl %bh,%esi
- shrl $16,%eax
- movzbl -128(%ebp,%esi,1),%esi
- shll $24,%esi
- orl %esi,%edx
- shrl $16,%ebx
- punpckldq %mm1,%mm0
- movzbl %bh,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $8,%esi
- orl %esi,%ecx
- andl $255,%ebx
- movzbl -128(%ebp,%ebx,1),%ebx
- orl %ebx,%edx
- movzbl %al,%esi
- movzbl -128(%ebp,%esi,1),%esi
- shll $16,%esi
- orl %esi,%edx
- movd %edx,%mm4
- movzbl %ah,%eax
- movzbl -128(%ebp,%eax,1),%eax
- shll $24,%eax
- orl %eax,%ecx
- movd %ecx,%mm5
- punpckldq %mm5,%mm4
- addl $16,%edi
- cmpl 24(%esp),%edi
- ja .L008out
- movq %mm0,%mm3
- movq %mm4,%mm7
- pshufw $228,%mm0,%mm2
- pshufw $228,%mm4,%mm6
- movq %mm0,%mm1
- movq %mm4,%mm5
- pshufw $177,%mm0,%mm0
- pshufw $177,%mm4,%mm4
- pslld $8,%mm2
- pslld $8,%mm6
- psrld $8,%mm3
- psrld $8,%mm7
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- pslld $16,%mm2
- pslld $16,%mm6
- psrld $16,%mm3
- psrld $16,%mm7
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- movq 8(%esp),%mm3
- pxor %mm2,%mm2
- pxor %mm6,%mm6
- pcmpgtb %mm1,%mm2
- pcmpgtb %mm5,%mm6
- pand %mm3,%mm2
- pand %mm3,%mm6
- paddb %mm1,%mm1
- paddb %mm5,%mm5
- pxor %mm2,%mm1
- pxor %mm6,%mm5
- movq %mm1,%mm3
- movq %mm5,%mm7
- movq %mm1,%mm2
- movq %mm5,%mm6
- pxor %mm1,%mm0
- pxor %mm5,%mm4
- pslld $24,%mm3
- pslld $24,%mm7
- psrld $8,%mm2
- psrld $8,%mm6
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- movq 8(%esp),%mm2
- pxor %mm3,%mm3
- pxor %mm7,%mm7
- pcmpgtb %mm1,%mm3
- pcmpgtb %mm5,%mm7
- pand %mm2,%mm3
- pand %mm2,%mm7
- paddb %mm1,%mm1
- paddb %mm5,%mm5
- pxor %mm3,%mm1
- pxor %mm7,%mm5
- pshufw $177,%mm1,%mm3
- pshufw $177,%mm5,%mm7
- pxor %mm1,%mm0
- pxor %mm5,%mm4
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- pxor %mm3,%mm3
- pxor %mm7,%mm7
- pcmpgtb %mm1,%mm3
- pcmpgtb %mm5,%mm7
- pand %mm2,%mm3
- pand %mm2,%mm7
- paddb %mm1,%mm1
- paddb %mm5,%mm5
- pxor %mm3,%mm1
- pxor %mm7,%mm5
- pxor %mm1,%mm0
- pxor %mm5,%mm4
- movq %mm1,%mm3
- movq %mm5,%mm7
- pshufw $177,%mm1,%mm2
- pshufw $177,%mm5,%mm6
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- pslld $8,%mm1
- pslld $8,%mm5
- psrld $8,%mm3
- psrld $8,%mm7
- movq (%edi),%mm2
- movq 8(%edi),%mm6
- pxor %mm1,%mm0
- pxor %mm5,%mm4
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- movl -128(%ebp),%eax
- pslld $16,%mm1
- pslld $16,%mm5
- movl -64(%ebp),%ebx
- psrld $16,%mm3
- psrld $16,%mm7
- movl (%ebp),%ecx
- pxor %mm1,%mm0
- pxor %mm5,%mm4
- movl 64(%ebp),%edx
- pxor %mm3,%mm0
- pxor %mm7,%mm4
- pxor %mm2,%mm0
- pxor %mm6,%mm4
- jmp .L007loop
-.align 16
-.L008out:
- pxor (%edi),%mm0
- pxor 8(%edi),%mm4
- ret
-.size _sse_AES_decrypt_compact,.-_sse_AES_decrypt_compact
-.type _x86_AES_decrypt,@function
-.align 16
-_x86_AES_decrypt:
- movl %edi,20(%esp)
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- movl 240(%edi),%esi
- leal -2(%esi,%esi,1),%esi
- leal (%edi,%esi,8),%esi
- movl %esi,24(%esp)
-.align 16
-.L009loop:
- movl %eax,%esi
- andl $255,%esi
- movl (%ebp,%esi,8),%esi
- movzbl %dh,%edi
- xorl 3(%ebp,%edi,8),%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- xorl 2(%ebp,%edi,8),%esi
- movl %ebx,%edi
- shrl $24,%edi
- xorl 1(%ebp,%edi,8),%esi
- movl %esi,4(%esp)
-
- movl %ebx,%esi
- andl $255,%esi
- movl (%ebp,%esi,8),%esi
- movzbl %ah,%edi
- xorl 3(%ebp,%edi,8),%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- xorl 2(%ebp,%edi,8),%esi
- movl %ecx,%edi
- shrl $24,%edi
- xorl 1(%ebp,%edi,8),%esi
- movl %esi,8(%esp)
-
- movl %ecx,%esi
- andl $255,%esi
- movl (%ebp,%esi,8),%esi
- movzbl %bh,%edi
- xorl 3(%ebp,%edi,8),%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edi
- xorl 2(%ebp,%edi,8),%esi
- movl %edx,%edi
- shrl $24,%edi
- xorl 1(%ebp,%edi,8),%esi
-
- movl 20(%esp),%edi
- andl $255,%edx
- movl (%ebp,%edx,8),%edx
- movzbl %ch,%ecx
- xorl 3(%ebp,%ecx,8),%edx
- movl %esi,%ecx
- shrl $16,%ebx
- andl $255,%ebx
- xorl 2(%ebp,%ebx,8),%edx
- movl 8(%esp),%ebx
- shrl $24,%eax
- xorl 1(%ebp,%eax,8),%edx
- movl 4(%esp),%eax
-
- addl $16,%edi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- cmpl 24(%esp),%edi
- movl %edi,20(%esp)
- jb .L009loop
- leal 2176(%ebp),%ebp
- movl -128(%ebp),%edi
- movl -96(%ebp),%esi
- movl -64(%ebp),%edi
- movl -32(%ebp),%esi
- movl (%ebp),%edi
- movl 32(%ebp),%esi
- movl 64(%ebp),%edi
- movl 96(%ebp),%esi
- leal -128(%ebp),%ebp
- movl %eax,%esi
- andl $255,%esi
- movzbl (%ebp,%esi,1),%esi
- movzbl %dh,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %ebx,%edi
- shrl $24,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,4(%esp)
- movl %ebx,%esi
- andl $255,%esi
- movzbl (%ebp,%esi,1),%esi
- movzbl %ah,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %ecx,%edi
- shrl $24,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl %esi,8(%esp)
- movl %ecx,%esi
- andl $255,%esi
- movzbl (%ebp,%esi,1),%esi
- movzbl %bh,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $8,%edi
- xorl %edi,%esi
- movl %eax,%edi
- shrl $16,%edi
- andl $255,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $16,%edi
- xorl %edi,%esi
- movl %edx,%edi
- shrl $24,%edi
- movzbl (%ebp,%edi,1),%edi
- shll $24,%edi
- xorl %edi,%esi
- movl 20(%esp),%edi
- andl $255,%edx
- movzbl (%ebp,%edx,1),%edx
- movzbl %ch,%ecx
- movzbl (%ebp,%ecx,1),%ecx
- shll $8,%ecx
- xorl %ecx,%edx
- movl %esi,%ecx
- shrl $16,%ebx
- andl $255,%ebx
- movzbl (%ebp,%ebx,1),%ebx
- shll $16,%ebx
- xorl %ebx,%edx
- movl 8(%esp),%ebx
- shrl $24,%eax
- movzbl (%ebp,%eax,1),%eax
- shll $24,%eax
- xorl %eax,%edx
- movl 4(%esp),%eax
- leal -2048(%ebp),%ebp
- addl $16,%edi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- ret
-.align 64
-.LAES_Td:
-.long 1353184337,1353184337
-.long 1399144830,1399144830
-.long 3282310938,3282310938
-.long 2522752826,2522752826
-.long 3412831035,3412831035
-.long 4047871263,4047871263
-.long 2874735276,2874735276
-.long 2466505547,2466505547
-.long 1442459680,1442459680
-.long 4134368941,4134368941
-.long 2440481928,2440481928
-.long 625738485,625738485
-.long 4242007375,4242007375
-.long 3620416197,3620416197
-.long 2151953702,2151953702
-.long 2409849525,2409849525
-.long 1230680542,1230680542
-.long 1729870373,1729870373
-.long 2551114309,2551114309
-.long 3787521629,3787521629
-.long 41234371,41234371
-.long 317738113,317738113
-.long 2744600205,2744600205
-.long 3338261355,3338261355
-.long 3881799427,3881799427
-.long 2510066197,2510066197
-.long 3950669247,3950669247
-.long 3663286933,3663286933
-.long 763608788,763608788
-.long 3542185048,3542185048
-.long 694804553,694804553
-.long 1154009486,1154009486
-.long 1787413109,1787413109
-.long 2021232372,2021232372
-.long 1799248025,1799248025
-.long 3715217703,3715217703
-.long 3058688446,3058688446
-.long 397248752,397248752
-.long 1722556617,1722556617
-.long 3023752829,3023752829
-.long 407560035,407560035
-.long 2184256229,2184256229
-.long 1613975959,1613975959
-.long 1165972322,1165972322
-.long 3765920945,3765920945
-.long 2226023355,2226023355
-.long 480281086,480281086
-.long 2485848313,2485848313
-.long 1483229296,1483229296
-.long 436028815,436028815
-.long 2272059028,2272059028
-.long 3086515026,3086515026
-.long 601060267,601060267
-.long 3791801202,3791801202
-.long 1468997603,1468997603
-.long 715871590,715871590
-.long 120122290,120122290
-.long 63092015,63092015
-.long 2591802758,2591802758
-.long 2768779219,2768779219
-.long 4068943920,4068943920
-.long 2997206819,2997206819
-.long 3127509762,3127509762
-.long 1552029421,1552029421
-.long 723308426,723308426
-.long 2461301159,2461301159
-.long 4042393587,4042393587
-.long 2715969870,2715969870
-.long 3455375973,3455375973
-.long 3586000134,3586000134
-.long 526529745,526529745
-.long 2331944644,2331944644
-.long 2639474228,2639474228
-.long 2689987490,2689987490
-.long 853641733,853641733
-.long 1978398372,1978398372
-.long 971801355,971801355
-.long 2867814464,2867814464
-.long 111112542,111112542
-.long 1360031421,1360031421
-.long 4186579262,4186579262
-.long 1023860118,1023860118
-.long 2919579357,2919579357
-.long 1186850381,1186850381
-.long 3045938321,3045938321
-.long 90031217,90031217
-.long 1876166148,1876166148
-.long 4279586912,4279586912
-.long 620468249,620468249
-.long 2548678102,2548678102
-.long 3426959497,3426959497
-.long 2006899047,2006899047
-.long 3175278768,3175278768
-.long 2290845959,2290845959
-.long 945494503,945494503
-.long 3689859193,3689859193
-.long 1191869601,1191869601
-.long 3910091388,3910091388
-.long 3374220536,3374220536
-.long 0,0
-.long 2206629897,2206629897
-.long 1223502642,1223502642
-.long 2893025566,2893025566
-.long 1316117100,1316117100
-.long 4227796733,4227796733
-.long 1446544655,1446544655
-.long 517320253,517320253
-.long 658058550,658058550
-.long 1691946762,1691946762
-.long 564550760,564550760
-.long 3511966619,3511966619
-.long 976107044,976107044
-.long 2976320012,2976320012
-.long 266819475,266819475
-.long 3533106868,3533106868
-.long 2660342555,2660342555
-.long 1338359936,1338359936
-.long 2720062561,2720062561
-.long 1766553434,1766553434
-.long 370807324,370807324
-.long 179999714,179999714
-.long 3844776128,3844776128
-.long 1138762300,1138762300
-.long 488053522,488053522
-.long 185403662,185403662
-.long 2915535858,2915535858
-.long 3114841645,3114841645
-.long 3366526484,3366526484
-.long 2233069911,2233069911
-.long 1275557295,1275557295
-.long 3151862254,3151862254
-.long 4250959779,4250959779
-.long 2670068215,2670068215
-.long 3170202204,3170202204
-.long 3309004356,3309004356
-.long 880737115,880737115
-.long 1982415755,1982415755
-.long 3703972811,3703972811
-.long 1761406390,1761406390
-.long 1676797112,1676797112
-.long 3403428311,3403428311
-.long 277177154,277177154
-.long 1076008723,1076008723
-.long 538035844,538035844
-.long 2099530373,2099530373
-.long 4164795346,4164795346
-.long 288553390,288553390
-.long 1839278535,1839278535
-.long 1261411869,1261411869
-.long 4080055004,4080055004
-.long 3964831245,3964831245
-.long 3504587127,3504587127
-.long 1813426987,1813426987
-.long 2579067049,2579067049
-.long 4199060497,4199060497
-.long 577038663,577038663
-.long 3297574056,3297574056
-.long 440397984,440397984
-.long 3626794326,3626794326
-.long 4019204898,4019204898
-.long 3343796615,3343796615
-.long 3251714265,3251714265
-.long 4272081548,4272081548
-.long 906744984,906744984
-.long 3481400742,3481400742
-.long 685669029,685669029
-.long 646887386,646887386
-.long 2764025151,2764025151
-.long 3835509292,3835509292
-.long 227702864,227702864
-.long 2613862250,2613862250
-.long 1648787028,1648787028
-.long 3256061430,3256061430
-.long 3904428176,3904428176
-.long 1593260334,1593260334
-.long 4121936770,4121936770
-.long 3196083615,3196083615
-.long 2090061929,2090061929
-.long 2838353263,2838353263
-.long 3004310991,3004310991
-.long 999926984,999926984
-.long 2809993232,2809993232
-.long 1852021992,1852021992
-.long 2075868123,2075868123
-.long 158869197,158869197
-.long 4095236462,4095236462
-.long 28809964,28809964
-.long 2828685187,2828685187
-.long 1701746150,1701746150
-.long 2129067946,2129067946
-.long 147831841,147831841
-.long 3873969647,3873969647
-.long 3650873274,3650873274
-.long 3459673930,3459673930
-.long 3557400554,3557400554
-.long 3598495785,3598495785
-.long 2947720241,2947720241
-.long 824393514,824393514
-.long 815048134,815048134
-.long 3227951669,3227951669
-.long 935087732,935087732
-.long 2798289660,2798289660
-.long 2966458592,2966458592
-.long 366520115,366520115
-.long 1251476721,1251476721
-.long 4158319681,4158319681
-.long 240176511,240176511
-.long 804688151,804688151
-.long 2379631990,2379631990
-.long 1303441219,1303441219
-.long 1414376140,1414376140
-.long 3741619940,3741619940
-.long 3820343710,3820343710
-.long 461924940,461924940
-.long 3089050817,3089050817
-.long 2136040774,2136040774
-.long 82468509,82468509
-.long 1563790337,1563790337
-.long 1937016826,1937016826
-.long 776014843,776014843
-.long 1511876531,1511876531
-.long 1389550482,1389550482
-.long 861278441,861278441
-.long 323475053,323475053
-.long 2355222426,2355222426
-.long 2047648055,2047648055
-.long 2383738969,2383738969
-.long 2302415851,2302415851
-.long 3995576782,3995576782
-.long 902390199,902390199
-.long 3991215329,3991215329
-.long 1018251130,1018251130
-.long 1507840668,1507840668
-.long 1064563285,1064563285
-.long 2043548696,2043548696
-.long 3208103795,3208103795
-.long 3939366739,3939366739
-.long 1537932639,1537932639
-.long 342834655,342834655
-.long 2262516856,2262516856
-.long 2180231114,2180231114
-.long 1053059257,1053059257
-.long 741614648,741614648
-.long 1598071746,1598071746
-.long 1925389590,1925389590
-.long 203809468,203809468
-.long 2336832552,2336832552
-.long 1100287487,1100287487
-.long 1895934009,1895934009
-.long 3736275976,3736275976
-.long 2632234200,2632234200
-.long 2428589668,2428589668
-.long 1636092795,1636092795
-.long 1890988757,1890988757
-.long 1952214088,1952214088
-.long 1113045200,1113045200
-.byte 82,9,106,213,48,54,165,56
-.byte 191,64,163,158,129,243,215,251
-.byte 124,227,57,130,155,47,255,135
-.byte 52,142,67,68,196,222,233,203
-.byte 84,123,148,50,166,194,35,61
-.byte 238,76,149,11,66,250,195,78
-.byte 8,46,161,102,40,217,36,178
-.byte 118,91,162,73,109,139,209,37
-.byte 114,248,246,100,134,104,152,22
-.byte 212,164,92,204,93,101,182,146
-.byte 108,112,72,80,253,237,185,218
-.byte 94,21,70,87,167,141,157,132
-.byte 144,216,171,0,140,188,211,10
-.byte 247,228,88,5,184,179,69,6
-.byte 208,44,30,143,202,63,15,2
-.byte 193,175,189,3,1,19,138,107
-.byte 58,145,17,65,79,103,220,234
-.byte 151,242,207,206,240,180,230,115
-.byte 150,172,116,34,231,173,53,133
-.byte 226,249,55,232,28,117,223,110
-.byte 71,241,26,113,29,41,197,137
-.byte 111,183,98,14,170,24,190,27
-.byte 252,86,62,75,198,210,121,32
-.byte 154,219,192,254,120,205,90,244
-.byte 31,221,168,51,136,7,199,49
-.byte 177,18,16,89,39,128,236,95
-.byte 96,81,127,169,25,181,74,13
-.byte 45,229,122,159,147,201,156,239
-.byte 160,224,59,77,174,42,245,176
-.byte 200,235,187,60,131,83,153,97
-.byte 23,43,4,126,186,119,214,38
-.byte 225,105,20,99,85,33,12,125
-.byte 82,9,106,213,48,54,165,56
-.byte 191,64,163,158,129,243,215,251
-.byte 124,227,57,130,155,47,255,135
-.byte 52,142,67,68,196,222,233,203
-.byte 84,123,148,50,166,194,35,61
-.byte 238,76,149,11,66,250,195,78
-.byte 8,46,161,102,40,217,36,178
-.byte 118,91,162,73,109,139,209,37
-.byte 114,248,246,100,134,104,152,22
-.byte 212,164,92,204,93,101,182,146
-.byte 108,112,72,80,253,237,185,218
-.byte 94,21,70,87,167,141,157,132
-.byte 144,216,171,0,140,188,211,10
-.byte 247,228,88,5,184,179,69,6
-.byte 208,44,30,143,202,63,15,2
-.byte 193,175,189,3,1,19,138,107
-.byte 58,145,17,65,79,103,220,234
-.byte 151,242,207,206,240,180,230,115
-.byte 150,172,116,34,231,173,53,133
-.byte 226,249,55,232,28,117,223,110
-.byte 71,241,26,113,29,41,197,137
-.byte 111,183,98,14,170,24,190,27
-.byte 252,86,62,75,198,210,121,32
-.byte 154,219,192,254,120,205,90,244
-.byte 31,221,168,51,136,7,199,49
-.byte 177,18,16,89,39,128,236,95
-.byte 96,81,127,169,25,181,74,13
-.byte 45,229,122,159,147,201,156,239
-.byte 160,224,59,77,174,42,245,176
-.byte 200,235,187,60,131,83,153,97
-.byte 23,43,4,126,186,119,214,38
-.byte 225,105,20,99,85,33,12,125
-.byte 82,9,106,213,48,54,165,56
-.byte 191,64,163,158,129,243,215,251
-.byte 124,227,57,130,155,47,255,135
-.byte 52,142,67,68,196,222,233,203
-.byte 84,123,148,50,166,194,35,61
-.byte 238,76,149,11,66,250,195,78
-.byte 8,46,161,102,40,217,36,178
-.byte 118,91,162,73,109,139,209,37
-.byte 114,248,246,100,134,104,152,22
-.byte 212,164,92,204,93,101,182,146
-.byte 108,112,72,80,253,237,185,218
-.byte 94,21,70,87,167,141,157,132
-.byte 144,216,171,0,140,188,211,10
-.byte 247,228,88,5,184,179,69,6
-.byte 208,44,30,143,202,63,15,2
-.byte 193,175,189,3,1,19,138,107
-.byte 58,145,17,65,79,103,220,234
-.byte 151,242,207,206,240,180,230,115
-.byte 150,172,116,34,231,173,53,133
-.byte 226,249,55,232,28,117,223,110
-.byte 71,241,26,113,29,41,197,137
-.byte 111,183,98,14,170,24,190,27
-.byte 252,86,62,75,198,210,121,32
-.byte 154,219,192,254,120,205,90,244
-.byte 31,221,168,51,136,7,199,49
-.byte 177,18,16,89,39,128,236,95
-.byte 96,81,127,169,25,181,74,13
-.byte 45,229,122,159,147,201,156,239
-.byte 160,224,59,77,174,42,245,176
-.byte 200,235,187,60,131,83,153,97
-.byte 23,43,4,126,186,119,214,38
-.byte 225,105,20,99,85,33,12,125
-.byte 82,9,106,213,48,54,165,56
-.byte 191,64,163,158,129,243,215,251
-.byte 124,227,57,130,155,47,255,135
-.byte 52,142,67,68,196,222,233,203
-.byte 84,123,148,50,166,194,35,61
-.byte 238,76,149,11,66,250,195,78
-.byte 8,46,161,102,40,217,36,178
-.byte 118,91,162,73,109,139,209,37
-.byte 114,248,246,100,134,104,152,22
-.byte 212,164,92,204,93,101,182,146
-.byte 108,112,72,80,253,237,185,218
-.byte 94,21,70,87,167,141,157,132
-.byte 144,216,171,0,140,188,211,10
-.byte 247,228,88,5,184,179,69,6
-.byte 208,44,30,143,202,63,15,2
-.byte 193,175,189,3,1,19,138,107
-.byte 58,145,17,65,79,103,220,234
-.byte 151,242,207,206,240,180,230,115
-.byte 150,172,116,34,231,173,53,133
-.byte 226,249,55,232,28,117,223,110
-.byte 71,241,26,113,29,41,197,137
-.byte 111,183,98,14,170,24,190,27
-.byte 252,86,62,75,198,210,121,32
-.byte 154,219,192,254,120,205,90,244
-.byte 31,221,168,51,136,7,199,49
-.byte 177,18,16,89,39,128,236,95
-.byte 96,81,127,169,25,181,74,13
-.byte 45,229,122,159,147,201,156,239
-.byte 160,224,59,77,174,42,245,176
-.byte 200,235,187,60,131,83,153,97
-.byte 23,43,4,126,186,119,214,38
-.byte 225,105,20,99,85,33,12,125
-.size _x86_AES_decrypt,.-_x86_AES_decrypt
-.globl AES_decrypt
-.type AES_decrypt,@function
-.align 16
-AES_decrypt:
-.L_AES_decrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl 28(%esp),%edi
- movl %esp,%eax
- subl $36,%esp
- andl $-64,%esp
- leal -127(%edi),%ebx
- subl %esp,%ebx
- negl %ebx
- andl $960,%ebx
- subl %ebx,%esp
- addl $4,%esp
- movl %eax,28(%esp)
- call .L010pic_point
-.L010pic_point:
- popl %ebp
- leal _GLOBAL_OFFSET_TABLE_+[.-.L010pic_point](%ebp),%eax
- movl OPENSSL_ia32cap_P@GOT(%eax),%eax
- leal .LAES_Td-.L010pic_point(%ebp),%ebp
- leal 764(%esp),%ebx
- subl %ebp,%ebx
- andl $768,%ebx
- leal 2176(%ebp,%ebx,1),%ebp
- btl $25,(%eax)
- jnc .L011x86
- movq (%esi),%mm0
- movq 8(%esi),%mm4
- call _sse_AES_decrypt_compact
- movl 28(%esp),%esp
- movl 24(%esp),%esi
- movq %mm0,(%esi)
- movq %mm4,8(%esi)
- emms
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.align 16
-.L011x86:
- movl %ebp,24(%esp)
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- call _x86_AES_decrypt_compact
- movl 28(%esp),%esp
- movl 24(%esp),%esi
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size AES_decrypt,.-.L_AES_decrypt_begin
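AES_cbc_encrypt below layers CBC chaining over the two block routines: on encryption each plaintext block is XORed with the previous ciphertext block (the IV for the first) before the block encrypt, on decryption the XOR is undone afterwards, and the final ciphertext block is written back into ivec so consecutive calls chain. Everything else in the routine, the fast/slow split, the 64-byte stack alignment, the table prefetch, serves that loop. Stripped to the chaining logic against the public OpenSSL prototype (a sketch, not the routine's actual code path):

    #include <string.h>
    #include <openssl/aes.h>

    /* CBC chaining as the fast_enc/fast_dec loops below implement it;
     * this sketch handles only whole 16-byte blocks. */
    static void cbc_sketch(const unsigned char *in, unsigned char *out,
                           size_t len, const AES_KEY *key,
                           unsigned char ivec[16], int enc)
    {
        unsigned char iv[16], tmp[16];
        memcpy(iv, ivec, 16);
        for (; len >= 16; len -= 16, in += 16, out += 16) {
            if (enc) {
                for (int i = 0; i < 16; i++) tmp[i] = in[i] ^ iv[i];
                AES_encrypt(tmp, out, key);
                memcpy(iv, out, 16);    /* ciphertext feeds forward */
            } else {
                memcpy(tmp, in, 16);    /* keep ct: in may alias out */
                AES_decrypt(in, out, key);
                for (int i = 0; i < 16; i++) out[i] ^= iv[i];
                memcpy(iv, tmp, 16);
            }
        }
        memcpy(ivec, iv, 16);           /* chainable across calls */
    }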
-.globl AES_cbc_encrypt
-.type AES_cbc_encrypt,@function
-.align 16
-AES_cbc_encrypt:
-.L_AES_cbc_encrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 28(%esp),%ecx
- cmpl $0,%ecx
- je .L012drop_out
- call .L013pic_point
-.L013pic_point:
- popl %ebp
- leal _GLOBAL_OFFSET_TABLE_+[.-.L013pic_point](%ebp),%eax
- movl OPENSSL_ia32cap_P@GOT(%eax),%eax
- cmpl $0,40(%esp)
- leal .LAES_Te-.L013pic_point(%ebp),%ebp
- jne .L014picked_te
- leal .LAES_Td-.LAES_Te(%ebp),%ebp
-.L014picked_te:
- pushfl
- cld
- cmpl $512,%ecx
- jb .L015slow_way
- testl $15,%ecx
- jnz .L015slow_way
- btl $28,(%eax)
- jc .L015slow_way
- leal -324(%esp),%esi
- andl $-64,%esi
- movl %ebp,%eax
- leal 2304(%ebp),%ebx
- movl %esi,%edx
- andl $4095,%eax
- andl $4095,%ebx
- andl $4095,%edx
- cmpl %ebx,%edx
- jb .L016tbl_break_out
- subl %ebx,%edx
- subl %edx,%esi
- jmp .L017tbl_ok
-.align 4
-.L016tbl_break_out:
- subl %eax,%edx
- andl $4095,%edx
- addl $384,%edx
- subl %edx,%esi
-.align 4
-.L017tbl_ok:
- leal 24(%esp),%edx
- xchgl %esi,%esp
- addl $4,%esp
- movl %ebp,24(%esp)
- movl %esi,28(%esp)
- movl (%edx),%eax
- movl 4(%edx),%ebx
- movl 12(%edx),%edi
- movl 16(%edx),%esi
- movl 20(%edx),%edx
- movl %eax,32(%esp)
- movl %ebx,36(%esp)
- movl %ecx,40(%esp)
- movl %edi,44(%esp)
- movl %esi,48(%esp)
- movl $0,316(%esp)
- movl %edi,%ebx
- movl $61,%ecx
- subl %ebp,%ebx
- movl %edi,%esi
- andl $4095,%ebx
- leal 76(%esp),%edi
- cmpl $2304,%ebx
- jb .L018do_copy
- cmpl $3852,%ebx
- jb .L019skip_copy
-.align 4
-.L018do_copy:
- movl %edi,44(%esp)
-.long 2784229001
-.L019skip_copy:
- movl $16,%edi
-.align 4
-.L020prefetch_tbl:
- movl (%ebp),%eax
- movl 32(%ebp),%ebx
- movl 64(%ebp),%ecx
- movl 96(%ebp),%esi
- leal 128(%ebp),%ebp
- subl $1,%edi
- jnz .L020prefetch_tbl
- subl $2048,%ebp
- movl 32(%esp),%esi
- movl 48(%esp),%edi
- cmpl $0,%edx
- je .L021fast_decrypt
- movl (%edi),%eax
- movl 4(%edi),%ebx
-.align 16
-.L022fast_enc_loop:
- movl 8(%edi),%ecx
- movl 12(%edi),%edx
- xorl (%esi),%eax
- xorl 4(%esi),%ebx
- xorl 8(%esi),%ecx
- xorl 12(%esi),%edx
- movl 44(%esp),%edi
- call _x86_AES_encrypt
- movl 32(%esp),%esi
- movl 36(%esp),%edi
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- leal 16(%esi),%esi
- movl 40(%esp),%ecx
- movl %esi,32(%esp)
- leal 16(%edi),%edx
- movl %edx,36(%esp)
- subl $16,%ecx
- movl %ecx,40(%esp)
- jnz .L022fast_enc_loop
- movl 48(%esp),%esi
- movl 8(%edi),%ecx
- movl 12(%edi),%edx
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- cmpl $0,316(%esp)
- movl 44(%esp),%edi
- je .L023skip_ezero
- movl $60,%ecx
- xorl %eax,%eax
-.align 4
-.long 2884892297
-.L023skip_ezero:
- movl 28(%esp),%esp
- popfl
-.L012drop_out:
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- pushfl
-.align 16
-.L021fast_decrypt:
- cmpl 36(%esp),%esi
- je .L024fast_dec_in_place
- movl %edi,52(%esp)
-.align 4
-.align 16
-.L025fast_dec_loop:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl 44(%esp),%edi
- call _x86_AES_decrypt
- movl 52(%esp),%edi
- movl 40(%esp),%esi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- movl 36(%esp),%edi
- movl 32(%esp),%esi
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 40(%esp),%ecx
- movl %esi,52(%esp)
- leal 16(%esi),%esi
- movl %esi,32(%esp)
- leal 16(%edi),%edi
- movl %edi,36(%esp)
- subl $16,%ecx
- movl %ecx,40(%esp)
- jnz .L025fast_dec_loop
- movl 52(%esp),%edi
- movl 48(%esp),%esi
- movl (%edi),%eax
- movl 4(%edi),%ebx
- movl 8(%edi),%ecx
- movl 12(%edi),%edx
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- jmp .L026fast_dec_out
-.align 16
-.L024fast_dec_in_place:
-.L027fast_dec_in_place_loop:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- leal 60(%esp),%edi
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 44(%esp),%edi
- call _x86_AES_decrypt
- movl 48(%esp),%edi
- movl 36(%esp),%esi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- leal 16(%esi),%esi
- movl %esi,36(%esp)
- leal 60(%esp),%esi
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 32(%esp),%esi
- movl 40(%esp),%ecx
- leal 16(%esi),%esi
- movl %esi,32(%esp)
- subl $16,%ecx
- movl %ecx,40(%esp)
- jnz .L027fast_dec_in_place_loop
-.align 4
-.L026fast_dec_out:
- cmpl $0,316(%esp)
- movl 44(%esp),%edi
- je .L028skip_dzero
- movl $60,%ecx
- xorl %eax,%eax
-.align 4
-.long 2884892297
-.L028skip_dzero:
- movl 28(%esp),%esp
- popfl
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- pushfl
-.align 16
-.L015slow_way:
- movl (%eax),%eax
- movl 36(%esp),%edi
- leal -80(%esp),%esi
- andl $-64,%esi
- leal -143(%edi),%ebx
- subl %esi,%ebx
- negl %ebx
- andl $960,%ebx
- subl %ebx,%esi
- leal 768(%esi),%ebx
- subl %ebp,%ebx
- andl $768,%ebx
- leal 2176(%ebp,%ebx,1),%ebp
- leal 24(%esp),%edx
- xchgl %esi,%esp
- addl $4,%esp
- movl %ebp,24(%esp)
- movl %esi,28(%esp)
- movl %eax,52(%esp)
- movl (%edx),%eax
- movl 4(%edx),%ebx
- movl 16(%edx),%esi
- movl 20(%edx),%edx
- movl %eax,32(%esp)
- movl %ebx,36(%esp)
- movl %ecx,40(%esp)
- movl %edi,44(%esp)
- movl %esi,48(%esp)
- movl %esi,%edi
- movl %eax,%esi
- cmpl $0,%edx
- je .L029slow_decrypt
- cmpl $16,%ecx
- movl %ebx,%edx
- jb .L030slow_enc_tail
- btl $25,52(%esp)
- jnc .L031slow_enc_x86
- movq (%edi),%mm0
- movq 8(%edi),%mm4
-.align 16
-.L032slow_enc_loop_sse:
- pxor (%esi),%mm0
- pxor 8(%esi),%mm4
- movl 44(%esp),%edi
- call _sse_AES_encrypt_compact
- movl 32(%esp),%esi
- movl 36(%esp),%edi
- movl 40(%esp),%ecx
- movq %mm0,(%edi)
- movq %mm4,8(%edi)
- leal 16(%esi),%esi
- movl %esi,32(%esp)
- leal 16(%edi),%edx
- movl %edx,36(%esp)
- subl $16,%ecx
- cmpl $16,%ecx
- movl %ecx,40(%esp)
- jae .L032slow_enc_loop_sse
- testl $15,%ecx
- jnz .L030slow_enc_tail
- movl 48(%esp),%esi
- movq %mm0,(%esi)
- movq %mm4,8(%esi)
- emms
- movl 28(%esp),%esp
- popfl
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- pushfl
-.align 16
-.L031slow_enc_x86:
- movl (%edi),%eax
- movl 4(%edi),%ebx
-.align 4
-.L033slow_enc_loop_x86:
- movl 8(%edi),%ecx
- movl 12(%edi),%edx
- xorl (%esi),%eax
- xorl 4(%esi),%ebx
- xorl 8(%esi),%ecx
- xorl 12(%esi),%edx
- movl 44(%esp),%edi
- call _x86_AES_encrypt_compact
- movl 32(%esp),%esi
- movl 36(%esp),%edi
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 40(%esp),%ecx
- leal 16(%esi),%esi
- movl %esi,32(%esp)
- leal 16(%edi),%edx
- movl %edx,36(%esp)
- subl $16,%ecx
- cmpl $16,%ecx
- movl %ecx,40(%esp)
- jae .L033slow_enc_loop_x86
- testl $15,%ecx
- jnz .L030slow_enc_tail
- movl 48(%esp),%esi
- movl 8(%edi),%ecx
- movl 12(%edi),%edx
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- movl 28(%esp),%esp
- popfl
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- pushfl
-.align 16
-.L030slow_enc_tail:
- emms
- movl %edx,%edi
- movl $16,%ebx
- subl %ecx,%ebx
- cmpl %esi,%edi
- je .L034enc_in_place
-.align 4
-.long 2767451785
- jmp .L035enc_skip_in_place
-.L034enc_in_place:
- leal (%edi,%ecx,1),%edi
-.L035enc_skip_in_place:
- movl %ebx,%ecx
- xorl %eax,%eax
-.align 4
-.long 2868115081
- movl 48(%esp),%edi
- movl %edx,%esi
- movl (%edi),%eax
- movl 4(%edi),%ebx
- movl $16,40(%esp)
- jmp .L033slow_enc_loop_x86
-.align 16
-.L029slow_decrypt:
- btl $25,52(%esp)
- jnc .L036slow_dec_loop_x86
-.align 4
-.L037slow_dec_loop_sse:
- movq (%esi),%mm0
- movq 8(%esi),%mm4
- movl 44(%esp),%edi
- call _sse_AES_decrypt_compact
- movl 32(%esp),%esi
- leal 60(%esp),%eax
- movl 36(%esp),%ebx
- movl 40(%esp),%ecx
- movl 48(%esp),%edi
- movq (%esi),%mm1
- movq 8(%esi),%mm5
- pxor (%edi),%mm0
- pxor 8(%edi),%mm4
- movq %mm1,(%edi)
- movq %mm5,8(%edi)
- subl $16,%ecx
- jc .L038slow_dec_partial_sse
- movq %mm0,(%ebx)
- movq %mm4,8(%ebx)
- leal 16(%ebx),%ebx
- movl %ebx,36(%esp)
- leal 16(%esi),%esi
- movl %esi,32(%esp)
- movl %ecx,40(%esp)
- jnz .L037slow_dec_loop_sse
- emms
- movl 28(%esp),%esp
- popfl
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- pushfl
-.align 16
-.L038slow_dec_partial_sse:
- movq %mm0,(%eax)
- movq %mm4,8(%eax)
- emms
- addl $16,%ecx
- movl %ebx,%edi
- movl %eax,%esi
-.align 4
-.long 2767451785
- movl 28(%esp),%esp
- popfl
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- pushfl
-.align 16
-.L036slow_dec_loop_x86:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- leal 60(%esp),%edi
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 44(%esp),%edi
- call _x86_AES_decrypt_compact
- movl 48(%esp),%edi
- movl 40(%esp),%esi
- xorl (%edi),%eax
- xorl 4(%edi),%ebx
- xorl 8(%edi),%ecx
- xorl 12(%edi),%edx
- subl $16,%esi
- jc .L039slow_dec_partial_x86
- movl %esi,40(%esp)
- movl 36(%esp),%esi
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- leal 16(%esi),%esi
- movl %esi,36(%esp)
- leal 60(%esp),%esi
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 32(%esp),%esi
- leal 16(%esi),%esi
- movl %esi,32(%esp)
- jnz .L036slow_dec_loop_x86
- movl 28(%esp),%esp
- popfl
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
- pushfl
-.align 16
-.L039slow_dec_partial_x86:
- leal 60(%esp),%esi
- movl %eax,(%esi)
- movl %ebx,4(%esi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- movl 32(%esp),%esi
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 40(%esp),%ecx
- movl 36(%esp),%edi
- leal 60(%esp),%esi
-.align 4
-.long 2767451785
- movl 28(%esp),%esp
- popfl
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size AES_cbc_encrypt,.-.L_AES_cbc_encrypt_begin
-.type _x86_AES_set_encrypt_key,@function
-.align 16
-_x86_AES_set_encrypt_key:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 24(%esp),%esi
- movl 32(%esp),%edi
- testl $-1,%esi
- jz .L040badpointer
- testl $-1,%edi
- jz .L040badpointer
- call .L041pic_point
-.L041pic_point:
- popl %ebp
- leal .LAES_Te-.L041pic_point(%ebp),%ebp
- leal 2176(%ebp),%ebp
- movl -128(%ebp),%eax
- movl -96(%ebp),%ebx
- movl -64(%ebp),%ecx
- movl -32(%ebp),%edx
- movl (%ebp),%eax
- movl 32(%ebp),%ebx
- movl 64(%ebp),%ecx
- movl 96(%ebp),%edx
- movl 28(%esp),%ecx
- cmpl $128,%ecx
- je .L04210rounds
- cmpl $192,%ecx
- je .L04312rounds
- cmpl $256,%ecx
- je .L04414rounds
- movl $-2,%eax
- jmp .L045exit
-.L04210rounds:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- xorl %ecx,%ecx
- jmp .L04610shortcut
-.align 4
-.L04710loop:
- movl (%edi),%eax
- movl 12(%edi),%edx
-.L04610shortcut:
- movzbl %dl,%esi
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- shll $24,%ebx
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shrl $16,%edx
- movzbl %dl,%esi
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- shll $8,%ebx
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shll $16,%ebx
- xorl %ebx,%eax
- xorl 896(%ebp,%ecx,4),%eax
- movl %eax,16(%edi)
- xorl 4(%edi),%eax
- movl %eax,20(%edi)
- xorl 8(%edi),%eax
- movl %eax,24(%edi)
- xorl 12(%edi),%eax
- movl %eax,28(%edi)
- incl %ecx
- addl $16,%edi
- cmpl $10,%ecx
- jl .L04710loop
- movl $10,80(%edi)
- xorl %eax,%eax
- jmp .L045exit
-.L04312rounds:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 16(%esi),%ecx
- movl 20(%esi),%edx
- movl %ecx,16(%edi)
- movl %edx,20(%edi)
- xorl %ecx,%ecx
- jmp .L04812shortcut
-.align 4
-.L04912loop:
- movl (%edi),%eax
- movl 20(%edi),%edx
-.L04812shortcut:
- movzbl %dl,%esi
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- shll $24,%ebx
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shrl $16,%edx
- movzbl %dl,%esi
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- shll $8,%ebx
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shll $16,%ebx
- xorl %ebx,%eax
- xorl 896(%ebp,%ecx,4),%eax
- movl %eax,24(%edi)
- xorl 4(%edi),%eax
- movl %eax,28(%edi)
- xorl 8(%edi),%eax
- movl %eax,32(%edi)
- xorl 12(%edi),%eax
- movl %eax,36(%edi)
- cmpl $7,%ecx
- je .L05012break
- incl %ecx
- xorl 16(%edi),%eax
- movl %eax,40(%edi)
- xorl 20(%edi),%eax
- movl %eax,44(%edi)
- addl $24,%edi
- jmp .L04912loop
-.L05012break:
- movl $12,72(%edi)
- xorl %eax,%eax
- jmp .L045exit
-.L04414rounds:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl 8(%esi),%ecx
- movl 12(%esi),%edx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,8(%edi)
- movl %edx,12(%edi)
- movl 16(%esi),%eax
- movl 20(%esi),%ebx
- movl 24(%esi),%ecx
- movl 28(%esi),%edx
- movl %eax,16(%edi)
- movl %ebx,20(%edi)
- movl %ecx,24(%edi)
- movl %edx,28(%edi)
- xorl %ecx,%ecx
- jmp .L05114shortcut
-.align 4
-.L05214loop:
- movl 28(%edi),%edx
-.L05114shortcut:
- movl (%edi),%eax
- movzbl %dl,%esi
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- shll $24,%ebx
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shrl $16,%edx
- movzbl %dl,%esi
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- shll $8,%ebx
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shll $16,%ebx
- xorl %ebx,%eax
- xorl 896(%ebp,%ecx,4),%eax
- movl %eax,32(%edi)
- xorl 4(%edi),%eax
- movl %eax,36(%edi)
- xorl 8(%edi),%eax
- movl %eax,40(%edi)
- xorl 12(%edi),%eax
- movl %eax,44(%edi)
- cmpl $6,%ecx
- je .L05314break
- incl %ecx
- movl %eax,%edx
- movl 16(%edi),%eax
- movzbl %dl,%esi
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shrl $16,%edx
- shll $8,%ebx
- movzbl %dl,%esi
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- movzbl %dh,%esi
- shll $16,%ebx
- xorl %ebx,%eax
- movzbl -128(%ebp,%esi,1),%ebx
- shll $24,%ebx
- xorl %ebx,%eax
- movl %eax,48(%edi)
- xorl 20(%edi),%eax
- movl %eax,52(%edi)
- xorl 24(%edi),%eax
- movl %eax,56(%edi)
- xorl 28(%edi),%eax
- movl %eax,60(%edi)
- addl $32,%edi
- jmp .L05214loop
-.L05314break:
- movl $14,48(%edi)
- xorl %eax,%eax
- jmp .L045exit
-.L040badpointer:
- movl $-1,%eax
-.L045exit:
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size _x86_AES_set_encrypt_key,.-_x86_AES_set_encrypt_key
-.globl private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,@function
-.align 16
-private_AES_set_encrypt_key:
-.L_private_AES_set_encrypt_key_begin:
- call _x86_AES_set_encrypt_key
- ret
-.size private_AES_set_encrypt_key,.-.L_private_AES_set_encrypt_key_begin
-.globl private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,@function
-.align 16
-private_AES_set_decrypt_key:
-.L_private_AES_set_decrypt_key_begin:
- call _x86_AES_set_encrypt_key
- cmpl $0,%eax
- je .L054proceed
- ret
-.L054proceed:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 28(%esp),%esi
- movl 240(%esi),%ecx
- leal (,%ecx,4),%ecx
- leal (%esi,%ecx,4),%edi
-.align 4
-.L055invert:
- movl (%esi),%eax
- movl 4(%esi),%ebx
- movl (%edi),%ecx
- movl 4(%edi),%edx
- movl %eax,(%edi)
- movl %ebx,4(%edi)
- movl %ecx,(%esi)
- movl %edx,4(%esi)
- movl 8(%esi),%eax
- movl 12(%esi),%ebx
- movl 8(%edi),%ecx
- movl 12(%edi),%edx
- movl %eax,8(%edi)
- movl %ebx,12(%edi)
- movl %ecx,8(%esi)
- movl %edx,12(%esi)
- addl $16,%esi
- subl $16,%edi
- cmpl %edi,%esi
- jne .L055invert
- movl 28(%esp),%edi
- movl 240(%edi),%esi
- leal -2(%esi,%esi,1),%esi
- leal (%edi,%esi,8),%esi
- movl %esi,28(%esp)
- movl 16(%edi),%eax
-.align 4
-.L056permute:
- addl $16,%edi
- movl %eax,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%eax,%eax,1),%ebx
- subl %ebp,%esi
- andl $4278124286,%ebx
- andl $454761243,%esi
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ebx,%ebx,1),%ecx
- subl %ebp,%esi
- andl $4278124286,%ecx
- andl $454761243,%esi
- xorl %eax,%ebx
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ecx,%ecx,1),%edx
- xorl %eax,%ecx
- subl %ebp,%esi
- andl $4278124286,%edx
- andl $454761243,%esi
- roll $8,%eax
- xorl %esi,%edx
- movl 4(%edi),%ebp
- xorl %ebx,%eax
- xorl %edx,%ebx
- xorl %ecx,%eax
- roll $24,%ebx
- xorl %edx,%ecx
- xorl %edx,%eax
- roll $16,%ecx
- xorl %ebx,%eax
- roll $8,%edx
- xorl %ecx,%eax
- movl %ebp,%ebx
- xorl %edx,%eax
- movl %eax,(%edi)
- movl %ebx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ebx,%ebx,1),%ecx
- subl %ebp,%esi
- andl $4278124286,%ecx
- andl $454761243,%esi
- xorl %ecx,%esi
- movl %esi,%ecx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ecx,%ecx,1),%edx
- subl %ebp,%esi
- andl $4278124286,%edx
- andl $454761243,%esi
- xorl %ebx,%ecx
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%edx,%edx,1),%eax
- xorl %ebx,%edx
- subl %ebp,%esi
- andl $4278124286,%eax
- andl $454761243,%esi
- roll $8,%ebx
- xorl %esi,%eax
- movl 8(%edi),%ebp
- xorl %ecx,%ebx
- xorl %eax,%ecx
- xorl %edx,%ebx
- roll $24,%ecx
- xorl %eax,%edx
- xorl %eax,%ebx
- roll $16,%edx
- xorl %ecx,%ebx
- roll $8,%eax
- xorl %edx,%ebx
- movl %ebp,%ecx
- xorl %eax,%ebx
- movl %ebx,4(%edi)
- movl %ecx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ecx,%ecx,1),%edx
- subl %ebp,%esi
- andl $4278124286,%edx
- andl $454761243,%esi
- xorl %edx,%esi
- movl %esi,%edx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%edx,%edx,1),%eax
- subl %ebp,%esi
- andl $4278124286,%eax
- andl $454761243,%esi
- xorl %ecx,%edx
- xorl %eax,%esi
- movl %esi,%eax
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%eax,%eax,1),%ebx
- xorl %ecx,%eax
- subl %ebp,%esi
- andl $4278124286,%ebx
- andl $454761243,%esi
- roll $8,%ecx
- xorl %esi,%ebx
- movl 12(%edi),%ebp
- xorl %edx,%ecx
- xorl %ebx,%edx
- xorl %eax,%ecx
- roll $24,%edx
- xorl %ebx,%eax
- xorl %ebx,%ecx
- roll $16,%eax
- xorl %edx,%ecx
- roll $8,%ebx
- xorl %eax,%ecx
- movl %ebp,%edx
- xorl %ebx,%ecx
- movl %ecx,8(%edi)
- movl %edx,%esi
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%edx,%edx,1),%eax
- subl %ebp,%esi
- andl $4278124286,%eax
- andl $454761243,%esi
- xorl %eax,%esi
- movl %esi,%eax
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%eax,%eax,1),%ebx
- subl %ebp,%esi
- andl $4278124286,%ebx
- andl $454761243,%esi
- xorl %edx,%eax
- xorl %ebx,%esi
- movl %esi,%ebx
- andl $2155905152,%esi
- movl %esi,%ebp
- shrl $7,%ebp
- leal (%ebx,%ebx,1),%ecx
- xorl %edx,%ebx
- subl %ebp,%esi
- andl $4278124286,%ecx
- andl $454761243,%esi
- roll $8,%edx
- xorl %esi,%ecx
- movl 16(%edi),%ebp
- xorl %eax,%edx
- xorl %ecx,%eax
- xorl %ebx,%edx
- roll $24,%eax
- xorl %ecx,%ebx
- xorl %ecx,%edx
- roll $16,%ebx
- xorl %eax,%edx
- roll $8,%ecx
- xorl %ebx,%edx
- movl %ebp,%eax
- xorl %ecx,%edx
- movl %edx,12(%edi)
- cmpl 28(%esp),%edi
- jb .L056permute
- xorl %eax,%eax
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size private_AES_set_decrypt_key,.-.L_private_AES_set_decrypt_key_begin
-.byte 65,69,83,32,102,111,114,32,120,56,54,44,32,67,82,89
-.byte 80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114
-.byte 111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.comm OPENSSL_ia32cap_P,8,4
diff --git a/app/openssl/crypto/aes/asm/aes-586.pl b/app/openssl/crypto/aes/asm/aes-586.pl
deleted file mode 100644
index 51b500dd..00000000
--- a/app/openssl/crypto/aes/asm/aes-586.pl
+++ /dev/null
@@ -1,2980 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# Version 4.3.
-#
-# You might fail to appreciate this module's performance on the first
-# try. If compared to "vanilla" linux-ia32-icc target, i.e. considered
-# to be *the* best Intel C compiler without -KPIC, performance appears
-# to be virtually identical... But try to re-configure with shared
-# library support... Aha! Intel compiler "suddenly" lags behind by 30%
-# [on P4, more on others]:-) And if compared to position-independent
-# code generated by GNU C, this code performs *more* than *twice* as
-# fast! Yes, all this buzz about PIC means that unlike other hand-
-# coded implementations, this one was explicitly designed to be safe
-# to use even in shared library context... This also means that this
-# code isn't necessarily absolutely fastest "ever," because in order
-# to achieve position independence an extra register has to be
-# off-loaded to stack, which affects the benchmark result.
-#
-# Special note about instruction choice. Do you recall RC4_INT code
-# performing poorly on P4? It might be the time to figure out why.
-# RC4_INT code implies effective address calculations in base+offset*4
-# form. Trouble is that it seems that offset scaling turned out to be
-# the critical path... At least eliminating scaling resulted in 2.8x RC4
-# performance improvement [as you might recall]. As AES code is hungry
-# for scaling too, I [try to] avoid the latter by favoring off-by-2
-# shifts and masking the result with 0xFF<<2 instead of "boring" 0xFF.
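-# [To illustrate, in C-ish terms the two addressing forms are roughly
-#
-#	t = Te0[(s>>16)&0xff];				/* index, scaled by 4 */
-#	t = *(u32 *)((u8 *)Te0 + ((s>>14)&(0xff<<2)));	/* ready byte offset  */
-#
-# with illustrative names; the point is that the second form yields a
-# ready byte offset and needs no *4 scaling in the effective address.]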
-#
-# As was shown by Dean Gaudet <dean@arctic.org>, the above note turned
-# void. Performance improvement with off-by-2 shifts was observed on
-# intermediate implementation, which was spilling yet another register
-# to stack... Final offset*4 code below runs just a tad faster on P4,
-# but exhibits up to 10% improvement on other cores.
-#
-# Second version is "monolithic" replacement for aes_core.c, which in
-# addition to AES_[de|en]crypt implements private_AES_set_[de|en]crypt_key.
-# This made it possible to implement little-endian variant of the
-# algorithm without modifying the base C code. Motivating factor for
-# the undertaken effort was that it appeared that in tight IA-32
-# register window little-endian flavor could achieve slightly higher
-# Instruction Level Parallelism, and it indeed resulted in up to 15%
-# better performance on most recent µ-archs...
-#
-# Third version adds AES_cbc_encrypt implementation, which resulted in
-# up to 40% performance improvement in CBC benchmark results. 40% was
-# observed on P4 core, where the "overall" improvement coefficient, i.e. if
-# compared to PIC generated by GCC and in CBC mode, was observed to be
-# as large as 4x:-) CBC performance is virtually identical to ECB now
-# and on some platforms even better, e.g. 17.6 "small" cycles/byte on
-# Opteron, because certain function prologues and epilogues are
-# effectively taken out of the loop...
-#
-# Version 3.2 implements compressed tables and prefetch of these tables
-# in CBC[!] mode. The former means that 3/4 of table references are now
-# misaligned, which unfortunately has negative impact on elder IA-32
-# implementations: Pentium suffered a 30% penalty, PIII - 10%.
-#
-# Version 3.3 avoids L1 cache aliasing between stack frame and
-# S-boxes, and 3.4 - L1 cache aliasing even between key schedule and
-# S-boxes. The latter is achieved by copying the key schedule to a
-# controlled place on the stack. This unfortunately has rather strong
-# impact on small block CBC performance, ~2x deterioration on 16-byte
-# block if compared to 3.3.
-#
-# Version 3.5 checks if there is L1 cache aliasing between the user-
-# supplied key schedule and the S-boxes and abstains from copying the
-# former if there is none. This allows the end-user to consciously
-# retain small block performance by aligning the key schedule in a
-# specific manner.
-#
-# Version 3.6 compresses Td4 to 256 bytes and prefetches it in ECB.
-#
-# Current ECB performance numbers for 128-bit key in CPU cycles per
-# processed byte [measure commonly used by AES benchmarkers] are:
-#
-# small footprint fully unrolled
-# P4 24 22
-# AMD K8 20 19
-# PIII 25 23
-# Pentium 81 78
-#
-# Version 3.7 reimplements outer rounds as "compact," meaning that
-# first and last rounds reference the compact 256-byte S-box. This means
-# that the first round consumes a lot more CPU cycles and that encrypt
-# and decrypt performance becomes asymmetric. Encrypt performance
-# drops by 10-12%, while decrypt - by 20-25%:-( The 256-byte S-box is
-# aggressively pre-fetched.
-#
-# Version 4.0 effectively rolls back to 3.6 and instead implements
-# additional set of functions, _[x86|sse]_AES_[en|de]crypt_compact,
-# which use exclusively the 256-byte S-box. These functions are to be
-# called in modes not concealing plain text, such as ECB, or when
-# we're asked to process a smaller amount of data [or unconditionally
-# on hyper-threading CPUs]. Currently it's called unconditionally from
-# AES_[en|de]crypt, which affects all modes but CBC. The CBC routine
-# still needs to be modified to switch between slower and faster
-# mode when appropriate... But in either case benchmark landscape
-# changes dramatically and below numbers are CPU cycles per processed
-# byte for 128-bit key.
-#
-# ECB encrypt ECB decrypt CBC large chunk
-# P4 56[60] 84[100] 23
-# AMD K8 48[44] 70[79] 18
-# PIII 41[50] 61[91] 24
-# Core 2 32[38] 45[70] 18.5
-# Pentium 120 160 77
-#
-# Version 4.1 switches to compact S-box even in key schedule setup.
-#
-# Version 4.2 prefetches the compact S-box in every SSE round; in other
-# words every cache-line is *guaranteed* to be accessed within a ~50-
-# cycle window. Why just SSE? Because it's needed on hyper-threading
-# CPU! Which is also why it's prefetched with 64 byte stride. Best
-# part is that it has no negative effect on performance:-)
-#
-# Version 4.3 implements a switch between compact and non-compact block
-# functions in AES_cbc_encrypt depending on how much data was asked
-# to be processed in one stroke.
-#
-######################################################################
-# Timing attacks are classified in two classes: synchronous, when the
-# attacker consciously initiates the cryptographic operation and collects
-# timing data of various character afterwards, and asynchronous, when
-# malicious code is executed on the same CPU simultaneously with AES,
-# instruments itself and performs statistical analysis of this data.
-#
-# As far as synchronous attacks go, the root of the AES timing
-# vulnerability is twofold. Firstly, of the 256 S-box elements at most
-# 160 are referred to in a single 128-bit block operation. Well, in a C
-# implementation with 4 distinct tables it's actually as little as 40
-# references per 256-element table, but anyway... Secondly, even
-# though S-box elements are clustered into a smaller number of cache-
-# lines, smaller than 160 and even 40, it turned out that for certain
-# plain-text pattern[s], or simply put chosen plain-text and a given
-# key, a few cache-lines remain unaccessed during the block operation.
-# Now, if the attacker can figure out this access pattern, he can
-# deduce the key [or at least part of it]. The natural way to mitigate
-# this kind of attack is to minimize the number of cache-lines in the
-# S-box and/or prefetch them to ensure that every one is accessed, for
-# more uniform timing. But note that *if* the plain-text was concealed
-# in such a way that the input to the block function is distributed
-# *uniformly*, then the attack wouldn't apply. Now note that some
-# encryption modes, most notably CBC, do mask the plain-text in this
-# exact way [secure cipher output is distributed uniformly]. Yes, one
-# still might find an input that would reveal information about a
-# given key, but if the number of candidate inputs to be tried is
-# larger than the number of possible key combinations then the attack
-# becomes infeasible. This is why the revised AES_cbc_encrypt "dares"
-# to switch to the larger S-box when a larger chunk of data is to be
-# processed in one stroke. The current size limit of 512 bytes is
-# chosen to provide the same [diminishingly low] probability for a
-# cache-line to remain untouched in a large-chunk operation with the
-# large S-box as for a single block operation with the compact S-box,
-# and surely needs more careful consideration...
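-# [In C terms the prefetch amounts to touching every cache-line of
-# the 256-byte S-box before use, e.g., assuming 32-byte or larger
-# cache-lines as the 32-byte stride of the Te4 prefetch loads below
-# does:
-#
-#	volatile unsigned char sink;
-#	int i;
-#	for (i=0; i<256; i+=32) sink = te4[i];	/* pull line into L1 */
-#
-# so that subsequent lookups hit L1 with uniform timing; te4 is an
-# illustrative name for the compact S-box.]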
-#
-# As for asynchronous attacks. There are two flavours: attacker code
-# being interleaved with AES on a hyper-threading CPU at *instruction*
-# level, and two processes time-sharing a single core. As for the
-# latter. Two vectors. 1. Given that the attacker process has higher
-# priority, yield execution to the process performing AES just before
-# the timer fires off the scheduler, immediately regain control of the
-# CPU and analyze the cache state. For this attack to be efficient the
-# attacker would have to effectively slow down the operation by
-# several *orders* of magnitude, by the ratio of time slice to the
-# duration of a handful of AES rounds, which is unlikely to remain
-# unnoticed. Not to mention that this also means that he would spend
-# correspondingly more time to collect enough statistical data to
-# mount the attack. It's probably appropriate to say that if the
-# adversary reckons that this attack is beneficial and risks being
-# noticed, you probably have larger problems than giving him the mere
-# opportunity. In other words the suggested code design expects you
-# to preclude/mitigate this attack by overall system security design.
-# 2. The attacker manages to make his code interrupt-driven. In order
-# for this kind of attack to be feasible, the interrupt rate has to be
-# high enough, again comparable to the duration of a handful of AES
-# rounds. But is there an interrupt source of such rate? Hardly: not
-# even a 1Gbps NIC generates interrupts at such a raging rate...
-#
-# And now back to the former, the hyper-threading CPU, or more
-# specifically Intel P4. Recall that an asynchronous attack implies
-# that malicious code instruments itself. And naturally instrumentation
-# granularity has to be noticeably lower than the duration of the
-# codepath accessing the S-box [given, that is, that all cache-lines
-# are accessed during that time]. The current implementation accesses
-# *all* cache-lines within a ~50-cycle window, which is actually *less*
-# than the RDTSC latency on Intel P4!
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],"aes-586.pl",$x86only = $ARGV[$#ARGV] eq "386");
-&static_label("AES_Te");
-&static_label("AES_Td");
-
-$s0="eax";
-$s1="ebx";
-$s2="ecx";
-$s3="edx";
-$key="edi";
-$acc="esi";
-$tbl="ebp";
-
-# stack frame layout in _[x86|sse]_AES_* routines, frame is allocated
-# by caller
-$__ra=&DWP(0,"esp"); # return address
-$__s0=&DWP(4,"esp"); # s0 backing store
-$__s1=&DWP(8,"esp"); # s1 backing store
-$__s2=&DWP(12,"esp"); # s2 backing store
-$__s3=&DWP(16,"esp"); # s3 backing store
-$__key=&DWP(20,"esp"); # pointer to key schedule
-$__end=&DWP(24,"esp"); # pointer to end of key schedule
-$__tbl=&DWP(28,"esp"); # %ebp backing store
-
-# stack frame layout in AES_[en|de]crypt routines, which differs from
-# above by 4 and overlaps by %ebp backing store
-$_tbl=&DWP(24,"esp");
-$_esp=&DWP(28,"esp");
-
-sub _data_word() { my $i; while(defined($i=shift)) { &data_word($i,$i); } }
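-# Note that each word is emitted twice: an 8-byte table entry holds
-# the same 32-bit word back to back, so a dword load at byte offset
-# 1..3 into an entry returns that word rotated by 8..24 bits. This is
-# what references like &DWP(3,$te,$tmp,8) in encstep() rely on in
-# place of explicit rotates.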
-
-$speed_limit=512; # chunks smaller than $speed_limit are
- # processed with compact routine in CBC mode
-$small_footprint=1; # $small_footprint=1 code is ~5% slower [on
- # recent µ-archs], but ~5 times smaller!
- # I favor compact code to minimize cache
- # contention and in hope to "collect" 5% back
- # in real-life applications...
-
-$vertical_spin=0; # shift "vertically" defaults to 0, because of
- # its proof-of-concept status...
-# Note that there is no decvert(); also, the last encryption round is
-# performed with "horizontal" shifts. This is because this "vertical"
-# implementation [one which groups shifts on a given $s[i] to form a
-# "column," unlike "horizontal" one, which groups shifts on different
-# $s[i] to form a "row"] is work in progress. It was observed to run
-# a few percent faster on Intel cores, but not AMD. On the AMD K8 core
-# it's a whole 12% slower:-( So we face a trade-off... Shall it be resolved
-# some day? Till then the code is considered experimental and by
-# default remains dormant...
-
-sub encvert()
-{ my ($te,@s) = @_;
- my ($v0,$v1) = ($acc,$key);
-
- &mov ($v0,$s[3]); # copy s3
- &mov (&DWP(4,"esp"),$s[2]); # save s2
- &mov ($v1,$s[0]); # copy s0
- &mov (&DWP(8,"esp"),$s[1]); # save s1
-
- &movz ($s[2],&HB($s[0]));
- &and ($s[0],0xFF);
- &mov ($s[0],&DWP(0,$te,$s[0],8)); # s0>>0
- &shr ($v1,16);
- &mov ($s[3],&DWP(3,$te,$s[2],8)); # s0>>8
- &movz ($s[1],&HB($v1));
- &and ($v1,0xFF);
- &mov ($s[2],&DWP(2,$te,$v1,8)); # s0>>16
- &mov ($v1,$v0);
- &mov ($s[1],&DWP(1,$te,$s[1],8)); # s0>>24
-
- &and ($v0,0xFF);
- &xor ($s[3],&DWP(0,$te,$v0,8)); # s3>>0
- &movz ($v0,&HB($v1));
- &shr ($v1,16);
- &xor ($s[2],&DWP(3,$te,$v0,8)); # s3>>8
- &movz ($v0,&HB($v1));
- &and ($v1,0xFF);
- &xor ($s[1],&DWP(2,$te,$v1,8)); # s3>>16
- &mov ($v1,&DWP(4,"esp")); # restore s2
- &xor ($s[0],&DWP(1,$te,$v0,8)); # s3>>24
-
- &mov ($v0,$v1);
- &and ($v1,0xFF);
- &xor ($s[2],&DWP(0,$te,$v1,8)); # s2>>0
- &movz ($v1,&HB($v0));
- &shr ($v0,16);
- &xor ($s[1],&DWP(3,$te,$v1,8)); # s2>>8
- &movz ($v1,&HB($v0));
- &and ($v0,0xFF);
- &xor ($s[0],&DWP(2,$te,$v0,8)); # s2>>16
- &mov ($v0,&DWP(8,"esp")); # restore s1
- &xor ($s[3],&DWP(1,$te,$v1,8)); # s2>>24
-
- &mov ($v1,$v0);
- &and ($v0,0xFF);
- &xor ($s[1],&DWP(0,$te,$v0,8)); # s1>>0
- &movz ($v0,&HB($v1));
- &shr ($v1,16);
- &xor ($s[0],&DWP(3,$te,$v0,8)); # s1>>8
- &movz ($v0,&HB($v1));
- &and ($v1,0xFF);
- &xor ($s[3],&DWP(2,$te,$v1,8)); # s1>>16
- &mov ($key,$__key); # reincarnate v1 as key
- &xor ($s[2],&DWP(1,$te,$v0,8)); # s1>>24
-}
-
-# Another experimental routine, which features "horizontal spin," but
-# eliminates one reference to stack. Strangely enough it runs slower...
-sub enchoriz()
-{ my ($v0,$v1) = ($key,$acc);
-
- &movz ($v0,&LB($s0)); # 3, 2, 1, 0*
- &rotr ($s2,8); # 8,11,10, 9
- &mov ($v1,&DWP(0,$te,$v0,8)); # 0
- &movz ($v0,&HB($s1)); # 7, 6, 5*, 4
- &rotr ($s3,16); # 13,12,15,14
- &xor ($v1,&DWP(3,$te,$v0,8)); # 5
- &movz ($v0,&HB($s2)); # 8,11,10*, 9
- &rotr ($s0,16); # 1, 0, 3, 2
- &xor ($v1,&DWP(2,$te,$v0,8)); # 10
- &movz ($v0,&HB($s3)); # 13,12,15*,14
- &xor ($v1,&DWP(1,$te,$v0,8)); # 15, t[0] collected
- &mov ($__s0,$v1); # t[0] saved
-
- &movz ($v0,&LB($s1)); # 7, 6, 5, 4*
- &shr ($s1,16); # -, -, 7, 6
- &mov ($v1,&DWP(0,$te,$v0,8)); # 4
- &movz ($v0,&LB($s3)); # 13,12,15,14*
- &xor ($v1,&DWP(2,$te,$v0,8)); # 14
- &movz ($v0,&HB($s0)); # 1, 0, 3*, 2
- &and ($s3,0xffff0000); # 13,12, -, -
- &xor ($v1,&DWP(1,$te,$v0,8)); # 3
- &movz ($v0,&LB($s2)); # 8,11,10, 9*
- &or ($s3,$s1); # 13,12, 7, 6
- &xor ($v1,&DWP(3,$te,$v0,8)); # 9, t[1] collected
- &mov ($s1,$v1); # s[1]=t[1]
-
- &movz ($v0,&LB($s0)); # 1, 0, 3, 2*
- &shr ($s2,16); # -, -, 8,11
- &mov ($v1,&DWP(2,$te,$v0,8)); # 2
- &movz ($v0,&HB($s3)); # 13,12, 7*, 6
- &xor ($v1,&DWP(1,$te,$v0,8)); # 7
- &movz ($v0,&HB($s2)); # -, -, 8*,11
- &xor ($v1,&DWP(0,$te,$v0,8)); # 8
- &mov ($v0,$s3);
- &shr ($v0,24); # 13
- &xor ($v1,&DWP(3,$te,$v0,8)); # 13, t[2] collected
-
- &movz ($v0,&LB($s2)); # -, -, 8,11*
- &shr ($s0,24); # 1*
- &mov ($s2,&DWP(1,$te,$v0,8)); # 11
- &xor ($s2,&DWP(3,$te,$s0,8)); # 1
- &mov ($s0,$__s0); # s[0]=t[0]
- &movz ($v0,&LB($s3)); # 13,12, 7, 6*
- &shr ($s3,16); # , ,13,12
- &xor ($s2,&DWP(2,$te,$v0,8)); # 6
- &mov ($key,$__key); # reincarnate v0 as key
- &and ($s3,0xff); # , ,13,12*
- &mov ($s3,&DWP(0,$te,$s3,8)); # 12
- &xor ($s3,$s2); # s[2]=t[3] collected
- &mov ($s2,$v1); # s[2]=t[2]
-}
-
-# More experimental code... SSE one... Even though this one eliminates
-# *all* references to stack, it's not faster...
-sub sse_encbody()
-{
- &movz ($acc,&LB("eax")); # 0
- &mov ("ecx",&DWP(0,$tbl,$acc,8)); # 0
- &pshufw ("mm2","mm0",0x0d); # 7, 6, 3, 2
- &movz ("edx",&HB("eax")); # 1
- &mov ("edx",&DWP(3,$tbl,"edx",8)); # 1
- &shr ("eax",16); # 5, 4
-
- &movz ($acc,&LB("ebx")); # 10
- &xor ("ecx",&DWP(2,$tbl,$acc,8)); # 10
- &pshufw ("mm6","mm4",0x08); # 13,12, 9, 8
- &movz ($acc,&HB("ebx")); # 11
- &xor ("edx",&DWP(1,$tbl,$acc,8)); # 11
- &shr ("ebx",16); # 15,14
-
- &movz ($acc,&HB("eax")); # 5
- &xor ("ecx",&DWP(3,$tbl,$acc,8)); # 5
- &movq ("mm3",QWP(16,$key));
- &movz ($acc,&HB("ebx")); # 15
- &xor ("ecx",&DWP(1,$tbl,$acc,8)); # 15
- &movd ("mm0","ecx"); # t[0] collected
-
- &movz ($acc,&LB("eax")); # 4
- &mov ("ecx",&DWP(0,$tbl,$acc,8)); # 4
- &movd ("eax","mm2"); # 7, 6, 3, 2
- &movz ($acc,&LB("ebx")); # 14
- &xor ("ecx",&DWP(2,$tbl,$acc,8)); # 14
- &movd ("ebx","mm6"); # 13,12, 9, 8
-
- &movz ($acc,&HB("eax")); # 3
- &xor ("ecx",&DWP(1,$tbl,$acc,8)); # 3
- &movz ($acc,&HB("ebx")); # 9
- &xor ("ecx",&DWP(3,$tbl,$acc,8)); # 9
- &movd ("mm1","ecx"); # t[1] collected
-
- &movz ($acc,&LB("eax")); # 2
- &mov ("ecx",&DWP(2,$tbl,$acc,8)); # 2
- &shr ("eax",16); # 7, 6
- &punpckldq ("mm0","mm1"); # t[0,1] collected
- &movz ($acc,&LB("ebx")); # 8
- &xor ("ecx",&DWP(0,$tbl,$acc,8)); # 8
- &shr ("ebx",16); # 13,12
-
- &movz ($acc,&HB("eax")); # 7
- &xor ("ecx",&DWP(1,$tbl,$acc,8)); # 7
- &pxor ("mm0","mm3");
- &movz ("eax",&LB("eax")); # 6
- &xor ("edx",&DWP(2,$tbl,"eax",8)); # 6
- &pshufw ("mm1","mm0",0x08); # 5, 4, 1, 0
- &movz ($acc,&HB("ebx")); # 13
- &xor ("ecx",&DWP(3,$tbl,$acc,8)); # 13
- &xor ("ecx",&DWP(24,$key)); # t[2]
- &movd ("mm4","ecx"); # t[2] collected
- &movz ("ebx",&LB("ebx")); # 12
- &xor ("edx",&DWP(0,$tbl,"ebx",8)); # 12
- &shr ("ecx",16);
- &movd ("eax","mm1"); # 5, 4, 1, 0
- &mov ("ebx",&DWP(28,$key)); # t[3]
- &xor ("ebx","edx");
- &movd ("mm5","ebx"); # t[3] collected
- &and ("ebx",0xffff0000);
- &or ("ebx","ecx");
-
- &punpckldq ("mm4","mm5"); # t[2,3] collected
-}
-
-######################################################################
-# "Compact" block function
-######################################################################
-
-sub enccompact()
-{ my $Fn = mov;
- while ($#_>5) { pop(@_); $Fn=sub{}; }
- my ($i,$te,@s)=@_;
- my $tmp = $key;
- my $out = $i==3?$s[0]:$acc;
-
- # $Fn is used in first compact round and its purpose is to
- # void restoration of some values from stack, so that after
- # 4xenccompact with extra argument $key value is left there...
- if ($i==3) { &$Fn ($key,$__key); }##%edx
- else { &mov ($out,$s[0]); }
- &and ($out,0xFF);
- if ($i==1) { &shr ($s[0],16); }#%ebx[1]
- if ($i==2) { &shr ($s[0],24); }#%ecx[2]
- &movz ($out,&BP(-128,$te,$out,1));
-
- if ($i==3) { $tmp=$s[1]; }##%eax
- &movz ($tmp,&HB($s[1]));
- &movz ($tmp,&BP(-128,$te,$tmp,1));
- &shl ($tmp,8);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],$__s0); }##%ebx
- else { &mov ($tmp,$s[2]);
- &shr ($tmp,16); }
- if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
- &and ($tmp,0xFF);
- &movz ($tmp,&BP(-128,$te,$tmp,1));
- &shl ($tmp,16);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
- elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
- else { &mov ($tmp,$s[3]);
- &shr ($tmp,24); }
- &movz ($tmp,&BP(-128,$te,$tmp,1));
- &shl ($tmp,24);
- &xor ($out,$tmp);
- if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &mov ($s[3],$acc); }
- &comment();
-}
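-
-# In C terms one enccompact() output word amounts to [a sketch; te4
-# being the 256-byte S-box kept at $tbl-128]:
-#
-#	t0 = (u32)te4[ s0      &0xff]
-#	   | (u32)te4[(s1>> 8) &0xff] <<  8
-#	   | (u32)te4[(s2>>16) &0xff] << 16
-#	   | (u32)te4[ s3>>24       ] << 24;
-#
-# i.e. SubBytes and ShiftRows fused into one gather, with MixColumns
-# left to enctransform() below.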
-
-sub enctransform()
-{ my @s = ($s0,$s1,$s2,$s3);
- my $i = shift;
- my $tmp = $tbl;
- my $r2 = $key ;
-
- &mov ($acc,$s[$i]);
- &and ($acc,0x80808080);
- &mov ($tmp,$acc);
- &shr ($tmp,7);
- &lea ($r2,&DWP(0,$s[$i],$s[$i]));
- &sub ($acc,$tmp);
- &and ($r2,0xfefefefe);
- &and ($acc,0x1b1b1b1b);
- &mov ($tmp,$s[$i]);
- &xor ($acc,$r2); # r2
-
- &xor ($s[$i],$acc); # r0 ^ r2
- &rotl ($s[$i],24);
- &xor ($s[$i],$acc); # ROTATE(r2^r0,24) ^ r2
- &rotr ($tmp,16);
- &xor ($s[$i],$tmp);
- &rotr ($tmp,8);
- &xor ($s[$i],$tmp);
-}
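-
-# What enctransform() computes per state word is, in C terms [a
-# sketch, ROTATE being rotate-left]:
-#
-#	u32 hi = s & 0x80808080;
-#	u32 r2 = ((s<<1) & 0xfefefefe)		/* 2*s, byte-wise     */
-#	       ^ ((hi - (hi>>7)) & 0x1b1b1b1b);	/* GF(2^8) reduction  */
-#	s = r2 ^ ROTATE(s^r2,24) ^ ROTATE(s,16) ^ ROTATE(s,8);
-#
-# i.e. xtime() on all four bytes at once [the 0x80 bits turn into
-# 0x1b via hi-(hi>>7)], followed by the usual MixColumns rotations.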
-
-&function_begin_B("_x86_AES_encrypt_compact");
- # note that caller is expected to allocate stack frame for me!
- &mov ($__key,$key); # save key
-
- &xor ($s0,&DWP(0,$key)); # xor with key
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &mov ($acc,&DWP(240,$key)); # load key->rounds
- &lea ($acc,&DWP(-2,$acc,$acc));
- &lea ($acc,&DWP(0,$key,$acc,8));
- &mov ($__end,$acc); # end of key schedule
-
- # prefetch Te4
- &mov ($key,&DWP(0-128,$tbl));
- &mov ($acc,&DWP(32-128,$tbl));
- &mov ($key,&DWP(64-128,$tbl));
- &mov ($acc,&DWP(96-128,$tbl));
- &mov ($key,&DWP(128-128,$tbl));
- &mov ($acc,&DWP(160-128,$tbl));
- &mov ($key,&DWP(192-128,$tbl));
- &mov ($acc,&DWP(224-128,$tbl));
-
- &set_label("loop",16);
-
- &enccompact(0,$tbl,$s0,$s1,$s2,$s3,1);
- &enccompact(1,$tbl,$s1,$s2,$s3,$s0,1);
- &enccompact(2,$tbl,$s2,$s3,$s0,$s1,1);
- &enccompact(3,$tbl,$s3,$s0,$s1,$s2,1);
- &enctransform(2);
- &enctransform(3);
- &enctransform(0);
- &enctransform(1);
- &mov ($key,$__key);
- &mov ($tbl,$__tbl);
- &add ($key,16); # advance rd_key
- &xor ($s0,&DWP(0,$key));
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &cmp ($key,$__end);
- &mov ($__key,$key);
- &jb (&label("loop"));
-
- &enccompact(0,$tbl,$s0,$s1,$s2,$s3);
- &enccompact(1,$tbl,$s1,$s2,$s3,$s0);
- &enccompact(2,$tbl,$s2,$s3,$s0,$s1);
- &enccompact(3,$tbl,$s3,$s0,$s1,$s2);
-
- &xor ($s0,&DWP(16,$key));
- &xor ($s1,&DWP(20,$key));
- &xor ($s2,&DWP(24,$key));
- &xor ($s3,&DWP(28,$key));
-
- &ret ();
-&function_end_B("_x86_AES_encrypt_compact");
-
-######################################################################
-# "Compact" SSE block function.
-######################################################################
-#
-# Performance is not actually extraordinary in comparison to pure
-# x86 code. In particular encrypt performance is virtually the same.
-# Decrypt performance on the other hand is 15-20% better on newer
-# µ-archs [but we're thankful for *any* improvement here], and ~50%
-# better on PIII:-) And additionally, on the pros side, this code
-# eliminates redundant references to the stack and thus relieves/
-# minimizes the pressure on the memory bus.
-#
-# MMX register layout lsb
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# | mm4 | mm0 |
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# | s3 | s2 | s1 | s0 |
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-# |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
-# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
-#
-# Indexes translate as s[N/4]>>(8*(N%4)), e.g. 5 means s1>>8.
-# In these terms the encryption and decryption "compact" permutation
-# matrices can be depicted as follows:
-#
-# encryption lsb # decryption lsb
-# +----++----+----+----+----+ # +----++----+----+----+----+
-# | t0 || 15 | 10 | 5 | 0 | # | t0 || 7 | 10 | 13 | 0 |
-# +----++----+----+----+----+ # +----++----+----+----+----+
-# | t1 || 3 | 14 | 9 | 4 | # | t1 || 11 | 14 | 1 | 4 |
-# +----++----+----+----+----+ # +----++----+----+----+----+
-# | t2 || 7 | 2 | 13 | 8 | # | t2 || 15 | 2 | 5 | 8 |
-# +----++----+----+----+----+ # +----++----+----+----+----+
-# | t3 || 11 | 6 | 1 | 12 | # | t3 || 3 | 6 | 9 | 12 |
-# +----++----+----+----+----+ # +----++----+----+----+----+
-#
-######################################################################
-# Why not xmm registers? Short answer: it was actually tested and
-# was not any faster, but on the *contrary*, most notably on Intel CPUs.
-# Longer answer: the main advantage of using mm registers is that movd
-# latency is lower, especially on Intel P4. While arithmetic
-# instructions are twice as many, they can be scheduled every cycle,
-# and not every second one as when they operate on an xmm register,
-# so that "arithmetic throughput" remains virtually the same. And
-# finally the code can be executed even on elder SSE-only CPUs:-)
-
-sub sse_enccompact()
-{
- &pshufw ("mm1","mm0",0x08); # 5, 4, 1, 0
- &pshufw ("mm5","mm4",0x0d); # 15,14,11,10
- &movd ("eax","mm1"); # 5, 4, 1, 0
- &movd ("ebx","mm5"); # 15,14,11,10
-
- &movz ($acc,&LB("eax")); # 0
- &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 0
- &pshufw ("mm2","mm0",0x0d); # 7, 6, 3, 2
- &movz ("edx",&HB("eax")); # 1
- &movz ("edx",&BP(-128,$tbl,"edx",1)); # 1
- &shl ("edx",8); # 1
- &shr ("eax",16); # 5, 4
-
- &movz ($acc,&LB("ebx")); # 10
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 10
- &shl ($acc,16); # 10
- &or ("ecx",$acc); # 10
- &pshufw ("mm6","mm4",0x08); # 13,12, 9, 8
- &movz ($acc,&HB("ebx")); # 11
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 11
- &shl ($acc,24); # 11
- &or ("edx",$acc); # 11
- &shr ("ebx",16); # 15,14
-
- &movz ($acc,&HB("eax")); # 5
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 5
- &shl ($acc,8); # 5
- &or ("ecx",$acc); # 5
- &movz ($acc,&HB("ebx")); # 15
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 15
- &shl ($acc,24); # 15
- &or ("ecx",$acc); # 15
- &movd ("mm0","ecx"); # t[0] collected
-
- &movz ($acc,&LB("eax")); # 4
- &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 4
- &movd ("eax","mm2"); # 7, 6, 3, 2
- &movz ($acc,&LB("ebx")); # 14
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 14
- &shl ($acc,16); # 14
- &or ("ecx",$acc); # 14
-
- &movd ("ebx","mm6"); # 13,12, 9, 8
- &movz ($acc,&HB("eax")); # 3
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 3
- &shl ($acc,24); # 3
- &or ("ecx",$acc); # 3
- &movz ($acc,&HB("ebx")); # 9
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 9
- &shl ($acc,8); # 9
- &or ("ecx",$acc); # 9
- &movd ("mm1","ecx"); # t[1] collected
-
- &movz ($acc,&LB("ebx")); # 8
- &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 8
- &shr ("ebx",16); # 13,12
- &movz ($acc,&LB("eax")); # 2
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 2
- &shl ($acc,16); # 2
- &or ("ecx",$acc); # 2
- &shr ("eax",16); # 7, 6
-
- &punpckldq ("mm0","mm1"); # t[0,1] collected
-
- &movz ($acc,&HB("eax")); # 7
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 7
- &shl ($acc,24); # 7
- &or ("ecx",$acc); # 7
- &and ("eax",0xff); # 6
- &movz ("eax",&BP(-128,$tbl,"eax",1)); # 6
- &shl ("eax",16); # 6
- &or ("edx","eax"); # 6
- &movz ($acc,&HB("ebx")); # 13
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 13
- &shl ($acc,8); # 13
- &or ("ecx",$acc); # 13
- &movd ("mm4","ecx"); # t[2] collected
- &and ("ebx",0xff); # 12
- &movz ("ebx",&BP(-128,$tbl,"ebx",1)); # 12
- &or ("edx","ebx"); # 12
- &movd ("mm5","edx"); # t[3] collected
-
- &punpckldq ("mm4","mm5"); # t[2,3] collected
-}
-
- if (!$x86only) {
-&function_begin_B("_sse_AES_encrypt_compact");
- &pxor ("mm0",&QWP(0,$key)); # 7, 6, 5, 4, 3, 2, 1, 0
- &pxor ("mm4",&QWP(8,$key)); # 15,14,13,12,11,10, 9, 8
-
- # note that caller is expected to allocate stack frame for me!
- &mov ($acc,&DWP(240,$key)); # load key->rounds
- &lea ($acc,&DWP(-2,$acc,$acc));
- &lea ($acc,&DWP(0,$key,$acc,8));
- &mov ($__end,$acc); # end of key schedule
-
- &mov ($s0,0x1b1b1b1b); # magic constant
- &mov (&DWP(8,"esp"),$s0);
- &mov (&DWP(12,"esp"),$s0);
-
- # prefetch Te4
- &mov ($s0,&DWP(0-128,$tbl));
- &mov ($s1,&DWP(32-128,$tbl));
- &mov ($s2,&DWP(64-128,$tbl));
- &mov ($s3,&DWP(96-128,$tbl));
- &mov ($s0,&DWP(128-128,$tbl));
- &mov ($s1,&DWP(160-128,$tbl));
- &mov ($s2,&DWP(192-128,$tbl));
- &mov ($s3,&DWP(224-128,$tbl));
-
- &set_label("loop",16);
- &sse_enccompact();
- &add ($key,16);
- &cmp ($key,$__end);
- &ja (&label("out"));
-
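- # MMX flavour of the same xtime trick: pcmpgtb against zero sets
- # every byte with the top bit to 0xff, pand with 0x1b1b1b1b [kept
- # at 8(%esp)] turns that into the reduction term, and paddb is the
- # byte-wise shift left.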
- &movq ("mm2",&QWP(8,"esp"));
- &pxor ("mm3","mm3"); &pxor ("mm7","mm7");
- &movq ("mm1","mm0"); &movq ("mm5","mm4"); # r0
- &pcmpgtb("mm3","mm0"); &pcmpgtb("mm7","mm4");
- &pand ("mm3","mm2"); &pand ("mm7","mm2");
- &pshufw ("mm2","mm0",0xb1); &pshufw ("mm6","mm4",0xb1);# ROTATE(r0,16)
- &paddb ("mm0","mm0"); &paddb ("mm4","mm4");
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # = r2
- &pshufw ("mm3","mm2",0xb1); &pshufw ("mm7","mm6",0xb1);# r0
- &pxor ("mm1","mm0"); &pxor ("mm5","mm4"); # r0^r2
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= ROTATE(r0,16)
-
- &movq ("mm2","mm3"); &movq ("mm6","mm7");
- &pslld ("mm3",8); &pslld ("mm7",8);
- &psrld ("mm2",24); &psrld ("mm6",24);
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= r0<<8
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= r0>>24
-
- &movq ("mm3","mm1"); &movq ("mm7","mm5");
- &movq ("mm2",&QWP(0,$key)); &movq ("mm6",&QWP(8,$key));
- &psrld ("mm1",8); &psrld ("mm5",8);
- &mov ($s0,&DWP(0-128,$tbl));
- &pslld ("mm3",24); &pslld ("mm7",24);
- &mov ($s1,&DWP(64-128,$tbl));
- &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= (r2^r0)<<8
- &mov ($s2,&DWP(128-128,$tbl));
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= (r2^r0)>>24
- &mov ($s3,&DWP(192-128,$tbl));
-
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6");
- &jmp (&label("loop"));
-
- &set_label("out",16);
- &pxor ("mm0",&QWP(0,$key));
- &pxor ("mm4",&QWP(8,$key));
-
- &ret ();
-&function_end_B("_sse_AES_encrypt_compact");
- }
-
-######################################################################
-# Vanilla block function.
-######################################################################
-
-sub encstep()
-{ my ($i,$te,@s) = @_;
- my $tmp = $key;
- my $out = $i==3?$s[0]:$acc;
-
- # lines marked with #%e?x[i] denote "reordered" instructions...
- if ($i==3) { &mov ($key,$__key); }##%edx
- else { &mov ($out,$s[0]);
- &and ($out,0xFF); }
- if ($i==1) { &shr ($s[0],16); }#%ebx[1]
- if ($i==2) { &shr ($s[0],24); }#%ecx[2]
- &mov ($out,&DWP(0,$te,$out,8));
-
- if ($i==3) { $tmp=$s[1]; }##%eax
- &movz ($tmp,&HB($s[1]));
- &xor ($out,&DWP(3,$te,$tmp,8));
-
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],$__s0); }##%ebx
- else { &mov ($tmp,$s[2]);
- &shr ($tmp,16); }
- if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
- &and ($tmp,0xFF);
- &xor ($out,&DWP(2,$te,$tmp,8));
-
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
- elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
- else { &mov ($tmp,$s[3]);
- &shr ($tmp,24) }
- &xor ($out,&DWP(1,$te,$tmp,8));
- if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &mov ($s[3],$acc); }
- &comment();
-}
-
-sub enclast()
-{ my ($i,$te,@s)=@_;
- my $tmp = $key;
- my $out = $i==3?$s[0]:$acc;
-
- if ($i==3) { &mov ($key,$__key); }##%edx
- else { &mov ($out,$s[0]); }
- &and ($out,0xFF);
- if ($i==1) { &shr ($s[0],16); }#%ebx[1]
- if ($i==2) { &shr ($s[0],24); }#%ecx[2]
- &mov ($out,&DWP(2,$te,$out,8));
- &and ($out,0x000000ff);
-
- if ($i==3) { $tmp=$s[1]; }##%eax
- &movz ($tmp,&HB($s[1]));
- &mov ($tmp,&DWP(0,$te,$tmp,8));
- &and ($tmp,0x0000ff00);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],$__s0); }##%ebx
- else { &mov ($tmp,$s[2]);
- &shr ($tmp,16); }
- if ($i==2) { &and ($s[1],0xFF); }#%edx[2]
- &and ($tmp,0xFF);
- &mov ($tmp,&DWP(0,$te,$tmp,8));
- &and ($tmp,0x00ff0000);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx
- elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2]
- else { &mov ($tmp,$s[3]);
- &shr ($tmp,24); }
- &mov ($tmp,&DWP(2,$te,$tmp,8));
- &and ($tmp,0xff000000);
- &xor ($out,$tmp);
- if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &mov ($s[3],$acc); }
-}
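-
-# Note there is no compact Te4 lookup in the last round: since every
-# Te entry holds the bytes {2*S[x],S[x],S[x],3*S[x]} [stored twice],
-# a masked dword load at byte offset 0 or 2 extracts the plain S[x]
-# in the desired position, which is all the final round [which has no
-# MixColumns] needs.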
-
-&function_begin_B("_x86_AES_encrypt");
- if ($vertical_spin) {
- # I need high parts of volatile registers to be accessible...
- &exch ($s1="edi",$key="ebx");
- &mov ($s2="esi",$acc="ecx");
- }
-
- # note that caller is expected to allocate stack frame for me!
- &mov ($__key,$key); # save key
-
- &xor ($s0,&DWP(0,$key)); # xor with key
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &mov ($acc,&DWP(240,$key)); # load key->rounds
-
- if ($small_footprint) {
- &lea ($acc,&DWP(-2,$acc,$acc));
- &lea ($acc,&DWP(0,$key,$acc,8));
- &mov ($__end,$acc); # end of key schedule
-
- &set_label("loop",16);
- if ($vertical_spin) {
- &encvert($tbl,$s0,$s1,$s2,$s3);
- } else {
- &encstep(0,$tbl,$s0,$s1,$s2,$s3);
- &encstep(1,$tbl,$s1,$s2,$s3,$s0);
- &encstep(2,$tbl,$s2,$s3,$s0,$s1);
- &encstep(3,$tbl,$s3,$s0,$s1,$s2);
- }
- &add ($key,16); # advance rd_key
- &xor ($s0,&DWP(0,$key));
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
- &cmp ($key,$__end);
- &mov ($__key,$key);
- &jb (&label("loop"));
- }
- else {
- &cmp ($acc,10);
- &jle (&label("10rounds"));
- &cmp ($acc,12);
- &jle (&label("12rounds"));
-
- &set_label("14rounds",4);
- for ($i=1;$i<3;$i++) {
- if ($vertical_spin) {
- &encvert($tbl,$s0,$s1,$s2,$s3);
- } else {
- &encstep(0,$tbl,$s0,$s1,$s2,$s3);
- &encstep(1,$tbl,$s1,$s2,$s3,$s0);
- &encstep(2,$tbl,$s2,$s3,$s0,$s1);
- &encstep(3,$tbl,$s3,$s0,$s1,$s2);
- }
- &xor ($s0,&DWP(16*$i+0,$key));
- &xor ($s1,&DWP(16*$i+4,$key));
- &xor ($s2,&DWP(16*$i+8,$key));
- &xor ($s3,&DWP(16*$i+12,$key));
- }
- &add ($key,32);
- &mov ($__key,$key); # advance rd_key
- &set_label("12rounds",4);
- for ($i=1;$i<3;$i++) {
- if ($vertical_spin) {
- &encvert($tbl,$s0,$s1,$s2,$s3);
- } else {
- &encstep(0,$tbl,$s0,$s1,$s2,$s3);
- &encstep(1,$tbl,$s1,$s2,$s3,$s0);
- &encstep(2,$tbl,$s2,$s3,$s0,$s1);
- &encstep(3,$tbl,$s3,$s0,$s1,$s2);
- }
- &xor ($s0,&DWP(16*$i+0,$key));
- &xor ($s1,&DWP(16*$i+4,$key));
- &xor ($s2,&DWP(16*$i+8,$key));
- &xor ($s3,&DWP(16*$i+12,$key));
- }
- &add ($key,32);
- &mov ($__key,$key); # advance rd_key
- &set_label("10rounds",4);
- for ($i=1;$i<10;$i++) {
- if ($vertical_spin) {
- &encvert($tbl,$s0,$s1,$s2,$s3);
- } else {
- &encstep(0,$tbl,$s0,$s1,$s2,$s3);
- &encstep(1,$tbl,$s1,$s2,$s3,$s0);
- &encstep(2,$tbl,$s2,$s3,$s0,$s1);
- &encstep(3,$tbl,$s3,$s0,$s1,$s2);
- }
- &xor ($s0,&DWP(16*$i+0,$key));
- &xor ($s1,&DWP(16*$i+4,$key));
- &xor ($s2,&DWP(16*$i+8,$key));
- &xor ($s3,&DWP(16*$i+12,$key));
- }
- }
-
- if ($vertical_spin) {
- # "reincarnate" some registers for "horizontal" spin...
- &mov ($s1="ebx",$key="edi");
- &mov ($s2="ecx",$acc="esi");
- }
- &enclast(0,$tbl,$s0,$s1,$s2,$s3);
- &enclast(1,$tbl,$s1,$s2,$s3,$s0);
- &enclast(2,$tbl,$s2,$s3,$s0,$s1);
- &enclast(3,$tbl,$s3,$s0,$s1,$s2);
-
- &add ($key,$small_footprint?16:160);
- &xor ($s0,&DWP(0,$key));
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &ret ();
-
-&set_label("AES_Te",64); # Yes! I keep it in the code segment!
- &_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
- &_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
- &_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
- &_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
- &_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
- &_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
- &_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
- &_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
- &_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
- &_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
- &_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
- &_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
- &_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
- &_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
- &_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
- &_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
- &_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
- &_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
- &_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
- &_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
- &_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
- &_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
- &_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
- &_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
- &_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
- &_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
- &_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
- &_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
- &_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
- &_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
- &_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
- &_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
- &_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
- &_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
- &_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
- &_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
- &_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
- &_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
- &_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
- &_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
- &_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
- &_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
- &_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
- &_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
- &_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
- &_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
- &_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
- &_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
- &_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
- &_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
- &_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
- &_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
- &_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
- &_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
- &_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
- &_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
- &_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
- &_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
- &_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
- &_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
- &_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
- &_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
- &_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
- &_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
-
-#Te4 # four copies of Te4 to choose from to avoid L1 aliasing
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-#rcon:
- &data_word(0x00000001, 0x00000002, 0x00000004, 0x00000008);
- &data_word(0x00000010, 0x00000020, 0x00000040, 0x00000080);
- &data_word(0x0000001b, 0x00000036, 0x00000000, 0x00000000);
- &data_word(0x00000000, 0x00000000, 0x00000000, 0x00000000);
-&function_end_B("_x86_AES_encrypt");
-
- # void AES_encrypt (const unsigned char *inp, unsigned char *out, const AES_KEY *key);
-&function_begin("AES_encrypt");
- &mov ($acc,&wparam(0)); # load inp
- &mov ($key,&wparam(2)); # load key
-
- &mov ($s0,"esp");
- &sub ("esp",36);
- &and ("esp",-64); # align to cache-line
-
- # place stack frame just "above" the key schedule
- &lea ($s1,&DWP(-64-63,$key));
- &sub ($s1,"esp");
- &neg ($s1);
- &and ($s1,0x3C0); # modulo 1024, but aligned to cache-line
- &sub ("esp",$s1);
- &add ("esp",4); # 4 is reserved for caller's return address
- &mov ($_esp,$s0); # save stack pointer
-
- &call (&label("pic_point")); # make it PIC!
- &set_label("pic_point");
- &blindpop($tbl);
- &picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if (!$x86only);
- &lea ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
-
- # pick Te4 copy which can't "overlap" with stack frame or key schedule
- &lea ($s1,&DWP(768-4,"esp"));
- &sub ($s1,$tbl);
- &and ($s1,0x300);
- &lea ($tbl,&DWP(2048+128,$tbl,$s1));
-
- if (!$x86only) {
- &bt (&DWP(0,$s0),25); # check for SSE bit
- &jnc (&label("x86"));
-
- &movq ("mm0",&QWP(0,$acc));
- &movq ("mm4",&QWP(8,$acc));
- &call ("_sse_AES_encrypt_compact");
- &mov ("esp",$_esp); # restore stack pointer
- &mov ($acc,&wparam(1)); # load out
- &movq (&QWP(0,$acc),"mm0"); # write output data
- &movq (&QWP(8,$acc),"mm4");
- &emms ();
- &function_end_A();
- }
- &set_label("x86",16);
- &mov ($_tbl,$tbl);
- &mov ($s0,&DWP(0,$acc)); # load input data
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
- &call ("_x86_AES_encrypt_compact");
- &mov ("esp",$_esp); # restore stack pointer
- &mov ($acc,&wparam(1)); # load out
- &mov (&DWP(0,$acc),$s0); # write output data
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
-&function_end("AES_encrypt");
-
-#--------------------------------------------------------------------#
-
-######################################################################
-# "Compact" block function
-######################################################################
-
-sub deccompact()
- { my $Fn = \&mov;
- while ($#_>5) { pop(@_); $Fn=sub{}; }
- my ($i,$td,@s)=@_;
- my $tmp = $key;
- my $out = $i==3?$s[0]:$acc;
-
- # $Fn is used in the first compact round and its purpose is to
- # suppress restoration of some values from the stack, so that after
- # 4xdeccompact with the extra argument $key, the $s0 and $s1 values
- # are left there...
- if($i==3) { &$Fn ($key,$__key); }
- else { &mov ($out,$s[0]); }
- &and ($out,0xFF);
- &movz ($out,&BP(-128,$td,$out,1));
-
- if ($i==3) { $tmp=$s[1]; }
- &movz ($tmp,&HB($s[1]));
- &movz ($tmp,&BP(-128,$td,$tmp,1));
- &shl ($tmp,8);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],$acc); }
- else { &mov ($tmp,$s[2]); }
- &shr ($tmp,16);
- &and ($tmp,0xFF);
- &movz ($tmp,&BP(-128,$td,$tmp,1));
- &shl ($tmp,16);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[3]; &$Fn ($s[2],$__s1); }
- else { &mov ($tmp,$s[3]); }
- &shr ($tmp,24);
- &movz ($tmp,&BP(-128,$td,$tmp,1));
- &shl ($tmp,24);
- &xor ($out,$tmp);
- if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &$Fn ($s[3],$__s0); }
-}
-
-# must be called with 2,3,0,1 as argument sequence!!!
-sub dectransform()
-{ my @s = ($s0,$s1,$s2,$s3);
- my $i = shift;
- my $tmp = $key;
- my $tp2 = @s[($i+2)%4]; $tp2 = @s[2] if ($i==1);
- my $tp4 = @s[($i+3)%4]; $tp4 = @s[3] if ($i==1);
- my $tp8 = $tbl;
-
- &mov ($acc,$s[$i]);
- &and ($acc,0x80808080);
- &mov ($tmp,$acc);
- &shr ($tmp,7);
- &lea ($tp2,&DWP(0,$s[$i],$s[$i]));
- &sub ($acc,$tmp);
- &and ($tp2,0xfefefefe);
- &and ($acc,0x1b1b1b1b);
- &xor ($acc,$tp2);
- &mov ($tp2,$acc);
-
- &and ($acc,0x80808080);
- &mov ($tmp,$acc);
- &shr ($tmp,7);
- &lea ($tp4,&DWP(0,$tp2,$tp2));
- &sub ($acc,$tmp);
- &and ($tp4,0xfefefefe);
- &and ($acc,0x1b1b1b1b);
- &xor ($tp2,$s[$i]); # tp2^tp1
- &xor ($acc,$tp4);
- &mov ($tp4,$acc);
-
- &and ($acc,0x80808080);
- &mov ($tmp,$acc);
- &shr ($tmp,7);
- &lea ($tp8,&DWP(0,$tp4,$tp4));
- &sub ($acc,$tmp);
- &and ($tp8,0xfefefefe);
- &and ($acc,0x1b1b1b1b);
- &xor ($tp4,$s[$i]); # tp4^tp1
- &rotl ($s[$i],8); # = ROTATE(tp1,8)
- &xor ($tp8,$acc);
-
- &xor ($s[$i],$tp2);
- &xor ($tp2,$tp8);
- &rotl ($tp2,24);
- &xor ($s[$i],$tp4);
- &xor ($tp4,$tp8);
- &rotl ($tp4,16);
- &xor ($s[$i],$tp8); # ^= tp8^(tp4^tp1)^(tp2^tp1)
- &rotl ($tp8,8);
- &xor ($s[$i],$tp2); # ^= ROTATE(tp8^tp2^tp1,24)
- &xor ($s[$i],$tp4); # ^= ROTATE(tp8^tp4^tp1,16)
- &mov ($s[0],$__s0) if($i==2); #prefetch $s0
- &mov ($s[1],$__s1) if($i==3); #prefetch $s1
- &mov ($s[2],$__s2) if($i==1);
- &xor ($s[$i],$tp8); # ^= ROTATE(tp8,8)
-
- &mov ($s[3],$__s3) if($i==1);
- &mov (&DWP(4+4*$i,"esp"),$s[$i]) if($i>=2);
-}
-
-&function_begin_B("_x86_AES_decrypt_compact");
- # note that caller is expected to allocate stack frame for me!
- &mov ($__key,$key); # save key
-
- &xor ($s0,&DWP(0,$key)); # xor with key
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &mov ($acc,&DWP(240,$key)); # load key->rounds
-
- &lea ($acc,&DWP(-2,$acc,$acc));
- &lea ($acc,&DWP(0,$key,$acc,8));
- &mov ($__end,$acc); # end of key schedule
-
- # prefetch Td4
- &mov ($key,&DWP(0-128,$tbl));
- &mov ($acc,&DWP(32-128,$tbl));
- &mov ($key,&DWP(64-128,$tbl));
- &mov ($acc,&DWP(96-128,$tbl));
- &mov ($key,&DWP(128-128,$tbl));
- &mov ($acc,&DWP(160-128,$tbl));
- &mov ($key,&DWP(192-128,$tbl));
- &mov ($acc,&DWP(224-128,$tbl));
-
- &set_label("loop",16);
-
- &deccompact(0,$tbl,$s0,$s3,$s2,$s1,1);
- &deccompact(1,$tbl,$s1,$s0,$s3,$s2,1);
- &deccompact(2,$tbl,$s2,$s1,$s0,$s3,1);
- &deccompact(3,$tbl,$s3,$s2,$s1,$s0,1);
- &dectransform(2);
- &dectransform(3);
- &dectransform(0);
- &dectransform(1);
- &mov ($key,$__key);
- &mov ($tbl,$__tbl);
- &add ($key,16); # advance rd_key
- &xor ($s0,&DWP(0,$key));
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &cmp ($key,$__end);
- &mov ($__key,$key);
- &jb (&label("loop"));
-
- &deccompact(0,$tbl,$s0,$s3,$s2,$s1);
- &deccompact(1,$tbl,$s1,$s0,$s3,$s2);
- &deccompact(2,$tbl,$s2,$s1,$s0,$s3);
- &deccompact(3,$tbl,$s3,$s2,$s1,$s0);
-
- &xor ($s0,&DWP(16,$key));
- &xor ($s1,&DWP(20,$key));
- &xor ($s2,&DWP(24,$key));
- &xor ($s3,&DWP(28,$key));
-
- &ret ();
-&function_end_B("_x86_AES_decrypt_compact");
-
-######################################################################
-# "Compact" SSE block function.
-######################################################################
-
-sub sse_deccompact()
-{
- &pshufw ("mm1","mm0",0x0c); # 7, 6, 1, 0
- &movd ("eax","mm1"); # 7, 6, 1, 0
-
- &pshufw ("mm5","mm4",0x09); # 13,12,11,10
- &movz ($acc,&LB("eax")); # 0
- &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 0
- &movd ("ebx","mm5"); # 13,12,11,10
- &movz ("edx",&HB("eax")); # 1
- &movz ("edx",&BP(-128,$tbl,"edx",1)); # 1
- &shl ("edx",8); # 1
-
- &pshufw ("mm2","mm0",0x06); # 3, 2, 5, 4
- &movz ($acc,&LB("ebx")); # 10
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 10
- &shl ($acc,16); # 10
- &or ("ecx",$acc); # 10
- &shr ("eax",16); # 7, 6
- &movz ($acc,&HB("ebx")); # 11
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 11
- &shl ($acc,24); # 11
- &or ("edx",$acc); # 11
- &shr ("ebx",16); # 13,12
-
- &pshufw ("mm6","mm4",0x03); # 9, 8,15,14
- &movz ($acc,&HB("eax")); # 7
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 7
- &shl ($acc,24); # 7
- &or ("ecx",$acc); # 7
- &movz ($acc,&HB("ebx")); # 13
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 13
- &shl ($acc,8); # 13
- &or ("ecx",$acc); # 13
- &movd ("mm0","ecx"); # t[0] collected
-
- &movz ($acc,&LB("eax")); # 6
- &movd ("eax","mm2"); # 3, 2, 5, 4
- &movz ("ecx",&BP(-128,$tbl,$acc,1)); # 6
- &shl ("ecx",16); # 6
- &movz ($acc,&LB("ebx")); # 12
- &movd ("ebx","mm6"); # 9, 8,15,14
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 12
- &or ("ecx",$acc); # 12
-
- &movz ($acc,&LB("eax")); # 4
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 4
- &or ("edx",$acc); # 4
- &movz ($acc,&LB("ebx")); # 14
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 14
- &shl ($acc,16); # 14
- &or ("edx",$acc); # 14
- &movd ("mm1","edx"); # t[1] collected
-
- &movz ($acc,&HB("eax")); # 5
- &movz ("edx",&BP(-128,$tbl,$acc,1)); # 5
- &shl ("edx",8); # 5
- &movz ($acc,&HB("ebx")); # 15
- &shr ("eax",16); # 3, 2
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 15
- &shl ($acc,24); # 15
- &or ("edx",$acc); # 15
- &shr ("ebx",16); # 9, 8
-
- &punpckldq ("mm0","mm1"); # t[0,1] collected
-
- &movz ($acc,&HB("ebx")); # 9
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 9
- &shl ($acc,8); # 9
- &or ("ecx",$acc); # 9
- &and ("ebx",0xff); # 8
- &movz ("ebx",&BP(-128,$tbl,"ebx",1)); # 8
- &or ("edx","ebx"); # 8
- &movz ($acc,&LB("eax")); # 2
- &movz ($acc,&BP(-128,$tbl,$acc,1)); # 2
- &shl ($acc,16); # 2
- &or ("edx",$acc); # 2
- &movd ("mm4","edx"); # t[2] collected
- &movz ("eax",&HB("eax")); # 3
- &movz ("eax",&BP(-128,$tbl,"eax",1)); # 3
- &shl ("eax",24); # 3
- &or ("ecx","eax"); # 3
- &movd ("mm5","ecx"); # t[3] collected
-
- &punpckldq ("mm4","mm5"); # t[2,3] collected
-}
-
- if (!$x86only) {
-&function_begin_B("_sse_AES_decrypt_compact");
- &pxor ("mm0",&QWP(0,$key)); # 7, 6, 5, 4, 3, 2, 1, 0
- &pxor ("mm4",&QWP(8,$key)); # 15,14,13,12,11,10, 9, 8
-
- # note that caller is expected to allocate stack frame for me!
- &mov ($acc,&DWP(240,$key)); # load key->rounds
- &lea ($acc,&DWP(-2,$acc,$acc));
- &lea ($acc,&DWP(0,$key,$acc,8));
- &mov ($__end,$acc); # end of key schedule
-
- &mov ($s0,0x1b1b1b1b); # magic constant
- &mov (&DWP(8,"esp"),$s0);
- &mov (&DWP(12,"esp"),$s0);
-
- # prefetch Td4
- &mov ($s0,&DWP(0-128,$tbl));
- &mov ($s1,&DWP(32-128,$tbl));
- &mov ($s2,&DWP(64-128,$tbl));
- &mov ($s3,&DWP(96-128,$tbl));
- &mov ($s0,&DWP(128-128,$tbl));
- &mov ($s1,&DWP(160-128,$tbl));
- &mov ($s2,&DWP(192-128,$tbl));
- &mov ($s3,&DWP(224-128,$tbl));
-
- &set_label("loop",16);
- &sse_deccompact();
- &add ($key,16);
- &cmp ($key,$__end);
- &ja (&label("out"));
-
- # ROTATE(x^y,N) == ROTATE(x,N)^ROTATE(y,N)
- &movq ("mm3","mm0"); &movq ("mm7","mm4");
- &movq ("mm2","mm0",1); &movq ("mm6","mm4",1);
- &movq ("mm1","mm0"); &movq ("mm5","mm4");
- &pshufw ("mm0","mm0",0xb1); &pshufw ("mm4","mm4",0xb1);# = ROTATE(tp0,16)
- &pslld ("mm2",8); &pslld ("mm6",8);
- &psrld ("mm3",8); &psrld ("mm7",8);
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= tp0<<8
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp0>>8
- &pslld ("mm2",16); &pslld ("mm6",16);
- &psrld ("mm3",16); &psrld ("mm7",16);
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= tp0<<24
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp0>>24
-
- &movq ("mm3",&QWP(8,"esp"));
- &pxor ("mm2","mm2"); &pxor ("mm6","mm6");
- &pcmpgtb("mm2","mm1"); &pcmpgtb("mm6","mm5");
- &pand ("mm2","mm3"); &pand ("mm6","mm3");
- &paddb ("mm1","mm1"); &paddb ("mm5","mm5");
- &pxor ("mm1","mm2"); &pxor ("mm5","mm6"); # tp2
- &movq ("mm3","mm1"); &movq ("mm7","mm5");
- &movq ("mm2","mm1"); &movq ("mm6","mm5");
- &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp2
- &pslld ("mm3",24); &pslld ("mm7",24);
- &psrld ("mm2",8); &psrld ("mm6",8);
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp2<<24
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= tp2>>8
-
- &movq ("mm2",&QWP(8,"esp"));
- &pxor ("mm3","mm3"); &pxor ("mm7","mm7");
- &pcmpgtb("mm3","mm1"); &pcmpgtb("mm7","mm5");
- &pand ("mm3","mm2"); &pand ("mm7","mm2");
- &paddb ("mm1","mm1"); &paddb ("mm5","mm5");
- &pxor ("mm1","mm3"); &pxor ("mm5","mm7"); # tp4
- &pshufw ("mm3","mm1",0xb1); &pshufw ("mm7","mm5",0xb1);
- &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp4
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16)
-
- &pxor ("mm3","mm3"); &pxor ("mm7","mm7");
- &pcmpgtb("mm3","mm1"); &pcmpgtb("mm7","mm5");
- &pand ("mm3","mm2"); &pand ("mm7","mm2");
- &paddb ("mm1","mm1"); &paddb ("mm5","mm5");
- &pxor ("mm1","mm3"); &pxor ("mm5","mm7"); # tp8
- &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp8
- &movq ("mm3","mm1"); &movq ("mm7","mm5");
- &pshufw ("mm2","mm1",0xb1); &pshufw ("mm6","mm5",0xb1);
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6"); # ^= ROTATE(tp8,16)
- &pslld ("mm1",8); &pslld ("mm5",8);
- &psrld ("mm3",8); &psrld ("mm7",8);
- &movq ("mm2",&QWP(0,$key)); &movq ("mm6",&QWP(8,$key));
- &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp8<<8
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp8>>8
- &mov ($s0,&DWP(0-128,$tbl));
- &pslld ("mm1",16); &pslld ("mm5",16);
- &mov ($s1,&DWP(64-128,$tbl));
- &psrld ("mm3",16); &psrld ("mm7",16);
- &mov ($s2,&DWP(128-128,$tbl));
- &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp8<<24
- &mov ($s3,&DWP(192-128,$tbl));
- &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= tp8>>24
-
- &pxor ("mm0","mm2"); &pxor ("mm4","mm6");
- &jmp (&label("loop"));
-
- &set_label("out",16);
- &pxor ("mm0",&QWP(0,$key));
- &pxor ("mm4",&QWP(8,$key));
-
- &ret ();
-&function_end_B("_sse_AES_decrypt_compact");
- }
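The pcmpgtb/paddb pattern in the loop above is the MMX rendition of the same GF(2^8) doubling: a signed compare of zero against the packed bytes yields 0xff exactly for bytes with the top bit set, and that mask selects the 0x1b reduction constant parked at 8(%esp). Per byte the trick amounts to this (sketch; the function name is illustrative):

    sub xtime_byte {
        my ($b) = @_;
        my $mask = ($b & 0x80) ? 0xff : 0x00;        # what pcmpgtb against 0 produces
        return (($b << 1) & 0xff) ^ ($mask & 0x1b);  # paddb doubles, pxor reduces
    }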
-
-######################################################################
-# Vanilla block function.
-######################################################################
-
-sub decstep()
-{ my ($i,$td,@s) = @_;
- my $tmp = $key;
- my $out = $i==3?$s[0]:$acc;
-
- # no instructions are reordered, as performance appears
- # optimal... or rather, all attempts to reorder didn't
- # result in better performance [which, by the way, is not a
- # bit lower than encryption].
- if($i==3) { &mov ($key,$__key); }
- else { &mov ($out,$s[0]); }
- &and ($out,0xFF);
- &mov ($out,&DWP(0,$td,$out,8));
-
- if ($i==3) { $tmp=$s[1]; }
- &movz ($tmp,&HB($s[1]));
- &xor ($out,&DWP(3,$td,$tmp,8));
-
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],$acc); }
- else { &mov ($tmp,$s[2]); }
- &shr ($tmp,16);
- &and ($tmp,0xFF);
- &xor ($out,&DWP(2,$td,$tmp,8));
-
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }
- else { &mov ($tmp,$s[3]); }
- &shr ($tmp,24);
- &xor ($out,&DWP(1,$td,$tmp,8));
- if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &mov ($s[3],$__s0); }
- &comment();
-}
-
-sub declast()
-{ my ($i,$td,@s)=@_;
- my $tmp = $key;
- my $out = $i==3?$s[0]:$acc;
-
- if($i==0) { &lea ($td,&DWP(2048+128,$td));
- &mov ($tmp,&DWP(0-128,$td));
- &mov ($acc,&DWP(32-128,$td));
- &mov ($tmp,&DWP(64-128,$td));
- &mov ($acc,&DWP(96-128,$td));
- &mov ($tmp,&DWP(128-128,$td));
- &mov ($acc,&DWP(160-128,$td));
- &mov ($tmp,&DWP(192-128,$td));
- &mov ($acc,&DWP(224-128,$td));
- &lea ($td,&DWP(-128,$td)); }
- if($i==3) { &mov ($key,$__key); }
- else { &mov ($out,$s[0]); }
- &and ($out,0xFF);
- &movz ($out,&BP(0,$td,$out,1));
-
- if ($i==3) { $tmp=$s[1]; }
- &movz ($tmp,&HB($s[1]));
- &movz ($tmp,&BP(0,$td,$tmp,1));
- &shl ($tmp,8);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[2]; &mov ($s[1],$acc); }
- else { &mov ($tmp,$s[2]); }
- &shr ($tmp,16);
- &and ($tmp,0xFF);
- &movz ($tmp,&BP(0,$td,$tmp,1));
- &shl ($tmp,16);
- &xor ($out,$tmp);
-
- if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }
- else { &mov ($tmp,$s[3]); }
- &shr ($tmp,24);
- &movz ($tmp,&BP(0,$td,$tmp,1));
- &shl ($tmp,24);
- &xor ($out,$tmp);
- if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); }
- if ($i==3) { &mov ($s[3],$__s0);
- &lea ($td,&DWP(-2048,$td)); }
-}
-
-&function_begin_B("_x86_AES_decrypt");
- # note that caller is expected to allocate stack frame for me!
- &mov ($__key,$key); # save key
-
- &xor ($s0,&DWP(0,$key)); # xor with key
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &mov ($acc,&DWP(240,$key)); # load key->rounds
-
- if ($small_footprint) {
- &lea ($acc,&DWP(-2,$acc,$acc));
- &lea ($acc,&DWP(0,$key,$acc,8));
- &mov ($__end,$acc); # end of key schedule
- &set_label("loop",16);
- &decstep(0,$tbl,$s0,$s3,$s2,$s1);
- &decstep(1,$tbl,$s1,$s0,$s3,$s2);
- &decstep(2,$tbl,$s2,$s1,$s0,$s3);
- &decstep(3,$tbl,$s3,$s2,$s1,$s0);
- &add ($key,16); # advance rd_key
- &xor ($s0,&DWP(0,$key));
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
- &cmp ($key,$__end);
- &mov ($__key,$key);
- &jb (&label("loop"));
- }
- else {
- &cmp ($acc,10);
- &jle (&label("10rounds"));
- &cmp ($acc,12);
- &jle (&label("12rounds"));
-
- &set_label("14rounds",4);
- for ($i=1;$i<3;$i++) {
- &decstep(0,$tbl,$s0,$s3,$s2,$s1);
- &decstep(1,$tbl,$s1,$s0,$s3,$s2);
- &decstep(2,$tbl,$s2,$s1,$s0,$s3);
- &decstep(3,$tbl,$s3,$s2,$s1,$s0);
- &xor ($s0,&DWP(16*$i+0,$key));
- &xor ($s1,&DWP(16*$i+4,$key));
- &xor ($s2,&DWP(16*$i+8,$key));
- &xor ($s3,&DWP(16*$i+12,$key));
- }
- &add ($key,32);
- &mov ($__key,$key); # advance rd_key
- &set_label("12rounds",4);
- for ($i=1;$i<3;$i++) {
- &decstep(0,$tbl,$s0,$s3,$s2,$s1);
- &decstep(1,$tbl,$s1,$s0,$s3,$s2);
- &decstep(2,$tbl,$s2,$s1,$s0,$s3);
- &decstep(3,$tbl,$s3,$s2,$s1,$s0);
- &xor ($s0,&DWP(16*$i+0,$key));
- &xor ($s1,&DWP(16*$i+4,$key));
- &xor ($s2,&DWP(16*$i+8,$key));
- &xor ($s3,&DWP(16*$i+12,$key));
- }
- &add ($key,32);
- &mov ($__key,$key); # advance rd_key
- &set_label("10rounds",4);
- for ($i=1;$i<10;$i++) {
- &decstep(0,$tbl,$s0,$s3,$s2,$s1);
- &decstep(1,$tbl,$s1,$s0,$s3,$s2);
- &decstep(2,$tbl,$s2,$s1,$s0,$s3);
- &decstep(3,$tbl,$s3,$s2,$s1,$s0);
- &xor ($s0,&DWP(16*$i+0,$key));
- &xor ($s1,&DWP(16*$i+4,$key));
- &xor ($s2,&DWP(16*$i+8,$key));
- &xor ($s3,&DWP(16*$i+12,$key));
- }
- }
-
- &declast(0,$tbl,$s0,$s3,$s2,$s1);
- &declast(1,$tbl,$s1,$s0,$s3,$s2);
- &declast(2,$tbl,$s2,$s1,$s0,$s3);
- &declast(3,$tbl,$s3,$s2,$s1,$s0);
-
- &add ($key,$small_footprint?16:160);
- &xor ($s0,&DWP(0,$key));
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &ret ();
-
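The non-compact decrypt path above dispatches on key->rounds and deliberately falls through: the 14-round entry runs two rounds, drops into the 12-round block for two more, and both join the common 9-iteration loop, with declast supplying the final, MixColumns-free round (2+2+9+1 = 14). The round count itself is fixed by the key size; for reference (a sketch):

    sub aes_rounds { my %n = (128 => 10, 192 => 12, 256 => 14); $n{$_[0]} }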
-&set_label("AES_Td",64); # Yes! I keep it in the code segment!
- &_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
- &_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
- &_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
- &_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
- &_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
- &_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
- &_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
- &_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
- &_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
- &_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
- &_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
- &_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
- &_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
- &_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
- &_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
- &_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
- &_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
- &_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
- &_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
- &_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
- &_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
- &_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
- &_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
- &_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
- &_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
- &_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
- &_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
- &_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
- &_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
- &_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
- &_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
- &_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
- &_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
- &_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
- &_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
- &_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
- &_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
- &_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
- &_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
- &_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
- &_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
- &_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
- &_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
- &_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
- &_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
- &_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
- &_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
- &_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
- &_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
- &_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
- &_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
- &_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
- &_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
- &_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
- &_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
- &_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
- &_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
- &_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
- &_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
- &_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
- &_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
- &_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
- &_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
- &_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
-
-#Td4: # four copies of Td4 to choose from to avoid L1 aliasing
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-&function_end_B("_x86_AES_decrypt");
-
- # void AES_decrypt (const unsigned char *inp, unsigned char *out, const AES_KEY *key);
-&function_begin("AES_decrypt");
- &mov ($acc,&wparam(0)); # load inp
- &mov ($key,&wparam(2)); # load key
-
- &mov ($s0,"esp");
- &sub ("esp",36);
- &and ("esp",-64); # align to cache-line
-
- # place stack frame just "above" the key schedule
- &lea ($s1,&DWP(-64-63,$key));
- &sub ($s1,"esp");
- &neg ($s1);
- &and ($s1,0x3C0); # modulo 1024, but aligned to cache-line
- &sub ("esp",$s1);
- &add ("esp",4); # 4 is reserved for caller's return address
- &mov ($_esp,$s0); # save stack pointer
-
- &call (&label("pic_point")); # make it PIC!
- &set_label("pic_point");
- &blindpop($tbl);
- &picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
- &lea ($tbl,&DWP(&label("AES_Td")."-".&label("pic_point"),$tbl));
-
- # pick Td4 copy which can't "overlap" with stack frame or key schedule
- &lea ($s1,&DWP(768-4,"esp"));
- &sub ($s1,$tbl);
- &and ($s1,0x300);
- &lea ($tbl,&DWP(2048+128,$tbl,$s1));
-
- if (!$x86only) {
- &bt (&DWP(0,$s0),25); # check for SSE bit
- &jnc (&label("x86"));
-
- &movq ("mm0",&QWP(0,$acc));
- &movq ("mm4",&QWP(8,$acc));
- &call ("_sse_AES_decrypt_compact");
- &mov ("esp",$_esp); # restore stack pointer
- &mov ($acc,&wparam(1)); # load out
- &movq (&QWP(0,$acc),"mm0"); # write output data
- &movq (&QWP(8,$acc),"mm4");
- &emms ();
- &function_end_A();
- }
- &set_label("x86",16);
- &mov ($_tbl,$tbl);
- &mov ($s0,&DWP(0,$acc)); # load input data
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
- &call ("_x86_AES_decrypt_compact");
- &mov ("esp",$_esp); # restore stack pointer
- &mov ($acc,&wparam(1)); # load out
- &mov (&DWP(0,$acc),$s0); # write output data
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
-&function_end("AES_decrypt");
-
- # void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-# size_t length, const AES_KEY *key,
-# unsigned char *ivp,const int enc);
-{
-# stack frame layout
-#  -4(%esp)   # return address          0(%esp)
-#   0(%esp)   # s0 backing store        4(%esp)
-#   4(%esp)   # s1 backing store        8(%esp)
-#   8(%esp)   # s2 backing store       12(%esp)
-#  12(%esp)   # s3 backing store       16(%esp)
-#  16(%esp)   # key backup             20(%esp)
-#  20(%esp)   # end of key schedule    24(%esp)
-#  24(%esp)   # %ebp backup            28(%esp)
-#  28(%esp)   # %esp backup
-my $_inp=&DWP(32,"esp"); # copy of wparam(0)
-my $_out=&DWP(36,"esp"); # copy of wparam(1)
-my $_len=&DWP(40,"esp"); # copy of wparam(2)
-my $_key=&DWP(44,"esp"); # copy of wparam(3)
-my $_ivp=&DWP(48,"esp"); # copy of wparam(4)
-my $_tmp=&DWP(52,"esp"); # volatile variable
-#
-my $ivec=&DWP(60,"esp"); # ivec[16]
-my $aes_key=&DWP(76,"esp"); # copy of aes_key
-my $mark=&DWP(76+240,"esp"); # copy of aes_key->rounds
-
-&function_begin("AES_cbc_encrypt");
- &mov ($s2 eq "ecx"? $s2 : "",&wparam(2)); # load len
- &cmp ($s2,0);
- &je (&label("drop_out"));
-
- &call (&label("pic_point")); # make it PIC!
- &set_label("pic_point");
- &blindpop($tbl);
- &picmeup($s0,"OPENSSL_ia32cap_P",$tbl,&label("pic_point")) if(!$x86only);
-
- &cmp (&wparam(5),0);
- &lea ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
- &jne (&label("picked_te"));
- &lea ($tbl,&DWP(&label("AES_Td")."-".&label("AES_Te"),$tbl));
- &set_label("picked_te");
-
- # one can argue whether this is required
- &pushf ();
- &cld ();
-
- &cmp ($s2,$speed_limit);
- &jb (&label("slow_way"));
- &test ($s2,15);
- &jnz (&label("slow_way"));
- if (!$x86only) {
- &bt (&DWP(0,$s0),28); # check for hyper-threading bit
- &jc (&label("slow_way"));
- }
- # pre-allocate aligned stack frame...
- &lea ($acc,&DWP(-80-244,"esp"));
- &and ($acc,-64);
-
- # ... and make sure it doesn't alias with $tbl modulo 4096
- &mov ($s0,$tbl);
- &lea ($s1,&DWP(2048+256,$tbl));
- &mov ($s3,$acc);
- &and ($s0,0xfff); # s = %ebp&0xfff
- &and ($s1,0xfff); # e = (%ebp+2048+256)&0xfff
- &and ($s3,0xfff); # p = %esp&0xfff
-
- &cmp ($s3,$s1); # if (p>=e) %esp -= (p-e);
- &jb (&label("tbl_break_out"));
- &sub ($s3,$s1);
- &sub ($acc,$s3);
- &jmp (&label("tbl_ok"));
- &set_label("tbl_break_out",4); # else %esp -= (p-s)&0xfff + framesz;
- &sub ($s3,$s0);
- &and ($s3,0xfff);
- &add ($s3,384);
- &sub ($acc,$s3);
- &set_label("tbl_ok",4);
-
- &lea ($s3,&wparam(0)); # obtain pointer to parameter block
- &exch ("esp",$acc); # allocate stack frame
- &add ("esp",4); # reserve for return address!
- &mov ($_tbl,$tbl); # save %ebp
- &mov ($_esp,$acc); # save %esp
-
- &mov ($s0,&DWP(0,$s3)); # load inp
- &mov ($s1,&DWP(4,$s3)); # load out
- #&mov ($s2,&DWP(8,$s3)); # load len
- &mov ($key,&DWP(12,$s3)); # load key
- &mov ($acc,&DWP(16,$s3)); # load ivp
- &mov ($s3,&DWP(20,$s3)); # load enc flag
-
- &mov ($_inp,$s0); # save copy of inp
- &mov ($_out,$s1); # save copy of out
- &mov ($_len,$s2); # save copy of len
- &mov ($_key,$key); # save copy of key
- &mov ($_ivp,$acc); # save copy of ivp
-
- &mov ($mark,0); # copy of aes_key->rounds = 0;
- # do we copy key schedule to stack?
- &mov ($s1 eq "ebx" ? $s1 : "",$key);
- &mov ($s2 eq "ecx" ? $s2 : "",244/4);
- &sub ($s1,$tbl);
- &mov ("esi",$key);
- &and ($s1,0xfff);
- &lea ("edi",$aes_key);
- &cmp ($s1,2048+256);
- &jb (&label("do_copy"));
- &cmp ($s1,4096-244);
- &jb (&label("skip_copy"));
- &set_label("do_copy",4);
- &mov ($_key,"edi");
- &data_word(0xA5F3F689); # rep movsd
- &set_label("skip_copy");
-
- &mov ($key,16);
- &set_label("prefetch_tbl",4);
- &mov ($s0,&DWP(0,$tbl));
- &mov ($s1,&DWP(32,$tbl));
- &mov ($s2,&DWP(64,$tbl));
- &mov ($acc,&DWP(96,$tbl));
- &lea ($tbl,&DWP(128,$tbl));
- &sub ($key,1);
- &jnz (&label("prefetch_tbl"));
- &sub ($tbl,2048);
-
- &mov ($acc,$_inp);
- &mov ($key,$_ivp);
-
- &cmp ($s3,0);
- &je (&label("fast_decrypt"));
-
-#----------------------------- ENCRYPT -----------------------------#
- &mov ($s0,&DWP(0,$key)); # load iv
- &mov ($s1,&DWP(4,$key));
-
- &set_label("fast_enc_loop",16);
- &mov ($s2,&DWP(8,$key));
- &mov ($s3,&DWP(12,$key));
-
- &xor ($s0,&DWP(0,$acc)); # xor input data
- &xor ($s1,&DWP(4,$acc));
- &xor ($s2,&DWP(8,$acc));
- &xor ($s3,&DWP(12,$acc));
-
- &mov ($key,$_key); # load key
- &call ("_x86_AES_encrypt");
-
- &mov ($acc,$_inp); # load inp
- &mov ($key,$_out); # load out
-
- &mov (&DWP(0,$key),$s0); # save output data
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &lea ($acc,&DWP(16,$acc)); # advance inp
- &mov ($s2,$_len); # load len
- &mov ($_inp,$acc); # save inp
- &lea ($s3,&DWP(16,$key)); # advance out
- &mov ($_out,$s3); # save out
- &sub ($s2,16); # decrease len
- &mov ($_len,$s2); # save len
- &jnz (&label("fast_enc_loop"));
- &mov ($acc,$_ivp); # load ivp
- &mov ($s2,&DWP(8,$key)); # restore last 2 dwords
- &mov ($s3,&DWP(12,$key));
- &mov (&DWP(0,$acc),$s0); # save ivec
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
-
- &cmp ($mark,0); # was the key schedule copied?
- &mov ("edi",$_key);
- &je (&label("skip_ezero"));
- # zero copy of key schedule
- &mov ("ecx",240/4);
- &xor ("eax","eax");
- &align (4);
- &data_word(0xABF3F689); # rep stosd
- &set_label("skip_ezero")
- &mov ("esp",$_esp);
- &popf ();
- &set_label("drop_out");
- &function_end_A();
- &pushf (); # kludge, never executed
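fast_enc_loop above is plain CBC chaining: each plaintext block is XORed with the previous ciphertext block (the IV for the first one), encrypted, and the last ciphertext block is written back as the new IV. In outline (a sketch; $encrypt_block stands in for _x86_AES_encrypt and is not part of this file):

    sub cbc_encrypt_sketch {
        my ($encrypt_block, $iv, $in) = @_;   # $in: length assumed a multiple of 16
        my $out = '';
        for (my $off = 0; $off < length $in; $off += 16) {
            $iv = $encrypt_block->(substr($in, $off, 16) ^ $iv);   # chain
            $out .= $iv;
        }
        return ($out, $iv);                   # ciphertext and the updated IV
    }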
-
-#----------------------------- DECRYPT -----------------------------#
-&set_label("fast_decrypt",16);
-
- &cmp ($acc,$_out);
- &je (&label("fast_dec_in_place")); # in-place processing...
-
- &mov ($_tmp,$key);
-
- &align (4);
- &set_label("fast_dec_loop",16);
- &mov ($s0,&DWP(0,$acc)); # read input
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
-
- &mov ($key,$_key); # load key
- &call ("_x86_AES_decrypt");
-
- &mov ($key,$_tmp); # load ivp
- &mov ($acc,$_len); # load len
- &xor ($s0,&DWP(0,$key)); # xor iv
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &mov ($key,$_out); # load out
- &mov ($acc,$_inp); # load inp
-
- &mov (&DWP(0,$key),$s0); # write output
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &mov ($s2,$_len); # load len
- &mov ($_tmp,$acc); # save ivp
- &lea ($acc,&DWP(16,$acc)); # advance inp
- &mov ($_inp,$acc); # save inp
- &lea ($key,&DWP(16,$key)); # advance out
- &mov ($_out,$key); # save out
- &sub ($s2,16); # decrease len
- &mov ($_len,$s2); # save len
- &jnz (&label("fast_dec_loop"));
- &mov ($key,$_tmp); # load temp ivp
- &mov ($acc,$_ivp); # load user ivp
- &mov ($s0,&DWP(0,$key)); # load iv
- &mov ($s1,&DWP(4,$key));
- &mov ($s2,&DWP(8,$key));
- &mov ($s3,&DWP(12,$key));
- &mov (&DWP(0,$acc),$s0); # copy back to user
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
- &jmp (&label("fast_dec_out"));
-
- &set_label("fast_dec_in_place",16);
- &set_label("fast_dec_in_place_loop");
- &mov ($s0,&DWP(0,$acc)); # read input
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
-
- &lea ($key,$ivec);
- &mov (&DWP(0,$key),$s0); # copy to temp
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &mov ($key,$_key); # load key
- &call ("_x86_AES_decrypt");
-
- &mov ($key,$_ivp); # load ivp
- &mov ($acc,$_out); # load out
- &xor ($s0,&DWP(0,$key)); # xor iv
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &mov (&DWP(0,$acc),$s0); # write output
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
-
- &lea ($acc,&DWP(16,$acc)); # advance out
- &mov ($_out,$acc); # save out
-
- &lea ($acc,$ivec);
- &mov ($s0,&DWP(0,$acc)); # read temp
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
-
- &mov (&DWP(0,$key),$s0); # copy iv
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &mov ($acc,$_inp); # load inp
- &mov ($s2,$_len); # load len
- &lea ($acc,&DWP(16,$acc)); # advance inp
- &mov ($_inp,$acc); # save inp
- &sub ($s2,16); # decrease len
- &mov ($_len,$s2); # save len
- &jnz (&label("fast_dec_in_place_loop"));
-
- &set_label("fast_dec_out",4);
- &cmp ($mark,0); # was the key schedule copied?
- &mov ("edi",$_key);
- &je (&label("skip_dzero"));
- # zero copy of key schedule
- &mov ("ecx",240/4);
- &xor ("eax","eax");
- &align (4);
- &data_word(0xABF3F689); # rep stosd
- &set_label("skip_dzero")
- &mov ("esp",$_esp);
- &popf ();
- &function_end_A();
- &pushf (); # kludge, never executed
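The in-place branch above is why the 16-byte ivec temporary exists: CBC decryption consumes each ciphertext block twice, once as cipher input and once as the XOR mask for the next block, so when out == inp the block has to be stashed before it is overwritten. Schematically (a sketch; $decrypt_block is a stand-in):

    sub cbc_decrypt_in_place_sketch {
        my ($decrypt_block, $iv, $buf) = @_;   # $buf: reference to the buffer scalar
        for (my $off = 0; $off < length $$buf; $off += 16) {
            my $c = substr($$buf, $off, 16);                 # the ivec copy
            substr($$buf, $off, 16) = $decrypt_block->($c) ^ $iv;
            $iv = $c;                                        # chain off saved block
        }
        return $iv;
    }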
-
-#--------------------------- SLOW ROUTINE ---------------------------#
-&set_label("slow_way",16);
-
- &mov ($s0,&DWP(0,$s0)) if (!$x86only);# load OPENSSL_ia32cap
- &mov ($key,&wparam(3)); # load key
-
- # pre-allocate aligned stack frame...
- &lea ($acc,&DWP(-80,"esp"));
- &and ($acc,-64);
-
- # ... and make sure it doesn't alias with $key modulo 1024
- &lea ($s1,&DWP(-80-63,$key));
- &sub ($s1,$acc);
- &neg ($s1);
- &and ($s1,0x3C0); # modulo 1024, but aligned to cache-line
- &sub ($acc,$s1);
-
- # pick S-box copy which can't overlap with stack frame or $key
- &lea ($s1,&DWP(768,$acc));
- &sub ($s1,$tbl);
- &and ($s1,0x300);
- &lea ($tbl,&DWP(2048+128,$tbl,$s1));
-
- &lea ($s3,&wparam(0)); # pointer to parameter block
-
- &exch ("esp",$acc);
- &add ("esp",4); # reserve for return address!
- &mov ($_tbl,$tbl); # save %ebp
- &mov ($_esp,$acc); # save %esp
- &mov ($_tmp,$s0); # save OPENSSL_ia32cap
-
- &mov ($s0,&DWP(0,$s3)); # load inp
- &mov ($s1,&DWP(4,$s3)); # load out
- #&mov ($s2,&DWP(8,$s3)); # load len
- #&mov ($key,&DWP(12,$s3)); # load key
- &mov ($acc,&DWP(16,$s3)); # load ivp
- &mov ($s3,&DWP(20,$s3)); # load enc flag
-
- &mov ($_inp,$s0); # save copy of inp
- &mov ($_out,$s1); # save copy of out
- &mov ($_len,$s2); # save copy of len
- &mov ($_key,$key); # save copy of key
- &mov ($_ivp,$acc); # save copy of ivp
-
- &mov ($key,$acc);
- &mov ($acc,$s0);
-
- &cmp ($s3,0);
- &je (&label("slow_decrypt"));
-
-#--------------------------- SLOW ENCRYPT ---------------------------#
- &cmp ($s2,16);
- &mov ($s3,$s1);
- &jb (&label("slow_enc_tail"));
-
- if (!$x86only) {
- &bt ($_tmp,25); # check for SSE bit
- &jnc (&label("slow_enc_x86"));
-
- &movq ("mm0",&QWP(0,$key)); # load iv
- &movq ("mm4",&QWP(8,$key));
-
- &set_label("slow_enc_loop_sse",16);
- &pxor ("mm0",&QWP(0,$acc)); # xor input data
- &pxor ("mm4",&QWP(8,$acc));
-
- &mov ($key,$_key);
- &call ("_sse_AES_encrypt_compact");
-
- &mov ($acc,$_inp); # load inp
- &mov ($key,$_out); # load out
- &mov ($s2,$_len); # load len
-
- &movq (&QWP(0,$key),"mm0"); # save output data
- &movq (&QWP(8,$key),"mm4");
-
- &lea ($acc,&DWP(16,$acc)); # advance inp
- &mov ($_inp,$acc); # save inp
- &lea ($s3,&DWP(16,$key)); # advance out
- &mov ($_out,$s3); # save out
- &sub ($s2,16); # decrease len
- &cmp ($s2,16);
- &mov ($_len,$s2); # save len
- &jae (&label("slow_enc_loop_sse"));
- &test ($s2,15);
- &jnz (&label("slow_enc_tail"));
- &mov ($acc,$_ivp); # load ivp
- &movq (&QWP(0,$acc),"mm0"); # save ivec
- &movq (&QWP(8,$acc),"mm4");
- &emms ();
- &mov ("esp",$_esp);
- &popf ();
- &function_end_A();
- &pushf (); # kludge, never executed
- }
- &set_label("slow_enc_x86",16);
- &mov ($s0,&DWP(0,$key)); # load iv
- &mov ($s1,&DWP(4,$key));
-
- &set_label("slow_enc_loop_x86",4);
- &mov ($s2,&DWP(8,$key));
- &mov ($s3,&DWP(12,$key));
-
- &xor ($s0,&DWP(0,$acc)); # xor input data
- &xor ($s1,&DWP(4,$acc));
- &xor ($s2,&DWP(8,$acc));
- &xor ($s3,&DWP(12,$acc));
-
- &mov ($key,$_key); # load key
- &call ("_x86_AES_encrypt_compact");
-
- &mov ($acc,$_inp); # load inp
- &mov ($key,$_out); # load out
-
- &mov (&DWP(0,$key),$s0); # save output data
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &mov ($s2,$_len); # load len
- &lea ($acc,&DWP(16,$acc)); # advance inp
- &mov ($_inp,$acc); # save inp
- &lea ($s3,&DWP(16,$key)); # advance out
- &mov ($_out,$s3); # save out
- &sub ($s2,16); # decrease len
- &cmp ($s2,16);
- &mov ($_len,$s2); # save len
- &jae (&label("slow_enc_loop_x86"));
- &test ($s2,15);
- &jnz (&label("slow_enc_tail"));
- &mov ($acc,$_ivp); # load ivp
- &mov ($s2,&DWP(8,$key)); # restore last dwords
- &mov ($s3,&DWP(12,$key));
- &mov (&DWP(0,$acc),$s0); # save ivec
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
-
- &mov ("esp",$_esp);
- &popf ();
- &function_end_A();
- &pushf (); # kludge, never executed
-
- &set_label("slow_enc_tail",16);
- &emms () if (!$x86only);
- &mov ($key eq "edi"? $key:"",$s3); # load out to edi
- &mov ($s1,16);
- &sub ($s1,$s2);
- &cmp ($key,$acc eq "esi"? $acc:""); # compare with inp
- &je (&label("enc_in_place"));
- &align (4);
- &data_word(0xA4F3F689); # rep movsb # copy input
- &jmp (&label("enc_skip_in_place"));
- &set_label("enc_in_place");
- &lea ($key,&DWP(0,$key,$s2));
- &set_label("enc_skip_in_place");
- &mov ($s2,$s1);
- &xor ($s0,$s0);
- &align (4);
- &data_word(0xAAF3F689); # rep stosb # zero tail
-
- &mov ($key,$_ivp); # restore ivp
- &mov ($acc,$s3); # output as input
- &mov ($s0,&DWP(0,$key));
- &mov ($s1,&DWP(4,$key));
- &mov ($_len,16); # len=16
- &jmp (&label("slow_enc_loop_x86")); # one more spin...
-
-#--------------------------- SLOW DECRYPT ---------------------------#
-&set_label("slow_decrypt",16);
- if (!$x86only) {
- &bt ($_tmp,25); # check for SSE bit
- &jnc (&label("slow_dec_loop_x86"));
-
- &set_label("slow_dec_loop_sse",4);
- &movq ("mm0",&QWP(0,$acc)); # read input
- &movq ("mm4",&QWP(8,$acc));
-
- &mov ($key,$_key);
- &call ("_sse_AES_decrypt_compact");
-
- &mov ($acc,$_inp); # load inp
- &lea ($s0,$ivec);
- &mov ($s1,$_out); # load out
- &mov ($s2,$_len); # load len
- &mov ($key,$_ivp); # load ivp
-
- &movq ("mm1",&QWP(0,$acc)); # re-read input
- &movq ("mm5",&QWP(8,$acc));
-
- &pxor ("mm0",&QWP(0,$key)); # xor iv
- &pxor ("mm4",&QWP(8,$key));
-
- &movq (&QWP(0,$key),"mm1"); # copy input to iv
- &movq (&QWP(8,$key),"mm5");
-
- &sub ($s2,16); # decrease len
- &jc (&label("slow_dec_partial_sse"));
-
- &movq (&QWP(0,$s1),"mm0"); # write output
- &movq (&QWP(8,$s1),"mm4");
-
- &lea ($s1,&DWP(16,$s1)); # advance out
- &mov ($_out,$s1); # save out
- &lea ($acc,&DWP(16,$acc)); # advance inp
- &mov ($_inp,$acc); # save inp
- &mov ($_len,$s2); # save len
- &jnz (&label("slow_dec_loop_sse"));
- &emms ();
- &mov ("esp",$_esp);
- &popf ();
- &function_end_A();
- &pushf (); # kludge, never executed
-
- &set_label("slow_dec_partial_sse",16);
- &movq (&QWP(0,$s0),"mm0"); # save output to temp
- &movq (&QWP(8,$s0),"mm4");
- &emms ();
-
- &add ($s2 eq "ecx" ? "ecx":"",16);
- &mov ("edi",$s1); # out
- &mov ("esi",$s0); # temp
- &align (4);
- &data_word(0xA4F3F689); # rep movsb # copy partial output
-
- &mov ("esp",$_esp);
- &popf ();
- &function_end_A();
- &pushf (); # kludge, never executed
- }
- &set_label("slow_dec_loop_x86",16);
- &mov ($s0,&DWP(0,$acc)); # read input
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
-
- &lea ($key,$ivec);
- &mov (&DWP(0,$key),$s0); # copy to temp
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &mov ($key,$_key); # load key
- &call ("_x86_AES_decrypt_compact");
-
- &mov ($key,$_ivp); # load ivp
- &mov ($acc,$_len); # load len
- &xor ($s0,&DWP(0,$key)); # xor iv
- &xor ($s1,&DWP(4,$key));
- &xor ($s2,&DWP(8,$key));
- &xor ($s3,&DWP(12,$key));
-
- &sub ($acc,16);
- &jc (&label("slow_dec_partial_x86"));
-
- &mov ($_len,$acc); # save len
- &mov ($acc,$_out); # load out
-
- &mov (&DWP(0,$acc),$s0); # write output
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
-
- &lea ($acc,&DWP(16,$acc)); # advance out
- &mov ($_out,$acc); # save out
-
- &lea ($acc,$ivec);
- &mov ($s0,&DWP(0,$acc)); # read temp
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
-
- &mov (&DWP(0,$key),$s0); # copy it to iv
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &mov ($acc,$_inp); # load inp
- &lea ($acc,&DWP(16,$acc)); # advance inp
- &mov ($_inp,$acc); # save inp
- &jnz (&label("slow_dec_loop_x86"));
- &mov ("esp",$_esp);
- &popf ();
- &function_end_A();
- &pushf (); # kludge, never executed
-
- &set_label("slow_dec_partial_x86",16);
- &lea ($acc,$ivec);
- &mov (&DWP(0,$acc),$s0); # save output to temp
- &mov (&DWP(4,$acc),$s1);
- &mov (&DWP(8,$acc),$s2);
- &mov (&DWP(12,$acc),$s3);
-
- &mov ($acc,$_inp);
- &mov ($s0,&DWP(0,$acc)); # re-read input
- &mov ($s1,&DWP(4,$acc));
- &mov ($s2,&DWP(8,$acc));
- &mov ($s3,&DWP(12,$acc));
-
- &mov (&DWP(0,$key),$s0); # copy it to iv
- &mov (&DWP(4,$key),$s1);
- &mov (&DWP(8,$key),$s2);
- &mov (&DWP(12,$key),$s3);
-
- &mov ("ecx",$_len);
- &mov ("edi",$_out);
- &lea ("esi",$ivec);
- &align (4);
- &data_word(0xA4F3F689); # rep movsb # copy partial output
-
- &mov ("esp",$_esp);
- &popf ();
-&function_end("AES_cbc_encrypt");
-}
-
-#------------------------------------------------------------------#
-
-sub enckey()
-{
- &movz ("esi",&LB("edx")); # rk[i]>>0
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &movz ("esi",&HB("edx")); # rk[i]>>8
- &shl ("ebx",24);
- &xor ("eax","ebx");
-
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &shr ("edx",16);
- &movz ("esi",&LB("edx")); # rk[i]>>16
- &xor ("eax","ebx");
-
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &movz ("esi",&HB("edx")); # rk[i]>>24
- &shl ("ebx",8);
- &xor ("eax","ebx");
-
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &shl ("ebx",16);
- &xor ("eax","ebx");
-
- &xor ("eax",&DWP(1024-128,$tbl,"ecx",4)); # rcon
-}
-
-&function_begin("_x86_AES_set_encrypt_key");
- &mov ("esi",&wparam(1)); # user supplied key
- &mov ("edi",&wparam(3)); # private key schedule
-
- &test ("esi",-1);
- &jz (&label("badpointer"));
- &test ("edi",-1);
- &jz (&label("badpointer"));
-
- &call (&label("pic_point"));
- &set_label("pic_point");
- &blindpop($tbl);
- &lea ($tbl,&DWP(&label("AES_Te")."-".&label("pic_point"),$tbl));
- &lea ($tbl,&DWP(2048+128,$tbl));
-
- # prefetch Te4
- &mov ("eax",&DWP(0-128,$tbl));
- &mov ("ebx",&DWP(32-128,$tbl));
- &mov ("ecx",&DWP(64-128,$tbl));
- &mov ("edx",&DWP(96-128,$tbl));
- &mov ("eax",&DWP(128-128,$tbl));
- &mov ("ebx",&DWP(160-128,$tbl));
- &mov ("ecx",&DWP(192-128,$tbl));
- &mov ("edx",&DWP(224-128,$tbl));
-
- &mov ("ecx",&wparam(2)); # number of bits in key
- &cmp ("ecx",128);
- &je (&label("10rounds"));
- &cmp ("ecx",192);
- &je (&label("12rounds"));
- &cmp ("ecx",256);
- &je (&label("14rounds"));
- &mov ("eax",-2); # invalid number of bits
- &jmp (&label("exit"));
-
- &set_label("10rounds");
- &mov ("eax",&DWP(0,"esi")); # copy first 4 dwords
- &mov ("ebx",&DWP(4,"esi"));
- &mov ("ecx",&DWP(8,"esi"));
- &mov ("edx",&DWP(12,"esi"));
- &mov (&DWP(0,"edi"),"eax");
- &mov (&DWP(4,"edi"),"ebx");
- &mov (&DWP(8,"edi"),"ecx");
- &mov (&DWP(12,"edi"),"edx");
-
- &xor ("ecx","ecx");
- &jmp (&label("10shortcut"));
-
- &align (4);
- &set_label("10loop");
- &mov ("eax",&DWP(0,"edi")); # rk[0]
- &mov ("edx",&DWP(12,"edi")); # rk[3]
- &set_label("10shortcut");
- &enckey ();
-
- &mov (&DWP(16,"edi"),"eax"); # rk[4]
- &xor ("eax",&DWP(4,"edi"));
- &mov (&DWP(20,"edi"),"eax"); # rk[5]
- &xor ("eax",&DWP(8,"edi"));
- &mov (&DWP(24,"edi"),"eax"); # rk[6]
- &xor ("eax",&DWP(12,"edi"));
- &mov (&DWP(28,"edi"),"eax"); # rk[7]
- &inc ("ecx");
- &add ("edi",16);
- &cmp ("ecx",10);
- &jl (&label("10loop"));
-
- &mov (&DWP(80,"edi"),10); # setup number of rounds
- &xor ("eax","eax");
- &jmp (&label("exit"));
-
- &set_label("12rounds");
- &mov ("eax",&DWP(0,"esi")); # copy first 6 dwords
- &mov ("ebx",&DWP(4,"esi"));
- &mov ("ecx",&DWP(8,"esi"));
- &mov ("edx",&DWP(12,"esi"));
- &mov (&DWP(0,"edi"),"eax");
- &mov (&DWP(4,"edi"),"ebx");
- &mov (&DWP(8,"edi"),"ecx");
- &mov (&DWP(12,"edi"),"edx");
- &mov ("ecx",&DWP(16,"esi"));
- &mov ("edx",&DWP(20,"esi"));
- &mov (&DWP(16,"edi"),"ecx");
- &mov (&DWP(20,"edi"),"edx");
-
- &xor ("ecx","ecx");
- &jmp (&label("12shortcut"));
-
- &align (4);
- &set_label("12loop");
- &mov ("eax",&DWP(0,"edi")); # rk[0]
- &mov ("edx",&DWP(20,"edi")); # rk[5]
- &set_label("12shortcut");
- &enckey ();
-
- &mov (&DWP(24,"edi"),"eax"); # rk[6]
- &xor ("eax",&DWP(4,"edi"));
- &mov (&DWP(28,"edi"),"eax"); # rk[7]
- &xor ("eax",&DWP(8,"edi"));
- &mov (&DWP(32,"edi"),"eax"); # rk[8]
- &xor ("eax",&DWP(12,"edi"));
- &mov (&DWP(36,"edi"),"eax"); # rk[9]
-
- &cmp ("ecx",7);
- &je (&label("12break"));
- &inc ("ecx");
-
- &xor ("eax",&DWP(16,"edi"));
- &mov (&DWP(40,"edi"),"eax"); # rk[10]
- &xor ("eax",&DWP(20,"edi"));
- &mov (&DWP(44,"edi"),"eax"); # rk[11]
-
- &add ("edi",24);
- &jmp (&label("12loop"));
-
- &set_label("12break");
- &mov (&DWP(72,"edi"),12); # setup number of rounds
- &xor ("eax","eax");
- &jmp (&label("exit"));
-
- &set_label("14rounds");
- &mov ("eax",&DWP(0,"esi")); # copy first 8 dwords
- &mov ("ebx",&DWP(4,"esi"));
- &mov ("ecx",&DWP(8,"esi"));
- &mov ("edx",&DWP(12,"esi"));
- &mov (&DWP(0,"edi"),"eax");
- &mov (&DWP(4,"edi"),"ebx");
- &mov (&DWP(8,"edi"),"ecx");
- &mov (&DWP(12,"edi"),"edx");
- &mov ("eax",&DWP(16,"esi"));
- &mov ("ebx",&DWP(20,"esi"));
- &mov ("ecx",&DWP(24,"esi"));
- &mov ("edx",&DWP(28,"esi"));
- &mov (&DWP(16,"edi"),"eax");
- &mov (&DWP(20,"edi"),"ebx");
- &mov (&DWP(24,"edi"),"ecx");
- &mov (&DWP(28,"edi"),"edx");
-
- &xor ("ecx","ecx");
- &jmp (&label("14shortcut"));
-
- &align (4);
- &set_label("14loop");
- &mov ("edx",&DWP(28,"edi")); # rk[7]
- &set_label("14shortcut");
- &mov ("eax",&DWP(0,"edi")); # rk[0]
-
- &enckey ();
-
- &mov (&DWP(32,"edi"),"eax"); # rk[8]
- &xor ("eax",&DWP(4,"edi"));
- &mov (&DWP(36,"edi"),"eax"); # rk[9]
- &xor ("eax",&DWP(8,"edi"));
- &mov (&DWP(40,"edi"),"eax"); # rk[10]
- &xor ("eax",&DWP(12,"edi"));
- &mov (&DWP(44,"edi"),"eax"); # rk[11]
-
- &cmp ("ecx",6);
- &je (&label("14break"));
- &inc ("ecx");
-
- &mov ("edx","eax");
- &mov ("eax",&DWP(16,"edi")); # rk[4]
- &movz ("esi",&LB("edx")); # rk[11]>>0
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &movz ("esi",&HB("edx")); # rk[11]>>8
- &xor ("eax","ebx");
-
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &shr ("edx",16);
- &shl ("ebx",8);
- &movz ("esi",&LB("edx")); # rk[11]>>16
- &xor ("eax","ebx");
-
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &movz ("esi",&HB("edx")); # rk[11]>>24
- &shl ("ebx",16);
- &xor ("eax","ebx");
-
- &movz ("ebx",&BP(-128,$tbl,"esi",1));
- &shl ("ebx",24);
- &xor ("eax","ebx");
-
- &mov (&DWP(48,"edi"),"eax"); # rk[12]
- &xor ("eax",&DWP(20,"edi"));
- &mov (&DWP(52,"edi"),"eax"); # rk[13]
- &xor ("eax",&DWP(24,"edi"));
- &mov (&DWP(56,"edi"),"eax"); # rk[14]
- &xor ("eax",&DWP(28,"edi"));
- &mov (&DWP(60,"edi"),"eax"); # rk[15]
-
- &add ("edi",32);
- &jmp (&label("14loop"));
-
- &set_label("14break");
- &mov (&DWP(48,"edi"),14); # setup number of rounds
- &xor ("eax","eax");
- &jmp (&label("exit"));
-
- &set_label("badpointer");
- &mov ("eax",-1);
- &set_label("exit");
-&function_end("_x86_AES_set_encrypt_key");
-
-# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
-# AES_KEY *key)
-&function_begin_B("private_AES_set_encrypt_key");
- &call ("_x86_AES_set_encrypt_key");
- &ret ();
-&function_end_B("private_AES_set_encrypt_key");
-
-sub deckey()
-{ my ($i,$key,$tp1,$tp2,$tp4,$tp8) = @_;
- my $tmp = $tbl;
-
- &mov ($acc,$tp1);
- &and ($acc,0x80808080);
- &mov ($tmp,$acc);
- &shr ($tmp,7);
- &lea ($tp2,&DWP(0,$tp1,$tp1));
- &sub ($acc,$tmp);
- &and ($tp2,0xfefefefe);
- &and ($acc,0x1b1b1b1b);
- &xor ($acc,$tp2);
- &mov ($tp2,$acc);
-
- &and ($acc,0x80808080);
- &mov ($tmp,$acc);
- &shr ($tmp,7);
- &lea ($tp4,&DWP(0,$tp2,$tp2));
- &sub ($acc,$tmp);
- &and ($tp4,0xfefefefe);
- &and ($acc,0x1b1b1b1b);
- &xor ($tp2,$tp1); # tp2^tp1
- &xor ($acc,$tp4);
- &mov ($tp4,$acc);
-
- &and ($acc,0x80808080);
- &mov ($tmp,$acc);
- &shr ($tmp,7);
- &lea ($tp8,&DWP(0,$tp4,$tp4));
- &xor ($tp4,$tp1); # tp4^tp1
- &sub ($acc,$tmp);
- &and ($tp8,0xfefefefe);
- &and ($acc,0x1b1b1b1b);
- &rotl ($tp1,8); # = ROTATE(tp1,8)
- &xor ($tp8,$acc);
-
- &mov ($tmp,&DWP(4*($i+1),$key)); # modulo-scheduled load
-
- &xor ($tp1,$tp2);
- &xor ($tp2,$tp8);
- &xor ($tp1,$tp4);
- &rotl ($tp2,24);
- &xor ($tp4,$tp8);
- &xor ($tp1,$tp8); # ^= tp8^(tp4^tp1)^(tp2^tp1)
- &rotl ($tp4,16);
- &xor ($tp1,$tp2); # ^= ROTATE(tp8^tp2^tp1,24)
- &rotl ($tp8,8);
- &xor ($tp1,$tp4); # ^= ROTATE(tp8^tp4^tp1,16)
- &mov ($tp2,$tmp);
- &xor ($tp1,$tp8); # ^= ROTATE(tp8,8)
-
- &mov (&DWP(4*$i,$key),$tp1);
-}
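
deckey() applies InvMixColumns to one round-key word without any tables, doubling all four bytes at once with the 0x80808080/0x1b1b1b1b masks and then folding the tp2/tp4/tp8 products together with rotations. The same computation in C, as a sketch (xtime4/inv_mix_word are illustrative names; the rotation amounts follow the little-endian word layout this x86 code uses):

    #include <stdint.h>

    #define ROTL(x,n) (((x) << (n)) | ((x) >> (32 - (n))))

    /* Multiply each of the four packed bytes of x by 2 in GF(2^8):
     * shift every byte left, then XOR in 0x1b wherever a byte's high
     * bit overflowed -- the SWAR trick deckey() performs with masks. */
    static uint32_t xtime4(uint32_t x)
    {
        uint32_t hi = x & 0x80808080u;
        return ((x << 1) & 0xfefefefeu) ^ ((hi - (hi >> 7)) & 0x1b1b1b1bu);
    }

    /* InvMixColumns on one round-key word, per the comments in deckey():
     * out = e.x ^ ROTATE(9.x,8) ^ ROTATE(d.x,16) ^ ROTATE(b.x,24). */
    static uint32_t inv_mix_word(uint32_t tp1)
    {
        uint32_t tp2 = xtime4(tp1);        /* 2.x */
        uint32_t tp4 = xtime4(tp2);        /* 4.x */
        uint32_t tp8 = xtime4(tp4);        /* 8.x */
        uint32_t tp9 = tp1 ^ tp8;          /* 9.x */
        return (tp2 ^ tp4 ^ tp8)           /* e.x */
             ^ ROTL(tp9, 8)
             ^ ROTL(tp9 ^ tp4, 16)         /* d.x */
             ^ ROTL(tp9 ^ tp2, 24);        /* b.x */
    }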
-
-# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
-# AES_KEY *key)
-&function_begin_B("private_AES_set_decrypt_key");
- &call ("_x86_AES_set_encrypt_key");
- &cmp ("eax",0);
- &je (&label("proceed"));
- &ret ();
-
- &set_label("proceed");
- &push ("ebp");
- &push ("ebx");
- &push ("esi");
- &push ("edi");
-
- &mov ("esi",&wparam(2));
- &mov ("ecx",&DWP(240,"esi")); # pull number of rounds
- &lea ("ecx",&DWP(0,"","ecx",4));
- &lea ("edi",&DWP(0,"esi","ecx",4)); # pointer to last chunk
-
- &set_label("invert",4); # invert order of chunks
- &mov ("eax",&DWP(0,"esi"));
- &mov ("ebx",&DWP(4,"esi"));
- &mov ("ecx",&DWP(0,"edi"));
- &mov ("edx",&DWP(4,"edi"));
- &mov (&DWP(0,"edi"),"eax");
- &mov (&DWP(4,"edi"),"ebx");
- &mov (&DWP(0,"esi"),"ecx");
- &mov (&DWP(4,"esi"),"edx");
- &mov ("eax",&DWP(8,"esi"));
- &mov ("ebx",&DWP(12,"esi"));
- &mov ("ecx",&DWP(8,"edi"));
- &mov ("edx",&DWP(12,"edi"));
- &mov (&DWP(8,"edi"),"eax");
- &mov (&DWP(12,"edi"),"ebx");
- &mov (&DWP(8,"esi"),"ecx");
- &mov (&DWP(12,"esi"),"edx");
- &add ("esi",16);
- &sub ("edi",16);
- &cmp ("esi","edi");
- &jne (&label("invert"));
-
- &mov ($key,&wparam(2));
- &mov ($acc,&DWP(240,$key)); # pull number of rounds
- &lea ($acc,&DWP(-2,$acc,$acc));
- &lea ($acc,&DWP(0,$key,$acc,8));
- &mov (&wparam(2),$acc);
-
- &mov ($s0,&DWP(16,$key)); # modulo-scheduled load
- &set_label("permute",4); # permute the key schedule
- &add ($key,16);
- &deckey (0,$key,$s0,$s1,$s2,$s3);
- &deckey (1,$key,$s1,$s2,$s3,$s0);
- &deckey (2,$key,$s2,$s3,$s0,$s1);
- &deckey (3,$key,$s3,$s0,$s1,$s2);
- &cmp ($key,&wparam(2));
- &jb (&label("permute"));
-
- &xor ("eax","eax"); # return success
-&function_end("private_AES_set_decrypt_key");
-&asciz("AES for x86, CRYPTOGAMS by <appro\@openssl.org>");
-
-&asm_finish();
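
Taken together, private_AES_set_decrypt_key above does in assembly what the following C sketch does: run the encryption key schedule, reverse the order of the 16-byte round keys in place, and push every inner round key through InvMixColumns (inv_mix_word() from the sketch above). It assumes OpenSSL's AES_KEY layout and the public AES_set_encrypt_key entry point; treat it as an illustration, not the library interface:

    #include <openssl/aes.h>
    #include <stdint.h>

    int set_decrypt_key_sketch(const unsigned char *userKey, int bits,
                               AES_KEY *key)
    {
        int rc = AES_set_encrypt_key(userKey, bits, key);
        if (rc != 0)
            return rc;

        uint32_t *rk = (uint32_t *)key->rd_key;
        int rounds = key->rounds;

        /* invert the order of the 16-byte chunks (the "invert" loop) */
        for (int i = 0, j = 4 * rounds; i < j; i += 4, j -= 4)
            for (int k = 0; k < 4; k++) {
                uint32_t t = rk[i + k];
                rk[i + k] = rk[j + k];
                rk[j + k] = t;
            }

        /* InvMixColumns on rounds 1..rounds-1 (the "permute" loop) */
        for (int r = 1; r < rounds; r++)
            for (int k = 0; k < 4; k++)
                rk[4 * r + k] = inv_mix_word(rk[4 * r + k]);

        return 0;
    }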
diff --git a/app/openssl/crypto/aes/asm/aes-armv4.S b/app/openssl/crypto/aes/asm/aes-armv4.S
deleted file mode 100644
index 333a5227..00000000
--- a/app/openssl/crypto/aes/asm/aes-armv4.S
+++ /dev/null
@@ -1,1177 +0,0 @@
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see http://www.openssl.org/~appro/cryptogams/.
-@ ====================================================================
-
-@ AES for ARMv4
-
-@ January 2007.
-@
-@ The code uses a single 1K S-box and is more than twice as fast as code
-@ generated by gcc-3.4.1. This is thanks to a unique feature of the ARMv4
-@ ISA, which allows a logical or arithmetic operation to be merged with a
-@ shift or rotate in one instruction, emitting the combined result every
-@ cycle. The module is endian-neutral. Performance is ~42 cycles/byte for
-@ a 128-bit key [on a single-issue XScale PXA250 core].
-
-@ May 2007.
-@
-@ AES_set_[en|de]crypt_key is added.
-
-@ July 2010.
-@
-@ Rescheduling for a dual-issue pipeline resulted in a 12% improvement on
-@ a Cortex-A8 core and ~25 cycles per byte processed with a 128-bit key.
-
-@ February 2011.
-@
-@ Profiler-assisted and platform-specific optimization resulted in a 16%
-@ improvement on a Cortex-A8 core and ~21.5 cycles per byte.
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-#endif
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type AES_Te,%object
-.align 5
-AES_Te:
-.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
-.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
-.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
-.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
-.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
-.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
-.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
-.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
-.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
-.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
-.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
-.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
-.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
-.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
-.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
-.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
-.word 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
-.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
-.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
-.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
-.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
-.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
-.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
-.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
-.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
-.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
-.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
-.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
-.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
-.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
-.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
-.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
-.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
-.word 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
-.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
-.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
-.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
-.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
-.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
-.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
-.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
-.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
-.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
-.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
-.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
-.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
-.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
-.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
-.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
-.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
-.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
-.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
-.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
-.word 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
-.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
-.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
-.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
-.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
-.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
-.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
-.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
-.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
-.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
-.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
-@ Te4[256]
-.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-@ rcon[]
-.word 0x01000000, 0x02000000, 0x04000000, 0x08000000
-.word 0x10000000, 0x20000000, 0x40000000, 0x80000000
-.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
-.size AES_Te,.-AES_Te
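
Each 32-bit AES_Te entry above packs the S-box output premultiplied by the MixColumns column {02,01,01,03}; the first word 0xc66363a5, for instance, is {02.63, 63, 63, 03.63}. A generator sketch in C (sbox[] as in the earlier sketch; xtime() is single-byte GF(2^8) doubling):

    /* GF(2^8) doubling of one byte. */
    static uint8_t xtime(uint8_t s)
    {
        return (uint8_t)((s << 1) ^ ((s >> 7) * 0x1b));
    }

    /* Build one Te0 word: MixColumns coefficients applied to S[x],
     * packed most-significant byte first. */
    static uint32_t te0_entry(uint8_t x, const uint8_t sbox[256])
    {
        uint8_t s  = sbox[x];
        uint8_t s2 = xtime(s);                           /* 02.S[x] */
        return ((uint32_t)s2 << 24) | ((uint32_t)s << 16)
             | ((uint32_t)s  <<  8) | (uint32_t)(s ^ s2); /* 03.S[x] */
    }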
-
-@ void AES_encrypt(const unsigned char *in, unsigned char *out,
-@ const AES_KEY *key) {
-.global AES_encrypt
-.type AES_encrypt,%function
-.align 5
-AES_encrypt:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ AES_encrypt
-#else
- adr r3,AES_encrypt
-#endif
- stmdb sp!,{r1,r4-r12,lr}
- mov r12,r0 @ inp
- mov r11,r2
- sub r10,r3,#AES_encrypt-AES_Te @ Te
-#if __ARM_ARCH__<7
- ldrb r0,[r12,#3] @ load input data in endian-neutral
- ldrb r4,[r12,#2] @ manner...
- ldrb r5,[r12,#1]
- ldrb r6,[r12,#0]
- orr r0,r0,r4,lsl#8
- ldrb r1,[r12,#7]
- orr r0,r0,r5,lsl#16
- ldrb r4,[r12,#6]
- orr r0,r0,r6,lsl#24
- ldrb r5,[r12,#5]
- ldrb r6,[r12,#4]
- orr r1,r1,r4,lsl#8
- ldrb r2,[r12,#11]
- orr r1,r1,r5,lsl#16
- ldrb r4,[r12,#10]
- orr r1,r1,r6,lsl#24
- ldrb r5,[r12,#9]
- ldrb r6,[r12,#8]
- orr r2,r2,r4,lsl#8
- ldrb r3,[r12,#15]
- orr r2,r2,r5,lsl#16
- ldrb r4,[r12,#14]
- orr r2,r2,r6,lsl#24
- ldrb r5,[r12,#13]
- ldrb r6,[r12,#12]
- orr r3,r3,r4,lsl#8
- orr r3,r3,r5,lsl#16
- orr r3,r3,r6,lsl#24
-#else
- ldr r0,[r12,#0]
- ldr r1,[r12,#4]
- ldr r2,[r12,#8]
- ldr r3,[r12,#12]
-#ifdef __ARMEL__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-#endif
-#endif
- bl _armv4_AES_encrypt
-
- ldr r12,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-#endif
- str r0,[r12,#0]
- str r1,[r12,#4]
- str r2,[r12,#8]
- str r3,[r12,#12]
-#else
- mov r4,r0,lsr#24 @ write output in endian-neutral
- mov r5,r0,lsr#16 @ manner...
- mov r6,r0,lsr#8
- strb r4,[r12,#0]
- strb r5,[r12,#1]
- mov r4,r1,lsr#24
- strb r6,[r12,#2]
- mov r5,r1,lsr#16
- strb r0,[r12,#3]
- mov r6,r1,lsr#8
- strb r4,[r12,#4]
- strb r5,[r12,#5]
- mov r4,r2,lsr#24
- strb r6,[r12,#6]
- mov r5,r2,lsr#16
- strb r1,[r12,#7]
- mov r6,r2,lsr#8
- strb r4,[r12,#8]
- strb r5,[r12,#9]
- mov r4,r3,lsr#24
- strb r6,[r12,#10]
- mov r5,r3,lsr#16
- strb r2,[r12,#11]
- mov r6,r3,lsr#8
- strb r4,[r12,#12]
- strb r5,[r12,#13]
- strb r6,[r12,#14]
- strb r3,[r12,#15]
-#endif
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size AES_encrypt,.-AES_encrypt
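
The ldrb/orr ladders above are nothing more than endian-neutral big-endian word loads (and the strb ladders the matching stores); on ARMv7 and later they collapse to ldr plus rev on little-endian cores. In C, the pre-v7 load path is simply this sketch:

    #include <stdint.h>

    /* Byte-wise big-endian load, equivalent to the ldrb/orr sequence. */
    static uint32_t load_be32(const unsigned char *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16)
             | ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
    }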
-
-.type _armv4_AES_encrypt,%function
-.align 2
-_armv4_AES_encrypt:
- str lr,[sp,#-4]! @ push lr
- ldmia r11!,{r4-r7}
- eor r0,r0,r4
- ldr r12,[r11,#240-16]
- eor r1,r1,r5
- eor r2,r2,r6
- eor r3,r3,r7
- sub r12,r12,#1
- mov lr,#255
-
- and r7,lr,r0
- and r8,lr,r0,lsr#8
- and r9,lr,r0,lsr#16
- mov r0,r0,lsr#24
-.Lenc_loop:
- ldr r4,[r10,r7,lsl#2] @ Te3[s0>>0]
- and r7,lr,r1,lsr#16 @ i0
- ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
- and r8,lr,r1
- ldr r6,[r10,r9,lsl#2] @ Te1[s0>>16]
- and r9,lr,r1,lsr#8
- ldr r0,[r10,r0,lsl#2] @ Te0[s0>>24]
- mov r1,r1,lsr#24
-
- ldr r7,[r10,r7,lsl#2] @ Te1[s1>>16]
- ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0]
- ldr r9,[r10,r9,lsl#2] @ Te2[s1>>8]
- eor r0,r0,r7,ror#8
- ldr r1,[r10,r1,lsl#2] @ Te0[s1>>24]
- and r7,lr,r2,lsr#8 @ i0
- eor r5,r5,r8,ror#8
- and r8,lr,r2,lsr#16 @ i1
- eor r6,r6,r9,ror#8
- and r9,lr,r2
- ldr r7,[r10,r7,lsl#2] @ Te2[s2>>8]
- eor r1,r1,r4,ror#24
- ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16]
- mov r2,r2,lsr#24
-
- ldr r9,[r10,r9,lsl#2] @ Te3[s2>>0]
- eor r0,r0,r7,ror#16
- ldr r2,[r10,r2,lsl#2] @ Te0[s2>>24]
- and r7,lr,r3 @ i0
- eor r1,r1,r8,ror#8
- and r8,lr,r3,lsr#8 @ i1
- eor r6,r6,r9,ror#16
- and r9,lr,r3,lsr#16 @ i2
- ldr r7,[r10,r7,lsl#2] @ Te3[s3>>0]
- eor r2,r2,r5,ror#16
- ldr r8,[r10,r8,lsl#2] @ Te2[s3>>8]
- mov r3,r3,lsr#24
-
- ldr r9,[r10,r9,lsl#2] @ Te1[s3>>16]
- eor r0,r0,r7,ror#24
- ldr r7,[r11],#16
- eor r1,r1,r8,ror#16
- ldr r3,[r10,r3,lsl#2] @ Te0[s3>>24]
- eor r2,r2,r9,ror#8
- ldr r4,[r11,#-12]
- eor r3,r3,r6,ror#8
-
- ldr r5,[r11,#-8]
- eor r0,r0,r7
- ldr r6,[r11,#-4]
- and r7,lr,r0
- eor r1,r1,r4
- and r8,lr,r0,lsr#8
- eor r2,r2,r5
- and r9,lr,r0,lsr#16
- eor r3,r3,r6
- mov r0,r0,lsr#24
-
- subs r12,r12,#1
- bne .Lenc_loop
-
- add r10,r10,#2
-
- ldrb r4,[r10,r7,lsl#2] @ Te4[s0>>0]
- and r7,lr,r1,lsr#16 @ i0
- ldrb r5,[r10,r8,lsl#2] @ Te4[s0>>8]
- and r8,lr,r1
- ldrb r6,[r10,r9,lsl#2] @ Te4[s0>>16]
- and r9,lr,r1,lsr#8
- ldrb r0,[r10,r0,lsl#2] @ Te4[s0>>24]
- mov r1,r1,lsr#24
-
- ldrb r7,[r10,r7,lsl#2] @ Te4[s1>>16]
- ldrb r8,[r10,r8,lsl#2] @ Te4[s1>>0]
- ldrb r9,[r10,r9,lsl#2] @ Te4[s1>>8]
- eor r0,r7,r0,lsl#8
- ldrb r1,[r10,r1,lsl#2] @ Te4[s1>>24]
- and r7,lr,r2,lsr#8 @ i0
- eor r5,r8,r5,lsl#8
- and r8,lr,r2,lsr#16 @ i1
- eor r6,r9,r6,lsl#8
- and r9,lr,r2
- ldrb r7,[r10,r7,lsl#2] @ Te4[s2>>8]
- eor r1,r4,r1,lsl#24
- ldrb r8,[r10,r8,lsl#2] @ Te4[s2>>16]
- mov r2,r2,lsr#24
-
- ldrb r9,[r10,r9,lsl#2] @ Te4[s2>>0]
- eor r0,r7,r0,lsl#8
- ldrb r2,[r10,r2,lsl#2] @ Te4[s2>>24]
- and r7,lr,r3 @ i0
- eor r1,r1,r8,lsl#16
- and r8,lr,r3,lsr#8 @ i1
- eor r6,r9,r6,lsl#8
- and r9,lr,r3,lsr#16 @ i2
- ldrb r7,[r10,r7,lsl#2] @ Te4[s3>>0]
- eor r2,r5,r2,lsl#24
- ldrb r8,[r10,r8,lsl#2] @ Te4[s3>>8]
- mov r3,r3,lsr#24
-
- ldrb r9,[r10,r9,lsl#2] @ Te4[s3>>16]
- eor r0,r7,r0,lsl#8
- ldr r7,[r11,#0]
- ldrb r3,[r10,r3,lsl#2] @ Te4[s3>>24]
- eor r1,r1,r8,lsl#8
- ldr r4,[r11,#4]
- eor r2,r2,r9,lsl#16
- ldr r5,[r11,#8]
- eor r3,r6,r3,lsl#24
- ldr r6,[r11,#12]
-
- eor r0,r0,r7
- eor r1,r1,r4
- eor r2,r2,r5
- eor r3,r3,r6
-
- sub r10,r10,#2
- ldr pc,[sp],#4 @ pop and return
-.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
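
_armv4_AES_encrypt realizes the single-1K-table scheme described in the header comment: only Te0 is stored, and the Te1/Te2/Te3 lookups annotated in .Lenc_loop are synthesized by fusing a ror into the eor that consumes each loaded word. One inner-round column rendered in C, as a sketch (ROTR mirrors the ror#n operand shifts):

    #include <stdint.h>

    #define ROTR(x,n) (((x) >> (n)) | ((x) << (32 - (n))))

    extern const uint32_t Te0[256];   /* the 1K table above */

    /* One column of an inner encryption round: Te1..Te3 are just
     * rotations of Te0, so three extra 1K tables are never stored. */
    static uint32_t enc_round_col(uint32_t s0, uint32_t s1, uint32_t s2,
                                  uint32_t s3, uint32_t rk)
    {
        return Te0[ s0 >> 24        ]
             ^ ROTR(Te0[(s1 >> 16) & 0xff],  8)
             ^ ROTR(Te0[(s2 >>  8) & 0xff], 16)
             ^ ROTR(Te0[ s3        & 0xff], 24)
             ^ rk;
    }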
-
-.global private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,%function
-.align 5
-private_AES_set_encrypt_key:
-_armv4_AES_set_encrypt_key:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ AES_set_encrypt_key
-#else
- adr r3,private_AES_set_encrypt_key
-#endif
- teq r0,#0
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- moveq r0,#-1
- beq .Labrt
- teq r2,#0
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- moveq r0,#-1
- beq .Labrt
-
- teq r1,#128
- beq .Lok
- teq r1,#192
- beq .Lok
- teq r1,#256
-#if __ARM_ARCH__>=7
- itt ne @ Thumb2 thing, sanity check in ARM
-#endif
- movne r0,#-1
- bne .Labrt
-
-.Lok: stmdb sp!,{r4-r12,lr}
- sub r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4
-
- mov r12,r0 @ inp
- mov lr,r1 @ bits
- mov r11,r2 @ key
-
-#if __ARM_ARCH__<7
- ldrb r0,[r12,#3] @ load input data in endian-neutral
- ldrb r4,[r12,#2] @ manner...
- ldrb r5,[r12,#1]
- ldrb r6,[r12,#0]
- orr r0,r0,r4,lsl#8
- ldrb r1,[r12,#7]
- orr r0,r0,r5,lsl#16
- ldrb r4,[r12,#6]
- orr r0,r0,r6,lsl#24
- ldrb r5,[r12,#5]
- ldrb r6,[r12,#4]
- orr r1,r1,r4,lsl#8
- ldrb r2,[r12,#11]
- orr r1,r1,r5,lsl#16
- ldrb r4,[r12,#10]
- orr r1,r1,r6,lsl#24
- ldrb r5,[r12,#9]
- ldrb r6,[r12,#8]
- orr r2,r2,r4,lsl#8
- ldrb r3,[r12,#15]
- orr r2,r2,r5,lsl#16
- ldrb r4,[r12,#14]
- orr r2,r2,r6,lsl#24
- ldrb r5,[r12,#13]
- ldrb r6,[r12,#12]
- orr r3,r3,r4,lsl#8
- str r0,[r11],#16
- orr r3,r3,r5,lsl#16
- str r1,[r11,#-12]
- orr r3,r3,r6,lsl#24
- str r2,[r11,#-8]
- str r3,[r11,#-4]
-#else
- ldr r0,[r12,#0]
- ldr r1,[r12,#4]
- ldr r2,[r12,#8]
- ldr r3,[r12,#12]
-#ifdef __ARMEL__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-#endif
- str r0,[r11],#16
- str r1,[r11,#-12]
- str r2,[r11,#-8]
- str r3,[r11,#-4]
-#endif
-
- teq lr,#128
- bne .Lnot128
- mov r12,#10
- str r12,[r11,#240-16]
- add r6,r10,#256 @ rcon
- mov lr,#255
-
-.L128_loop:
- and r5,lr,r3,lsr#24
- and r7,lr,r3,lsr#16
- ldrb r5,[r10,r5]
- and r8,lr,r3,lsr#8
- ldrb r7,[r10,r7]
- and r9,lr,r3
- ldrb r8,[r10,r8]
- orr r5,r5,r7,lsl#24
- ldrb r9,[r10,r9]
- orr r5,r5,r8,lsl#16
- ldr r4,[r6],#4 @ rcon[i++]
- orr r5,r5,r9,lsl#8
- eor r5,r5,r4
- eor r0,r0,r5 @ rk[4]=rk[0]^...
- eor r1,r1,r0 @ rk[5]=rk[1]^rk[4]
- str r0,[r11],#16
- eor r2,r2,r1 @ rk[6]=rk[2]^rk[5]
- str r1,[r11,#-12]
- eor r3,r3,r2 @ rk[7]=rk[3]^rk[6]
- str r2,[r11,#-8]
- subs r12,r12,#1
- str r3,[r11,#-4]
- bne .L128_loop
- sub r2,r11,#176
- b .Ldone
-
-.Lnot128:
-#if __ARM_ARCH__<7
- ldrb r8,[r12,#19]
- ldrb r4,[r12,#18]
- ldrb r5,[r12,#17]
- ldrb r6,[r12,#16]
- orr r8,r8,r4,lsl#8
- ldrb r9,[r12,#23]
- orr r8,r8,r5,lsl#16
- ldrb r4,[r12,#22]
- orr r8,r8,r6,lsl#24
- ldrb r5,[r12,#21]
- ldrb r6,[r12,#20]
- orr r9,r9,r4,lsl#8
- orr r9,r9,r5,lsl#16
- str r8,[r11],#8
- orr r9,r9,r6,lsl#24
- str r9,[r11,#-4]
-#else
- ldr r8,[r12,#16]
- ldr r9,[r12,#20]
-#ifdef __ARMEL__
- rev r8,r8
- rev r9,r9
-#endif
- str r8,[r11],#8
- str r9,[r11,#-4]
-#endif
-
- teq lr,#192
- bne .Lnot192
- mov r12,#12
- str r12,[r11,#240-24]
- add r6,r10,#256 @ rcon
- mov lr,#255
- mov r12,#8
-
-.L192_loop:
- and r5,lr,r9,lsr#24
- and r7,lr,r9,lsr#16
- ldrb r5,[r10,r5]
- and r8,lr,r9,lsr#8
- ldrb r7,[r10,r7]
- and r9,lr,r9
- ldrb r8,[r10,r8]
- orr r5,r5,r7,lsl#24
- ldrb r9,[r10,r9]
- orr r5,r5,r8,lsl#16
- ldr r4,[r6],#4 @ rcon[i++]
- orr r5,r5,r9,lsl#8
- eor r9,r5,r4
- eor r0,r0,r9 @ rk[6]=rk[0]^...
- eor r1,r1,r0 @ rk[7]=rk[1]^rk[6]
- str r0,[r11],#24
- eor r2,r2,r1 @ rk[8]=rk[2]^rk[7]
- str r1,[r11,#-20]
- eor r3,r3,r2 @ rk[9]=rk[3]^rk[8]
- str r2,[r11,#-16]
- subs r12,r12,#1
- str r3,[r11,#-12]
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- subeq r2,r11,#216
- beq .Ldone
-
- ldr r7,[r11,#-32]
- ldr r8,[r11,#-28]
- eor r7,r7,r3 @ rk[10]=rk[4]^rk[9]
- eor r9,r8,r7 @ rk[11]=rk[5]^rk[10]
- str r7,[r11,#-8]
- str r9,[r11,#-4]
- b .L192_loop
-
-.Lnot192:
-#if __ARM_ARCH__<7
- ldrb r8,[r12,#27]
- ldrb r4,[r12,#26]
- ldrb r5,[r12,#25]
- ldrb r6,[r12,#24]
- orr r8,r8,r4,lsl#8
- ldrb r9,[r12,#31]
- orr r8,r8,r5,lsl#16
- ldrb r4,[r12,#30]
- orr r8,r8,r6,lsl#24
- ldrb r5,[r12,#29]
- ldrb r6,[r12,#28]
- orr r9,r9,r4,lsl#8
- orr r9,r9,r5,lsl#16
- str r8,[r11],#8
- orr r9,r9,r6,lsl#24
- str r9,[r11,#-4]
-#else
- ldr r8,[r12,#24]
- ldr r9,[r12,#28]
-#ifdef __ARMEL__
- rev r8,r8
- rev r9,r9
-#endif
- str r8,[r11],#8
- str r9,[r11,#-4]
-#endif
-
- mov r12,#14
- str r12,[r11,#240-32]
- add r6,r10,#256 @ rcon
- mov lr,#255
- mov r12,#7
-
-.L256_loop:
- and r5,lr,r9,lsr#24
- and r7,lr,r9,lsr#16
- ldrb r5,[r10,r5]
- and r8,lr,r9,lsr#8
- ldrb r7,[r10,r7]
- and r9,lr,r9
- ldrb r8,[r10,r8]
- orr r5,r5,r7,lsl#24
- ldrb r9,[r10,r9]
- orr r5,r5,r8,lsl#16
- ldr r4,[r6],#4 @ rcon[i++]
- orr r5,r5,r9,lsl#8
- eor r9,r5,r4
- eor r0,r0,r9 @ rk[8]=rk[0]^...
- eor r1,r1,r0 @ rk[9]=rk[1]^rk[8]
- str r0,[r11],#32
- eor r2,r2,r1 @ rk[10]=rk[2]^rk[9]
- str r1,[r11,#-28]
- eor r3,r3,r2 @ rk[11]=rk[3]^rk[10]
- str r2,[r11,#-24]
- subs r12,r12,#1
- str r3,[r11,#-20]
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- subeq r2,r11,#256
- beq .Ldone
-
- and r5,lr,r3
- and r7,lr,r3,lsr#8
- ldrb r5,[r10,r5]
- and r8,lr,r3,lsr#16
- ldrb r7,[r10,r7]
- and r9,lr,r3,lsr#24
- ldrb r8,[r10,r8]
- orr r5,r5,r7,lsl#8
- ldrb r9,[r10,r9]
- orr r5,r5,r8,lsl#16
- ldr r4,[r11,#-48]
- orr r5,r5,r9,lsl#24
-
- ldr r7,[r11,#-44]
- ldr r8,[r11,#-40]
- eor r4,r4,r5 @ rk[12]=rk[4]^...
- ldr r9,[r11,#-36]
- eor r7,r7,r4 @ rk[13]=rk[5]^rk[12]
- str r4,[r11,#-16]
- eor r8,r8,r7 @ rk[14]=rk[6]^rk[13]
- str r7,[r11,#-12]
- eor r9,r9,r8 @ rk[15]=rk[7]^rk[14]
- str r8,[r11,#-8]
- str r9,[r11,#-4]
- b .L256_loop
-
-.align 2
-.Ldone: mov r0,#0
- ldmia sp!,{r4-r12,lr}
-.Labrt:
-#if __ARM_ARCH__>=5
- bx lr @ .word 0xe12fff1e
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-.global private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,%function
-.align 5
-private_AES_set_decrypt_key:
- str lr,[sp,#-4]! @ push lr
- bl _armv4_AES_set_encrypt_key
- teq r0,#0
- ldr lr,[sp],#4 @ pop lr
- bne .Labrt
-
- mov r0,r2 @ AES_set_encrypt_key preserves r2,
- mov r1,r2 @ which is AES_KEY *key
- b _armv4_AES_set_enc2dec_key
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-
-@ void AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out)
-.global AES_set_enc2dec_key
-.type AES_set_enc2dec_key,%function
-.align 5
-AES_set_enc2dec_key:
-_armv4_AES_set_enc2dec_key:
- stmdb sp!,{r4-r12,lr}
-
- ldr r12,[r0,#240]
- mov r7,r0 @ input
- add r8,r0,r12,lsl#4
- mov r11,r1 @ output
- add r10,r1,r12,lsl#4
- str r12,[r1,#240]
-
-.Linv: ldr r0,[r7],#16
- ldr r1,[r7,#-12]
- ldr r2,[r7,#-8]
- ldr r3,[r7,#-4]
- ldr r4,[r8],#-16
- ldr r5,[r8,#16+4]
- ldr r6,[r8,#16+8]
- ldr r9,[r8,#16+12]
- str r0,[r10],#-16
- str r1,[r10,#16+4]
- str r2,[r10,#16+8]
- str r3,[r10,#16+12]
- str r4,[r11],#16
- str r5,[r11,#-12]
- str r6,[r11,#-8]
- str r9,[r11,#-4]
- teq r7,r8
- bne .Linv
-
- ldr r0,[r7]
- ldr r1,[r7,#4]
- ldr r2,[r7,#8]
- ldr r3,[r7,#12]
- str r0,[r11]
- str r1,[r11,#4]
- str r2,[r11,#8]
- str r3,[r11,#12]
- sub r11,r11,r12,lsl#3
- ldr r0,[r11,#16]! @ prefetch tp1
- mov r7,#0x80
- mov r8,#0x1b
- orr r7,r7,#0x8000
- orr r8,r8,#0x1b00
- orr r7,r7,r7,lsl#16
- orr r8,r8,r8,lsl#16
- sub r12,r12,#1
- mvn r9,r7
- mov r12,r12,lsl#2 @ (rounds-1)*4
-
-.Lmix: and r4,r0,r7
- and r1,r0,r9
- sub r4,r4,r4,lsr#7
- and r4,r4,r8
- eor r1,r4,r1,lsl#1 @ tp2
-
- and r4,r1,r7
- and r2,r1,r9
- sub r4,r4,r4,lsr#7
- and r4,r4,r8
- eor r2,r4,r2,lsl#1 @ tp4
-
- and r4,r2,r7
- and r3,r2,r9
- sub r4,r4,r4,lsr#7
- and r4,r4,r8
- eor r3,r4,r3,lsl#1 @ tp8
-
- eor r4,r1,r2
- eor r5,r0,r3 @ tp9
- eor r4,r4,r3 @ tpe
- eor r4,r4,r1,ror#24
- eor r4,r4,r5,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8)
- eor r4,r4,r2,ror#16
- eor r4,r4,r5,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16)
- eor r4,r4,r5,ror#8 @ ^= ROTATE(tp9,24)
-
- ldr r0,[r11,#4] @ prefetch tp1
- str r4,[r11],#4
- subs r12,r12,#1
- bne .Lmix
-
- mov r0,#0
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size AES_set_enc2dec_key,.-AES_set_enc2dec_key
-
-.type AES_Td,%object
-.align 5
-AES_Td:
-.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
-.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
-.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
-.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
-.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
-.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
-.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
-.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
-.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
-.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
-.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
-.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
-.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
-.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
-.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
-.word 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
-.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
-.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
-.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
-.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
-.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
-.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
-.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
-.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
-.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
-.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
-.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
-.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
-.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
-.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
-.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
-.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
-.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
-.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
-.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
-.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
-.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
-.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
-.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
-.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
-.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
-.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
-.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
-.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
-.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
-.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
-.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
-.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
-.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
-.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
-.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
-.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
-.word 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
-.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
-.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
-.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
-.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
-.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
-.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
-.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
-.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
-.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
-.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
-.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
-@ Td4[256]
-.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.size AES_Td,.-AES_Td
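
AES_Td mirrors AES_Te for decryption: each word packs the inverse S-box output premultiplied by the InvMixColumns column {0e,09,0d,0b} (the first entry 0x51f4a750 is {0e.52, 09.52, 0d.52, 0b.52}), and Td4 is the plain inverse S-box. The decryption loop below then combines them with the same single-table rotation trick as the encryption path, with the column indices running in the opposite direction. A generator sketch matching te0_entry() earlier (gmul() is a generic GF(2^8) multiply built on the xtime() sketch; illustrative names only):

    /* GF(2^8) multiply by repeated doubling. */
    static uint8_t gmul(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;
        while (b) {
            if (b & 1) r ^= a;
            a = xtime(a);
            b >>= 1;
        }
        return r;
    }

    /* Build one Td0 word: InvMixColumns coefficients applied to the
     * inverse S-box output Si[x], packed most-significant byte first. */
    static uint32_t td0_entry(uint8_t x, const uint8_t isbox[256])
    {
        uint8_t s = isbox[x];
        return ((uint32_t)gmul(s, 0x0e) << 24)
             | ((uint32_t)gmul(s, 0x09) << 16)
             | ((uint32_t)gmul(s, 0x0d) <<  8)
             |  (uint32_t)gmul(s, 0x0b);
    }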
-
-@ void AES_decrypt(const unsigned char *in, unsigned char *out,
-@ const AES_KEY *key) {
-.global AES_decrypt
-.type AES_decrypt,%function
-.align 5
-AES_decrypt:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ AES_decrypt
-#else
- adr r3,AES_decrypt
-#endif
- stmdb sp!,{r1,r4-r12,lr}
- mov r12,r0 @ inp
- mov r11,r2
- sub r10,r3,#AES_decrypt-AES_Td @ Td
-#if __ARM_ARCH__<7
- ldrb r0,[r12,#3] @ load input data in endian-neutral
- ldrb r4,[r12,#2] @ manner...
- ldrb r5,[r12,#1]
- ldrb r6,[r12,#0]
- orr r0,r0,r4,lsl#8
- ldrb r1,[r12,#7]
- orr r0,r0,r5,lsl#16
- ldrb r4,[r12,#6]
- orr r0,r0,r6,lsl#24
- ldrb r5,[r12,#5]
- ldrb r6,[r12,#4]
- orr r1,r1,r4,lsl#8
- ldrb r2,[r12,#11]
- orr r1,r1,r5,lsl#16
- ldrb r4,[r12,#10]
- orr r1,r1,r6,lsl#24
- ldrb r5,[r12,#9]
- ldrb r6,[r12,#8]
- orr r2,r2,r4,lsl#8
- ldrb r3,[r12,#15]
- orr r2,r2,r5,lsl#16
- ldrb r4,[r12,#14]
- orr r2,r2,r6,lsl#24
- ldrb r5,[r12,#13]
- ldrb r6,[r12,#12]
- orr r3,r3,r4,lsl#8
- orr r3,r3,r5,lsl#16
- orr r3,r3,r6,lsl#24
-#else
- ldr r0,[r12,#0]
- ldr r1,[r12,#4]
- ldr r2,[r12,#8]
- ldr r3,[r12,#12]
-#ifdef __ARMEL__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-#endif
-#endif
- bl _armv4_AES_decrypt
-
- ldr r12,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-#endif
- str r0,[r12,#0]
- str r1,[r12,#4]
- str r2,[r12,#8]
- str r3,[r12,#12]
-#else
- mov r4,r0,lsr#24 @ write output in endian-neutral
- mov r5,r0,lsr#16 @ manner...
- mov r6,r0,lsr#8
- strb r4,[r12,#0]
- strb r5,[r12,#1]
- mov r4,r1,lsr#24
- strb r6,[r12,#2]
- mov r5,r1,lsr#16
- strb r0,[r12,#3]
- mov r6,r1,lsr#8
- strb r4,[r12,#4]
- strb r5,[r12,#5]
- mov r4,r2,lsr#24
- strb r6,[r12,#6]
- mov r5,r2,lsr#16
- strb r1,[r12,#7]
- mov r6,r2,lsr#8
- strb r4,[r12,#8]
- strb r5,[r12,#9]
- mov r4,r3,lsr#24
- strb r6,[r12,#10]
- mov r5,r3,lsr#16
- strb r2,[r12,#11]
- mov r6,r3,lsr#8
- strb r4,[r12,#12]
- strb r5,[r12,#13]
- strb r6,[r12,#14]
- strb r3,[r12,#15]
-#endif
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size AES_decrypt,.-AES_decrypt
-
-.type _armv4_AES_decrypt,%function
-.align 2
-_armv4_AES_decrypt:
- str lr,[sp,#-4]! @ push lr
- ldmia r11!,{r4-r7}
- eor r0,r0,r4
- ldr r12,[r11,#240-16]
- eor r1,r1,r5
- eor r2,r2,r6
- eor r3,r3,r7
- sub r12,r12,#1
- mov lr,#255
-
- and r7,lr,r0,lsr#16
- and r8,lr,r0,lsr#8
- and r9,lr,r0
- mov r0,r0,lsr#24
-.Ldec_loop:
- ldr r4,[r10,r7,lsl#2] @ Td1[s0>>16]
- and r7,lr,r1 @ i0
- ldr r5,[r10,r8,lsl#2] @ Td2[s0>>8]
- and r8,lr,r1,lsr#16
- ldr r6,[r10,r9,lsl#2] @ Td3[s0>>0]
- and r9,lr,r1,lsr#8
- ldr r0,[r10,r0,lsl#2] @ Td0[s0>>24]
- mov r1,r1,lsr#24
-
- ldr r7,[r10,r7,lsl#2] @ Td3[s1>>0]
- ldr r8,[r10,r8,lsl#2] @ Td1[s1>>16]
- ldr r9,[r10,r9,lsl#2] @ Td2[s1>>8]
- eor r0,r0,r7,ror#24
- ldr r1,[r10,r1,lsl#2] @ Td0[s1>>24]
- and r7,lr,r2,lsr#8 @ i0
- eor r5,r8,r5,ror#8
- and r8,lr,r2 @ i1
- eor r6,r9,r6,ror#8
- and r9,lr,r2,lsr#16
- ldr r7,[r10,r7,lsl#2] @ Td2[s2>>8]
- eor r1,r1,r4,ror#8
- ldr r8,[r10,r8,lsl#2] @ Td3[s2>>0]
- mov r2,r2,lsr#24
-
- ldr r9,[r10,r9,lsl#2] @ Td1[s2>>16]
- eor r0,r0,r7,ror#16
- ldr r2,[r10,r2,lsl#2] @ Td0[s2>>24]
- and r7,lr,r3,lsr#16 @ i0
- eor r1,r1,r8,ror#24
- and r8,lr,r3,lsr#8 @ i1
- eor r6,r9,r6,ror#8
- and r9,lr,r3 @ i2
- ldr r7,[r10,r7,lsl#2] @ Td1[s3>>16]
- eor r2,r2,r5,ror#8
- ldr r8,[r10,r8,lsl#2] @ Td2[s3>>8]
- mov r3,r3,lsr#24
-
- ldr r9,[r10,r9,lsl#2] @ Td3[s3>>0]
- eor r0,r0,r7,ror#8
- ldr r7,[r11],#16
- eor r1,r1,r8,ror#16
- ldr r3,[r10,r3,lsl#2] @ Td0[s3>>24]
- eor r2,r2,r9,ror#24
-
- ldr r4,[r11,#-12]
- eor r0,r0,r7
- ldr r5,[r11,#-8]
- eor r3,r3,r6,ror#8
- ldr r6,[r11,#-4]
- and r7,lr,r0,lsr#16
- eor r1,r1,r4
- and r8,lr,r0,lsr#8
- eor r2,r2,r5
- and r9,lr,r0
- eor r3,r3,r6
- mov r0,r0,lsr#24
-
- subs r12,r12,#1
- bne .Ldec_loop
-
- add r10,r10,#1024
-
- ldr r5,[r10,#0] @ prefetch Td4
- ldr r6,[r10,#32]
- ldr r4,[r10,#64]
- ldr r5,[r10,#96]
- ldr r6,[r10,#128]
- ldr r4,[r10,#160]
- ldr r5,[r10,#192]
- ldr r6,[r10,#224]
-
- ldrb r0,[r10,r0] @ Td4[s0>>24]
- ldrb r4,[r10,r7] @ Td4[s0>>16]
- and r7,lr,r1 @ i0
- ldrb r5,[r10,r8] @ Td4[s0>>8]
- and r8,lr,r1,lsr#16
- ldrb r6,[r10,r9] @ Td4[s0>>0]
- and r9,lr,r1,lsr#8
-
- add r1,r10,r1,lsr#24
- ldrb r7,[r10,r7] @ Td4[s1>>0]
- ldrb r1,[r1] @ Td4[s1>>24]
- ldrb r8,[r10,r8] @ Td4[s1>>16]
- eor r0,r7,r0,lsl#24
- ldrb r9,[r10,r9] @ Td4[s1>>8]
- eor r1,r4,r1,lsl#8
- and r7,lr,r2,lsr#8 @ i0
- eor r5,r5,r8,lsl#8
- and r8,lr,r2 @ i1
- ldrb r7,[r10,r7] @ Td4[s2>>8]
- eor r6,r6,r9,lsl#8
- ldrb r8,[r10,r8] @ Td4[s2>>0]
- and r9,lr,r2,lsr#16
-
- add r2,r10,r2,lsr#24
- ldrb r2,[r2] @ Td4[s2>>24]
- eor r0,r0,r7,lsl#8
- ldrb r9,[r10,r9] @ Td4[s2>>16]
- eor r1,r8,r1,lsl#16
- and r7,lr,r3,lsr#16 @ i0
- eor r2,r5,r2,lsl#16
- and r8,lr,r3,lsr#8 @ i1
- ldrb r7,[r10,r7] @ Td4[s3>>16]
- eor r6,r6,r9,lsl#16
- ldrb r8,[r10,r8] @ Td4[s3>>8]
- and r9,lr,r3 @ i2
-
- add r3,r10,r3,lsr#24
- ldrb r9,[r10,r9] @ Td4[s3>>0]
- ldrb r3,[r3] @ Td4[s3>>24]
- eor r0,r0,r7,lsl#16
- ldr r7,[r11,#0]
- eor r1,r1,r8,lsl#8
- ldr r4,[r11,#4]
- eor r2,r9,r2,lsl#8
- ldr r5,[r11,#8]
- eor r3,r6,r3,lsl#24
- ldr r6,[r11,#12]
-
- eor r0,r0,r7
- eor r1,r1,r4
- eor r2,r2,r5
- eor r3,r3,r6
-
- sub r10,r10,#1024
- ldr pc,[sp],#4 @ pop and return
-.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
-.asciz "AES for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
diff --git a/app/openssl/crypto/aes/asm/aes-armv4.pl b/app/openssl/crypto/aes/asm/aes-armv4.pl
deleted file mode 100644
index 4f891708..00000000
--- a/app/openssl/crypto/aes/asm/aes-armv4.pl
+++ /dev/null
@@ -1,1217 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for ARMv4
-
-# January 2007.
-#
-# The code uses a single 1K S-box and is more than twice as fast as code
-# generated by gcc-3.4.1. This is thanks to a unique feature of the ARMv4
-# ISA, which allows a logical or arithmetic operation to be merged with a
-# shift or rotate in one instruction, emitting the combined result every
-# cycle. The module is endian-neutral. Performance is ~42 cycles/byte for
-# a 128-bit key [on a single-issue XScale PXA250 core].
-
-# May 2007.
-#
-# AES_set_[en|de]crypt_key is added.
-
-# July 2010.
-#
-# Rescheduling for a dual-issue pipeline resulted in a 12% improvement on
-# a Cortex-A8 core and ~25 cycles per byte processed with a 128-bit key.
-
-# February 2011.
-#
-# Profiler-assisted and platform-specific optimization resulted in a 16%
-# improvement on a Cortex-A8 core and ~21.5 cycles per byte.
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-$s0="r0";
-$s1="r1";
-$s2="r2";
-$s3="r3";
-$t1="r4";
-$t2="r5";
-$t3="r6";
-$i1="r7";
-$i2="r8";
-$i3="r9";
-
-$tbl="r10";
-$key="r11";
-$rounds="r12";
-
-$code=<<___;
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-#endif
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type AES_Te,%object
-.align 5
-AES_Te:
-.word 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
-.word 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
-.word 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
-.word 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
-.word 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
-.word 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
-.word 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
-.word 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
-.word 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
-.word 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
-.word 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
-.word 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
-.word 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
-.word 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
-.word 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
-.word 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
-.word 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
-.word 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
-.word 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
-.word 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
-.word 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
-.word 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
-.word 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
-.word 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
-.word 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
-.word 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
-.word 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
-.word 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
-.word 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
-.word 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
-.word 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
-.word 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
-.word 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
-.word 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
-.word 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
-.word 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
-.word 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
-.word 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
-.word 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
-.word 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
-.word 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
-.word 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
-.word 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
-.word 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
-.word 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
-.word 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
-.word 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
-.word 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
-.word 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
-.word 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
-.word 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
-.word 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
-.word 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
-.word 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
-.word 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
-.word 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
-.word 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
-.word 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
-.word 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
-.word 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
-.word 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
-.word 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
-.word 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
-.word 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
-@ Te4[256]
-.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-@ rcon[]
-.word 0x01000000, 0x02000000, 0x04000000, 0x08000000
-.word 0x10000000, 0x20000000, 0x40000000, 0x80000000
-.word 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
-.size AES_Te,.-AES_Te
-
-@ void AES_encrypt(const unsigned char *in, unsigned char *out,
-@ const AES_KEY *key) {
-.global AES_encrypt
-.type AES_encrypt,%function
-.align 5
-AES_encrypt:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ AES_encrypt
-#else
- adr r3,AES_encrypt
-#endif
- stmdb sp!,{r1,r4-r12,lr}
- mov $rounds,r0 @ inp
- mov $key,r2
- sub $tbl,r3,#AES_encrypt-AES_Te @ Te
-#if __ARM_ARCH__<7
- ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
- ldrb $t1,[$rounds,#2] @ manner...
- ldrb $t2,[$rounds,#1]
- ldrb $t3,[$rounds,#0]
- orr $s0,$s0,$t1,lsl#8
- ldrb $s1,[$rounds,#7]
- orr $s0,$s0,$t2,lsl#16
- ldrb $t1,[$rounds,#6]
- orr $s0,$s0,$t3,lsl#24
- ldrb $t2,[$rounds,#5]
- ldrb $t3,[$rounds,#4]
- orr $s1,$s1,$t1,lsl#8
- ldrb $s2,[$rounds,#11]
- orr $s1,$s1,$t2,lsl#16
- ldrb $t1,[$rounds,#10]
- orr $s1,$s1,$t3,lsl#24
- ldrb $t2,[$rounds,#9]
- ldrb $t3,[$rounds,#8]
- orr $s2,$s2,$t1,lsl#8
- ldrb $s3,[$rounds,#15]
- orr $s2,$s2,$t2,lsl#16
- ldrb $t1,[$rounds,#14]
- orr $s2,$s2,$t3,lsl#24
- ldrb $t2,[$rounds,#13]
- ldrb $t3,[$rounds,#12]
- orr $s3,$s3,$t1,lsl#8
- orr $s3,$s3,$t2,lsl#16
- orr $s3,$s3,$t3,lsl#24
-#else
- ldr $s0,[$rounds,#0]
- ldr $s1,[$rounds,#4]
- ldr $s2,[$rounds,#8]
- ldr $s3,[$rounds,#12]
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
-#endif
- bl _armv4_AES_encrypt
-
- ldr $rounds,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
- str $s0,[$rounds,#0]
- str $s1,[$rounds,#4]
- str $s2,[$rounds,#8]
- str $s3,[$rounds,#12]
-#else
- mov $t1,$s0,lsr#24 @ write output in endian-neutral
- mov $t2,$s0,lsr#16 @ manner...
- mov $t3,$s0,lsr#8
- strb $t1,[$rounds,#0]
- strb $t2,[$rounds,#1]
- mov $t1,$s1,lsr#24
- strb $t3,[$rounds,#2]
- mov $t2,$s1,lsr#16
- strb $s0,[$rounds,#3]
- mov $t3,$s1,lsr#8
- strb $t1,[$rounds,#4]
- strb $t2,[$rounds,#5]
- mov $t1,$s2,lsr#24
- strb $t3,[$rounds,#6]
- mov $t2,$s2,lsr#16
- strb $s1,[$rounds,#7]
- mov $t3,$s2,lsr#8
- strb $t1,[$rounds,#8]
- strb $t2,[$rounds,#9]
- mov $t1,$s3,lsr#24
- strb $t3,[$rounds,#10]
- mov $t2,$s3,lsr#16
- strb $s2,[$rounds,#11]
- mov $t3,$s3,lsr#8
- strb $t1,[$rounds,#12]
- strb $t2,[$rounds,#13]
- strb $t3,[$rounds,#14]
- strb $s3,[$rounds,#15]
-#endif
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- bx lr @ interoperable with Thumb ISA:-)
-#endif
-.size AES_encrypt,.-AES_encrypt
-
-.type _armv4_AES_encrypt,%function
-.align 2
-_armv4_AES_encrypt:
- str lr,[sp,#-4]! @ push lr
- ldmia $key!,{$t1-$i1}
- eor $s0,$s0,$t1
- ldr $rounds,[$key,#240-16]
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
- sub $rounds,$rounds,#1
- mov lr,#255
-
- and $i1,lr,$s0
- and $i2,lr,$s0,lsr#8
- and $i3,lr,$s0,lsr#16
- mov $s0,$s0,lsr#24
-.Lenc_loop:
- ldr $t1,[$tbl,$i1,lsl#2] @ Te3[s0>>0]
- and $i1,lr,$s1,lsr#16 @ i0
- ldr $t2,[$tbl,$i2,lsl#2] @ Te2[s0>>8]
- and $i2,lr,$s1
- ldr $t3,[$tbl,$i3,lsl#2] @ Te1[s0>>16]
- and $i3,lr,$s1,lsr#8
- ldr $s0,[$tbl,$s0,lsl#2] @ Te0[s0>>24]
- mov $s1,$s1,lsr#24
-
- ldr $i1,[$tbl,$i1,lsl#2] @ Te1[s1>>16]
- ldr $i2,[$tbl,$i2,lsl#2] @ Te3[s1>>0]
- ldr $i3,[$tbl,$i3,lsl#2] @ Te2[s1>>8]
- eor $s0,$s0,$i1,ror#8
- ldr $s1,[$tbl,$s1,lsl#2] @ Te0[s1>>24]
- and $i1,lr,$s2,lsr#8 @ i0
- eor $t2,$t2,$i2,ror#8
- and $i2,lr,$s2,lsr#16 @ i1
- eor $t3,$t3,$i3,ror#8
- and $i3,lr,$s2
- ldr $i1,[$tbl,$i1,lsl#2] @ Te2[s2>>8]
- eor $s1,$s1,$t1,ror#24
- ldr $i2,[$tbl,$i2,lsl#2] @ Te1[s2>>16]
- mov $s2,$s2,lsr#24
-
- ldr $i3,[$tbl,$i3,lsl#2] @ Te3[s2>>0]
- eor $s0,$s0,$i1,ror#16
- ldr $s2,[$tbl,$s2,lsl#2] @ Te0[s2>>24]
- and $i1,lr,$s3 @ i0
- eor $s1,$s1,$i2,ror#8
- and $i2,lr,$s3,lsr#8 @ i1
- eor $t3,$t3,$i3,ror#16
- and $i3,lr,$s3,lsr#16 @ i2
- ldr $i1,[$tbl,$i1,lsl#2] @ Te3[s3>>0]
- eor $s2,$s2,$t2,ror#16
- ldr $i2,[$tbl,$i2,lsl#2] @ Te2[s3>>8]
- mov $s3,$s3,lsr#24
-
- ldr $i3,[$tbl,$i3,lsl#2] @ Te1[s3>>16]
- eor $s0,$s0,$i1,ror#24
- ldr $i1,[$key],#16
- eor $s1,$s1,$i2,ror#16
- ldr $s3,[$tbl,$s3,lsl#2] @ Te0[s3>>24]
- eor $s2,$s2,$i3,ror#8
- ldr $t1,[$key,#-12]
- eor $s3,$s3,$t3,ror#8
-
- ldr $t2,[$key,#-8]
- eor $s0,$s0,$i1
- ldr $t3,[$key,#-4]
- and $i1,lr,$s0
- eor $s1,$s1,$t1
- and $i2,lr,$s0,lsr#8
- eor $s2,$s2,$t2
- and $i3,lr,$s0,lsr#16
- eor $s3,$s3,$t3
- mov $s0,$s0,lsr#24
-
- subs $rounds,$rounds,#1
- bne .Lenc_loop
-
- add $tbl,$tbl,#2
-
- ldrb $t1,[$tbl,$i1,lsl#2] @ Te4[s0>>0]
- and $i1,lr,$s1,lsr#16 @ i0
- ldrb $t2,[$tbl,$i2,lsl#2] @ Te4[s0>>8]
- and $i2,lr,$s1
- ldrb $t3,[$tbl,$i3,lsl#2] @ Te4[s0>>16]
- and $i3,lr,$s1,lsr#8
- ldrb $s0,[$tbl,$s0,lsl#2] @ Te4[s0>>24]
- mov $s1,$s1,lsr#24
-
- ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s1>>16]
- ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s1>>0]
- ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s1>>8]
- eor $s0,$i1,$s0,lsl#8
- ldrb $s1,[$tbl,$s1,lsl#2] @ Te4[s1>>24]
- and $i1,lr,$s2,lsr#8 @ i0
- eor $t2,$i2,$t2,lsl#8
- and $i2,lr,$s2,lsr#16 @ i1
- eor $t3,$i3,$t3,lsl#8
- and $i3,lr,$s2
- ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s2>>8]
- eor $s1,$t1,$s1,lsl#24
- ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s2>>16]
- mov $s2,$s2,lsr#24
-
- ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s2>>0]
- eor $s0,$i1,$s0,lsl#8
- ldrb $s2,[$tbl,$s2,lsl#2] @ Te4[s2>>24]
- and $i1,lr,$s3 @ i0
- eor $s1,$s1,$i2,lsl#16
- and $i2,lr,$s3,lsr#8 @ i1
- eor $t3,$i3,$t3,lsl#8
- and $i3,lr,$s3,lsr#16 @ i2
- ldrb $i1,[$tbl,$i1,lsl#2] @ Te4[s3>>0]
- eor $s2,$t2,$s2,lsl#24
- ldrb $i2,[$tbl,$i2,lsl#2] @ Te4[s3>>8]
- mov $s3,$s3,lsr#24
-
- ldrb $i3,[$tbl,$i3,lsl#2] @ Te4[s3>>16]
- eor $s0,$i1,$s0,lsl#8
- ldr $i1,[$key,#0]
- ldrb $s3,[$tbl,$s3,lsl#2] @ Te4[s3>>24]
- eor $s1,$s1,$i2,lsl#8
- ldr $t1,[$key,#4]
- eor $s2,$s2,$i3,lsl#16
- ldr $t2,[$key,#8]
- eor $s3,$t3,$s3,lsl#24
- ldr $t3,[$key,#12]
-
- eor $s0,$s0,$i1
- eor $s1,$s1,$t1
- eor $s2,$s2,$t2
- eor $s3,$s3,$t3
-
- sub $tbl,$tbl,#2
- ldr pc,[sp],#4 @ pop and return
-.size _armv4_AES_encrypt,.-_armv4_AES_encrypt
-
-.global private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,%function
-.align 5
-private_AES_set_encrypt_key:
-_armv4_AES_set_encrypt_key:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ AES_set_encrypt_key
-#else
- adr r3,private_AES_set_encrypt_key
-#endif
- teq r0,#0
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- moveq r0,#-1
- beq .Labrt
- teq r2,#0
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- moveq r0,#-1
- beq .Labrt
-
- teq r1,#128
- beq .Lok
- teq r1,#192
- beq .Lok
- teq r1,#256
-#if __ARM_ARCH__>=7
- itt ne @ Thumb2 thing, sanity check in ARM
-#endif
- movne r0,#-1
- bne .Labrt
-
-.Lok: stmdb sp!,{r4-r12,lr}
- sub $tbl,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024 @ Te4
-
- mov $rounds,r0 @ inp
- mov lr,r1 @ bits
- mov $key,r2 @ key
-
-#if __ARM_ARCH__<7
- ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
- ldrb $t1,[$rounds,#2] @ manner...
- ldrb $t2,[$rounds,#1]
- ldrb $t3,[$rounds,#0]
- orr $s0,$s0,$t1,lsl#8
- ldrb $s1,[$rounds,#7]
- orr $s0,$s0,$t2,lsl#16
- ldrb $t1,[$rounds,#6]
- orr $s0,$s0,$t3,lsl#24
- ldrb $t2,[$rounds,#5]
- ldrb $t3,[$rounds,#4]
- orr $s1,$s1,$t1,lsl#8
- ldrb $s2,[$rounds,#11]
- orr $s1,$s1,$t2,lsl#16
- ldrb $t1,[$rounds,#10]
- orr $s1,$s1,$t3,lsl#24
- ldrb $t2,[$rounds,#9]
- ldrb $t3,[$rounds,#8]
- orr $s2,$s2,$t1,lsl#8
- ldrb $s3,[$rounds,#15]
- orr $s2,$s2,$t2,lsl#16
- ldrb $t1,[$rounds,#14]
- orr $s2,$s2,$t3,lsl#24
- ldrb $t2,[$rounds,#13]
- ldrb $t3,[$rounds,#12]
- orr $s3,$s3,$t1,lsl#8
- str $s0,[$key],#16
- orr $s3,$s3,$t2,lsl#16
- str $s1,[$key,#-12]
- orr $s3,$s3,$t3,lsl#24
- str $s2,[$key,#-8]
- str $s3,[$key,#-4]
-#else
- ldr $s0,[$rounds,#0]
- ldr $s1,[$rounds,#4]
- ldr $s2,[$rounds,#8]
- ldr $s3,[$rounds,#12]
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
- str $s0,[$key],#16
- str $s1,[$key,#-12]
- str $s2,[$key,#-8]
- str $s3,[$key,#-4]
-#endif
-
- teq lr,#128
- bne .Lnot128
- mov $rounds,#10
- str $rounds,[$key,#240-16]
- add $t3,$tbl,#256 @ rcon
- mov lr,#255
-
-.L128_loop:
- and $t2,lr,$s3,lsr#24
- and $i1,lr,$s3,lsr#16
- ldrb $t2,[$tbl,$t2]
- and $i2,lr,$s3,lsr#8
- ldrb $i1,[$tbl,$i1]
- and $i3,lr,$s3
- ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#24
- ldrb $i3,[$tbl,$i3]
- orr $t2,$t2,$i2,lsl#16
- ldr $t1,[$t3],#4 @ rcon[i++]
- orr $t2,$t2,$i3,lsl#8
- eor $t2,$t2,$t1
- eor $s0,$s0,$t2 @ rk[4]=rk[0]^...
- eor $s1,$s1,$s0 @ rk[5]=rk[1]^rk[4]
- str $s0,[$key],#16
- eor $s2,$s2,$s1 @ rk[6]=rk[2]^rk[5]
- str $s1,[$key,#-12]
- eor $s3,$s3,$s2 @ rk[7]=rk[3]^rk[6]
- str $s2,[$key,#-8]
- subs $rounds,$rounds,#1
- str $s3,[$key,#-4]
- bne .L128_loop
- sub r2,$key,#176
- b .Ldone
-
-.Lnot128:
-#if __ARM_ARCH__<7
- ldrb $i2,[$rounds,#19]
- ldrb $t1,[$rounds,#18]
- ldrb $t2,[$rounds,#17]
- ldrb $t3,[$rounds,#16]
- orr $i2,$i2,$t1,lsl#8
- ldrb $i3,[$rounds,#23]
- orr $i2,$i2,$t2,lsl#16
- ldrb $t1,[$rounds,#22]
- orr $i2,$i2,$t3,lsl#24
- ldrb $t2,[$rounds,#21]
- ldrb $t3,[$rounds,#20]
- orr $i3,$i3,$t1,lsl#8
- orr $i3,$i3,$t2,lsl#16
- str $i2,[$key],#8
- orr $i3,$i3,$t3,lsl#24
- str $i3,[$key,#-4]
-#else
- ldr $i2,[$rounds,#16]
- ldr $i3,[$rounds,#20]
-#ifdef __ARMEL__
- rev $i2,$i2
- rev $i3,$i3
-#endif
- str $i2,[$key],#8
- str $i3,[$key,#-4]
-#endif
-
- teq lr,#192
- bne .Lnot192
- mov $rounds,#12
- str $rounds,[$key,#240-24]
- add $t3,$tbl,#256 @ rcon
- mov lr,#255
- mov $rounds,#8
-
-.L192_loop:
- and $t2,lr,$i3,lsr#24
- and $i1,lr,$i3,lsr#16
- ldrb $t2,[$tbl,$t2]
- and $i2,lr,$i3,lsr#8
- ldrb $i1,[$tbl,$i1]
- and $i3,lr,$i3
- ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#24
- ldrb $i3,[$tbl,$i3]
- orr $t2,$t2,$i2,lsl#16
- ldr $t1,[$t3],#4 @ rcon[i++]
- orr $t2,$t2,$i3,lsl#8
- eor $i3,$t2,$t1
- eor $s0,$s0,$i3 @ rk[6]=rk[0]^...
- eor $s1,$s1,$s0 @ rk[7]=rk[1]^rk[6]
- str $s0,[$key],#24
- eor $s2,$s2,$s1 @ rk[8]=rk[2]^rk[7]
- str $s1,[$key,#-20]
- eor $s3,$s3,$s2 @ rk[9]=rk[3]^rk[8]
- str $s2,[$key,#-16]
- subs $rounds,$rounds,#1
- str $s3,[$key,#-12]
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- subeq r2,$key,#216
- beq .Ldone
-
- ldr $i1,[$key,#-32]
- ldr $i2,[$key,#-28]
- eor $i1,$i1,$s3 @ rk[10]=rk[4]^rk[9]
- eor $i3,$i2,$i1 @ rk[11]=rk[5]^rk[10]
- str $i1,[$key,#-8]
- str $i3,[$key,#-4]
- b .L192_loop
-
-.Lnot192:
-#if __ARM_ARCH__<7
- ldrb $i2,[$rounds,#27]
- ldrb $t1,[$rounds,#26]
- ldrb $t2,[$rounds,#25]
- ldrb $t3,[$rounds,#24]
- orr $i2,$i2,$t1,lsl#8
- ldrb $i3,[$rounds,#31]
- orr $i2,$i2,$t2,lsl#16
- ldrb $t1,[$rounds,#30]
- orr $i2,$i2,$t3,lsl#24
- ldrb $t2,[$rounds,#29]
- ldrb $t3,[$rounds,#28]
- orr $i3,$i3,$t1,lsl#8
- orr $i3,$i3,$t2,lsl#16
- str $i2,[$key],#8
- orr $i3,$i3,$t3,lsl#24
- str $i3,[$key,#-4]
-#else
- ldr $i2,[$rounds,#24]
- ldr $i3,[$rounds,#28]
-#ifdef __ARMEL__
- rev $i2,$i2
- rev $i3,$i3
-#endif
- str $i2,[$key],#8
- str $i3,[$key,#-4]
-#endif
-
- mov $rounds,#14
- str $rounds,[$key,#240-32]
- add $t3,$tbl,#256 @ rcon
- mov lr,#255
- mov $rounds,#7
-
-.L256_loop:
- and $t2,lr,$i3,lsr#24
- and $i1,lr,$i3,lsr#16
- ldrb $t2,[$tbl,$t2]
- and $i2,lr,$i3,lsr#8
- ldrb $i1,[$tbl,$i1]
- and $i3,lr,$i3
- ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#24
- ldrb $i3,[$tbl,$i3]
- orr $t2,$t2,$i2,lsl#16
- ldr $t1,[$t3],#4 @ rcon[i++]
- orr $t2,$t2,$i3,lsl#8
- eor $i3,$t2,$t1
- eor $s0,$s0,$i3 @ rk[8]=rk[0]^...
- eor $s1,$s1,$s0 @ rk[9]=rk[1]^rk[8]
- str $s0,[$key],#32
- eor $s2,$s2,$s1 @ rk[10]=rk[2]^rk[9]
- str $s1,[$key,#-28]
- eor $s3,$s3,$s2 @ rk[11]=rk[3]^rk[10]
- str $s2,[$key,#-24]
- subs $rounds,$rounds,#1
- str $s3,[$key,#-20]
-#if __ARM_ARCH__>=7
- itt eq @ Thumb2 thing, sanity check in ARM
-#endif
- subeq r2,$key,#256
- beq .Ldone
-
- and $t2,lr,$s3
- and $i1,lr,$s3,lsr#8
- ldrb $t2,[$tbl,$t2]
- and $i2,lr,$s3,lsr#16
- ldrb $i1,[$tbl,$i1]
- and $i3,lr,$s3,lsr#24
- ldrb $i2,[$tbl,$i2]
- orr $t2,$t2,$i1,lsl#8
- ldrb $i3,[$tbl,$i3]
- orr $t2,$t2,$i2,lsl#16
- ldr $t1,[$key,#-48]
- orr $t2,$t2,$i3,lsl#24
-
- ldr $i1,[$key,#-44]
- ldr $i2,[$key,#-40]
- eor $t1,$t1,$t2 @ rk[12]=rk[4]^...
- ldr $i3,[$key,#-36]
- eor $i1,$i1,$t1 @ rk[13]=rk[5]^rk[12]
- str $t1,[$key,#-16]
- eor $i2,$i2,$i1 @ rk[14]=rk[6]^rk[13]
- str $i1,[$key,#-12]
- eor $i3,$i3,$i2 @ rk[15]=rk[7]^rk[14]
- str $i2,[$key,#-8]
- str $i3,[$key,#-4]
- b .L256_loop
-
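AES-256 adds one wrinkle: in the middle of each eight-word block the schedule applies SubWord without the rotation, which is what the second half of .L256_loop (the S-box lookups on $s3 with no byte reordering) computes. A sketch, again with illustrative names:

/* SubWord only, no RotWord */
static uint32_t sub_word(const uint8_t Sbox[256], uint32_t w)
{
    return ((uint32_t)Sbox[(w >> 24) & 0xff] << 24) |
           ((uint32_t)Sbox[(w >> 16) & 0xff] << 16) |
           ((uint32_t)Sbox[(w >>  8) & 0xff] <<  8) |
            (uint32_t)Sbox[ w        & 0xff];
}

/* rk = start of the current eight-word block; rk[8..11] were filled by
 * the rcon step, now derive rk[12..15] */
static void aes256_second_half_sketch(const uint8_t Sbox[256], uint32_t *rk)
{
    rk[12] = rk[4] ^ sub_word(Sbox, rk[11]);
    rk[13] = rk[5] ^ rk[12];
    rk[14] = rk[6] ^ rk[13];
    rk[15] = rk[7] ^ rk[14];
}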
-.align 2
-.Ldone: mov r0,#0
- ldmia sp!,{r4-r12,lr}
-.Labrt:
-#if __ARM_ARCH__>=5
- ret @ bx lr
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- bx lr @ interoperable with Thumb ISA:-)
-#endif
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-.global private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,%function
-.align 5
-private_AES_set_decrypt_key:
- str lr,[sp,#-4]! @ push lr
- bl _armv4_AES_set_encrypt_key
- teq r0,#0
- ldr lr,[sp],#4 @ pop lr
- bne .Labrt
-
- mov r0,r2 @ AES_set_encrypt_key preserves r2,
- mov r1,r2 @ which is AES_KEY *key
- b _armv4_AES_set_enc2dec_key
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-
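private_AES_set_decrypt_key is just the two building blocks chained: run the encrypt key schedule, then convert it in place (note that r0, r1 and r2 all end up pointing at the same AES_KEY). In C terms, roughly (a sketch; the prototypes merely stand in for the asm routines and are not a real API):

/* hypothetical stand-ins for _armv4_AES_set_encrypt_key and
 * _armv4_AES_set_enc2dec_key */
int  set_encrypt_key_sketch(const unsigned char *user, int bits, void *key);
void enc2dec_key_sketch(const void *in, void *out);

int set_decrypt_key_sketch(const unsigned char *user, int bits, void *key)
{
    int rc = set_encrypt_key_sketch(user, bits, key);
    if (rc == 0)
        enc2dec_key_sketch(key, key);  /* reverse + InvMixColumns, in place */
    return rc;
}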
-@ void AES_set_enc2dec_key(const AES_KEY *inp,AES_KEY *out)
-.global AES_set_enc2dec_key
-.type AES_set_enc2dec_key,%function
-.align 5
-AES_set_enc2dec_key:
-_armv4_AES_set_enc2dec_key:
- stmdb sp!,{r4-r12,lr}
-
- ldr $rounds,[r0,#240]
- mov $i1,r0 @ input
- add $i2,r0,$rounds,lsl#4
-	mov $key,r1			@ output
- add $tbl,r1,$rounds,lsl#4
- str $rounds,[r1,#240]
-
-.Linv: ldr $s0,[$i1],#16
- ldr $s1,[$i1,#-12]
- ldr $s2,[$i1,#-8]
- ldr $s3,[$i1,#-4]
- ldr $t1,[$i2],#-16
- ldr $t2,[$i2,#16+4]
- ldr $t3,[$i2,#16+8]
- ldr $i3,[$i2,#16+12]
- str $s0,[$tbl],#-16
- str $s1,[$tbl,#16+4]
- str $s2,[$tbl,#16+8]
- str $s3,[$tbl,#16+12]
- str $t1,[$key],#16
- str $t2,[$key,#-12]
- str $t3,[$key,#-8]
- str $i3,[$key,#-4]
- teq $i1,$i2
- bne .Linv
-
- ldr $s0,[$i1]
- ldr $s1,[$i1,#4]
- ldr $s2,[$i1,#8]
- ldr $s3,[$i1,#12]
- str $s0,[$key]
- str $s1,[$key,#4]
- str $s2,[$key,#8]
- str $s3,[$key,#12]
- sub $key,$key,$rounds,lsl#3
-___
-$mask80=$i1;
-$mask1b=$i2;
-$mask7f=$i3;
-$code.=<<___;
- ldr $s0,[$key,#16]! @ prefetch tp1
- mov $mask80,#0x80
- mov $mask1b,#0x1b
- orr $mask80,$mask80,#0x8000
- orr $mask1b,$mask1b,#0x1b00
- orr $mask80,$mask80,$mask80,lsl#16
- orr $mask1b,$mask1b,$mask1b,lsl#16
- sub $rounds,$rounds,#1
- mvn $mask7f,$mask80
- mov $rounds,$rounds,lsl#2 @ (rounds-1)*4
-
-.Lmix: and $t1,$s0,$mask80
- and $s1,$s0,$mask7f
- sub $t1,$t1,$t1,lsr#7
- and $t1,$t1,$mask1b
- eor $s1,$t1,$s1,lsl#1 @ tp2
-
- and $t1,$s1,$mask80
- and $s2,$s1,$mask7f
- sub $t1,$t1,$t1,lsr#7
- and $t1,$t1,$mask1b
- eor $s2,$t1,$s2,lsl#1 @ tp4
-
- and $t1,$s2,$mask80
- and $s3,$s2,$mask7f
- sub $t1,$t1,$t1,lsr#7
- and $t1,$t1,$mask1b
- eor $s3,$t1,$s3,lsl#1 @ tp8
-
- eor $t1,$s1,$s2
- eor $t2,$s0,$s3 @ tp9
- eor $t1,$t1,$s3 @ tpe
- eor $t1,$t1,$s1,ror#24
- eor $t1,$t1,$t2,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8)
- eor $t1,$t1,$s2,ror#16
- eor $t1,$t1,$t2,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16)
- eor $t1,$t1,$t2,ror#8 @ ^= ROTATE(tp9,24)
-
- ldr $s0,[$key,#4] @ prefetch tp1
- str $t1,[$key],#4
- subs $rounds,$rounds,#1
- bne .Lmix
-
- mov r0,#0
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- bx lr @ interoperable with Thumb ISA:-)
-#endif
-.size AES_set_enc2dec_key,.-AES_set_enc2dec_key
-
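Two things happen in AES_set_enc2dec_key: the .Linv loop swaps the round keys end for end, and the .Lmix loop applies InvMixColumns to every inner round-key word using a packed "xtime" (GF(2^8) doubling) on all four bytes at once; that is what the 0x80808080, 0x7f7f7f7f and 0x1b1b1b1b masks are for. A C sketch of the .Lmix arithmetic (illustrative, not the file's own code):

#include <stdint.h>

#define ROR32(x,n) (((x) >> (n)) | ((x) << (32 - (n))))

/* GF(2^8) doubling of four bytes in parallel */
static uint32_t xtime4(uint32_t x)
{
    uint32_t hi = x & 0x80808080u;        /* high bit of each byte          */
    uint32_t lo = x & 0x7f7f7f7fu;
    hi = (hi - (hi >> 7)) & 0x1b1b1b1bu;  /* 0x1b where a byte overflowed   */
    return (lo << 1) ^ hi;
}

/* InvMixColumns on one round-key word, following the .Lmix comments */
static uint32_t inv_mix_column(uint32_t tp1)
{
    uint32_t tp2 = xtime4(tp1);
    uint32_t tp4 = xtime4(tp2);
    uint32_t tp8 = xtime4(tp4);
    uint32_t tp9 = tp1 ^ tp8;
    uint32_t tpb = tp9 ^ tp2;
    uint32_t tpd = tp9 ^ tp4;
    uint32_t tpe = tp2 ^ tp4 ^ tp8;
    return tpe ^ ROR32(tpb, 24) ^ ROR32(tpd, 16) ^ ROR32(tp9, 8);
}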
-.type AES_Td,%object
-.align 5
-AES_Td:
-.word 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
-.word 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
-.word 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
-.word 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
-.word 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
-.word 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
-.word 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
-.word 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
-.word 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
-.word 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
-.word 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
-.word 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
-.word 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
-.word 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
-.word 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
-.word 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
-.word 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
-.word 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
-.word 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
-.word 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
-.word 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
-.word 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
-.word 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
-.word 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
-.word 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
-.word 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
-.word 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
-.word 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
-.word 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
-.word 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
-.word 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
-.word 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
-.word 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
-.word 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
-.word 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
-.word 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
-.word 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
-.word 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
-.word 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
-.word 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
-.word 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
-.word 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
-.word 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
-.word 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
-.word 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
-.word 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
-.word 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
-.word 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
-.word 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
-.word 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
-.word 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
-.word 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
-.word 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
-.word 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
-.word 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
-.word 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
-.word 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
-.word 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
-.word 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
-.word 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
-.word 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
-.word 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
-.word 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
-.word 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
-@ Td4[256]
-.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.size AES_Td,.-AES_Td
-
-@ void AES_decrypt(const unsigned char *in, unsigned char *out,
-@ const AES_KEY *key) {
-.global AES_decrypt
-.type AES_decrypt,%function
-.align 5
-AES_decrypt:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ AES_decrypt
-#else
- adr r3,AES_decrypt
-#endif
- stmdb sp!,{r1,r4-r12,lr}
- mov $rounds,r0 @ inp
- mov $key,r2
- sub $tbl,r3,#AES_decrypt-AES_Td @ Td
-#if __ARM_ARCH__<7
- ldrb $s0,[$rounds,#3] @ load input data in endian-neutral
- ldrb $t1,[$rounds,#2] @ manner...
- ldrb $t2,[$rounds,#1]
- ldrb $t3,[$rounds,#0]
- orr $s0,$s0,$t1,lsl#8
- ldrb $s1,[$rounds,#7]
- orr $s0,$s0,$t2,lsl#16
- ldrb $t1,[$rounds,#6]
- orr $s0,$s0,$t3,lsl#24
- ldrb $t2,[$rounds,#5]
- ldrb $t3,[$rounds,#4]
- orr $s1,$s1,$t1,lsl#8
- ldrb $s2,[$rounds,#11]
- orr $s1,$s1,$t2,lsl#16
- ldrb $t1,[$rounds,#10]
- orr $s1,$s1,$t3,lsl#24
- ldrb $t2,[$rounds,#9]
- ldrb $t3,[$rounds,#8]
- orr $s2,$s2,$t1,lsl#8
- ldrb $s3,[$rounds,#15]
- orr $s2,$s2,$t2,lsl#16
- ldrb $t1,[$rounds,#14]
- orr $s2,$s2,$t3,lsl#24
- ldrb $t2,[$rounds,#13]
- ldrb $t3,[$rounds,#12]
- orr $s3,$s3,$t1,lsl#8
- orr $s3,$s3,$t2,lsl#16
- orr $s3,$s3,$t3,lsl#24
-#else
- ldr $s0,[$rounds,#0]
- ldr $s1,[$rounds,#4]
- ldr $s2,[$rounds,#8]
- ldr $s3,[$rounds,#12]
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
-#endif
- bl _armv4_AES_decrypt
-
- ldr $rounds,[sp],#4 @ pop out
-#if __ARM_ARCH__>=7
-#ifdef __ARMEL__
- rev $s0,$s0
- rev $s1,$s1
- rev $s2,$s2
- rev $s3,$s3
-#endif
- str $s0,[$rounds,#0]
- str $s1,[$rounds,#4]
- str $s2,[$rounds,#8]
- str $s3,[$rounds,#12]
-#else
- mov $t1,$s0,lsr#24 @ write output in endian-neutral
- mov $t2,$s0,lsr#16 @ manner...
- mov $t3,$s0,lsr#8
- strb $t1,[$rounds,#0]
- strb $t2,[$rounds,#1]
- mov $t1,$s1,lsr#24
- strb $t3,[$rounds,#2]
- mov $t2,$s1,lsr#16
- strb $s0,[$rounds,#3]
- mov $t3,$s1,lsr#8
- strb $t1,[$rounds,#4]
- strb $t2,[$rounds,#5]
- mov $t1,$s2,lsr#24
- strb $t3,[$rounds,#6]
- mov $t2,$s2,lsr#16
- strb $s1,[$rounds,#7]
- mov $t3,$s2,lsr#8
- strb $t1,[$rounds,#8]
- strb $t2,[$rounds,#9]
- mov $t1,$s3,lsr#24
- strb $t3,[$rounds,#10]
- mov $t2,$s3,lsr#16
- strb $s2,[$rounds,#11]
- mov $t3,$s3,lsr#8
- strb $t1,[$rounds,#12]
- strb $t2,[$rounds,#13]
- strb $t3,[$rounds,#14]
- strb $s3,[$rounds,#15]
-#endif
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- bx lr @ interoperable with Thumb ISA:-)
-#endif
-.size AES_decrypt,.-AES_decrypt
-
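The byte-at-a-time ldrb/strb paths above are nothing more than endianness- and alignment-neutral big-endian 32-bit accesses; on v7 and later the same effect comes from ldr/str plus rev on little-endian cores. The equivalent C (a sketch):

#include <stdint.h>

static uint32_t load_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
}

static void store_be32(unsigned char *p, uint32_t v)
{
    p[0] = (unsigned char)(v >> 24);
    p[1] = (unsigned char)(v >> 16);
    p[2] = (unsigned char)(v >>  8);
    p[3] = (unsigned char) v;
}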
-.type _armv4_AES_decrypt,%function
-.align 2
-_armv4_AES_decrypt:
- str lr,[sp,#-4]! @ push lr
- ldmia $key!,{$t1-$i1}
- eor $s0,$s0,$t1
- ldr $rounds,[$key,#240-16]
- eor $s1,$s1,$t2
- eor $s2,$s2,$t3
- eor $s3,$s3,$i1
- sub $rounds,$rounds,#1
- mov lr,#255
-
- and $i1,lr,$s0,lsr#16
- and $i2,lr,$s0,lsr#8
- and $i3,lr,$s0
- mov $s0,$s0,lsr#24
-.Ldec_loop:
- ldr $t1,[$tbl,$i1,lsl#2] @ Td1[s0>>16]
- and $i1,lr,$s1 @ i0
- ldr $t2,[$tbl,$i2,lsl#2] @ Td2[s0>>8]
- and $i2,lr,$s1,lsr#16
- ldr $t3,[$tbl,$i3,lsl#2] @ Td3[s0>>0]
- and $i3,lr,$s1,lsr#8
- ldr $s0,[$tbl,$s0,lsl#2] @ Td0[s0>>24]
- mov $s1,$s1,lsr#24
-
- ldr $i1,[$tbl,$i1,lsl#2] @ Td3[s1>>0]
- ldr $i2,[$tbl,$i2,lsl#2] @ Td1[s1>>16]
- ldr $i3,[$tbl,$i3,lsl#2] @ Td2[s1>>8]
- eor $s0,$s0,$i1,ror#24
- ldr $s1,[$tbl,$s1,lsl#2] @ Td0[s1>>24]
- and $i1,lr,$s2,lsr#8 @ i0
- eor $t2,$i2,$t2,ror#8
- and $i2,lr,$s2 @ i1
- eor $t3,$i3,$t3,ror#8
- and $i3,lr,$s2,lsr#16
- ldr $i1,[$tbl,$i1,lsl#2] @ Td2[s2>>8]
- eor $s1,$s1,$t1,ror#8
- ldr $i2,[$tbl,$i2,lsl#2] @ Td3[s2>>0]
- mov $s2,$s2,lsr#24
-
- ldr $i3,[$tbl,$i3,lsl#2] @ Td1[s2>>16]
- eor $s0,$s0,$i1,ror#16
- ldr $s2,[$tbl,$s2,lsl#2] @ Td0[s2>>24]
- and $i1,lr,$s3,lsr#16 @ i0
- eor $s1,$s1,$i2,ror#24
- and $i2,lr,$s3,lsr#8 @ i1
- eor $t3,$i3,$t3,ror#8
- and $i3,lr,$s3 @ i2
- ldr $i1,[$tbl,$i1,lsl#2] @ Td1[s3>>16]
- eor $s2,$s2,$t2,ror#8
- ldr $i2,[$tbl,$i2,lsl#2] @ Td2[s3>>8]
- mov $s3,$s3,lsr#24
-
- ldr $i3,[$tbl,$i3,lsl#2] @ Td3[s3>>0]
- eor $s0,$s0,$i1,ror#8
- ldr $i1,[$key],#16
- eor $s1,$s1,$i2,ror#16
- ldr $s3,[$tbl,$s3,lsl#2] @ Td0[s3>>24]
- eor $s2,$s2,$i3,ror#24
-
- ldr $t1,[$key,#-12]
- eor $s0,$s0,$i1
- ldr $t2,[$key,#-8]
- eor $s3,$s3,$t3,ror#8
- ldr $t3,[$key,#-4]
- and $i1,lr,$s0,lsr#16
- eor $s1,$s1,$t1
- and $i2,lr,$s0,lsr#8
- eor $s2,$s2,$t2
- and $i3,lr,$s0
- eor $s3,$s3,$t3
- mov $s0,$s0,lsr#24
-
- subs $rounds,$rounds,#1
- bne .Ldec_loop
-
- add $tbl,$tbl,#1024
-
- ldr $t2,[$tbl,#0] @ prefetch Td4
- ldr $t3,[$tbl,#32]
- ldr $t1,[$tbl,#64]
- ldr $t2,[$tbl,#96]
- ldr $t3,[$tbl,#128]
- ldr $t1,[$tbl,#160]
- ldr $t2,[$tbl,#192]
- ldr $t3,[$tbl,#224]
-
- ldrb $s0,[$tbl,$s0] @ Td4[s0>>24]
- ldrb $t1,[$tbl,$i1] @ Td4[s0>>16]
- and $i1,lr,$s1 @ i0
- ldrb $t2,[$tbl,$i2] @ Td4[s0>>8]
- and $i2,lr,$s1,lsr#16
- ldrb $t3,[$tbl,$i3] @ Td4[s0>>0]
- and $i3,lr,$s1,lsr#8
-
- add $s1,$tbl,$s1,lsr#24
- ldrb $i1,[$tbl,$i1] @ Td4[s1>>0]
- ldrb $s1,[$s1] @ Td4[s1>>24]
- ldrb $i2,[$tbl,$i2] @ Td4[s1>>16]
- eor $s0,$i1,$s0,lsl#24
- ldrb $i3,[$tbl,$i3] @ Td4[s1>>8]
- eor $s1,$t1,$s1,lsl#8
- and $i1,lr,$s2,lsr#8 @ i0
- eor $t2,$t2,$i2,lsl#8
- and $i2,lr,$s2 @ i1
- ldrb $i1,[$tbl,$i1] @ Td4[s2>>8]
- eor $t3,$t3,$i3,lsl#8
- ldrb $i2,[$tbl,$i2] @ Td4[s2>>0]
- and $i3,lr,$s2,lsr#16
-
- add $s2,$tbl,$s2,lsr#24
- ldrb $s2,[$s2] @ Td4[s2>>24]
- eor $s0,$s0,$i1,lsl#8
- ldrb $i3,[$tbl,$i3] @ Td4[s2>>16]
- eor $s1,$i2,$s1,lsl#16
- and $i1,lr,$s3,lsr#16 @ i0
- eor $s2,$t2,$s2,lsl#16
- and $i2,lr,$s3,lsr#8 @ i1
- ldrb $i1,[$tbl,$i1] @ Td4[s3>>16]
- eor $t3,$t3,$i3,lsl#16
- ldrb $i2,[$tbl,$i2] @ Td4[s3>>8]
- and $i3,lr,$s3 @ i2
-
- add $s3,$tbl,$s3,lsr#24
- ldrb $i3,[$tbl,$i3] @ Td4[s3>>0]
- ldrb $s3,[$s3] @ Td4[s3>>24]
- eor $s0,$s0,$i1,lsl#16
- ldr $i1,[$key,#0]
- eor $s1,$s1,$i2,lsl#8
- ldr $t1,[$key,#4]
- eor $s2,$i3,$s2,lsl#8
- ldr $t2,[$key,#8]
- eor $s3,$t3,$s3,lsl#24
- ldr $t3,[$key,#12]
-
- eor $s0,$s0,$i1
- eor $s1,$s1,$t1
- eor $s2,$s2,$t2
- eor $s3,$s3,$t3
-
- sub $tbl,$tbl,#1024
- ldr pc,[sp],#4 @ pop and return
-.size _armv4_AES_decrypt,.-_armv4_AES_decrypt
-.asciz "AES for ARMv4, CRYPTOGAMS by <appro\@openssl.org>"
-.align 2
-___
-
-$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
-$code =~ s/\bret\b/bx\tlr/gm;
-
-open SELF,$0;
-while(<SELF>) {
- next if (/^#!/);
- last if (!s/^#/@/ and !/^$/);
- print;
-}
-close SELF;
-
-print $code;
-close STDOUT; # enforce flush
diff --git a/app/openssl/crypto/aes/asm/aes-ia64.S b/app/openssl/crypto/aes/asm/aes-ia64.S
deleted file mode 100644
index 7f6c4c36..00000000
--- a/app/openssl/crypto/aes/asm/aes-ia64.S
+++ /dev/null
@@ -1,1123 +0,0 @@
-// ====================================================================
-// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-// project. Rights for redistribution and usage in source and binary
-// forms are granted according to the OpenSSL license.
-// ====================================================================
-//
-// What's wrong with compiler-generated code? The compiler never uses
-// the variable 'shr', which is pairable with the 'extr'/'dep'
-// instructions. It also uses 'zxt', which is an I-type, but can be
-// replaced with 'and', which in turn can be assigned to an M-port
-// [there are twice as many M-ports as I-ports on Itanium 2]. By
-// sacrificing a few registers for small constants (255, 24 and 16) to
-// be used with the 'shr' and 'and' instructions I can achieve better
-// ILP (Instruction Level Parallelism) and performance. This code
-// outperforms GCC 3.3-generated code by over a factor of 2 (two),
-// GCC 3.4 by 70% and HP C by 40%. Measured in the best-case scenario,
-// i.e. aligned big-endian input, ECB timing on Itanium 2 is
-// (18 + 13*rounds) ticks per block, or 9.25 CPU cycles per byte for a
-// 128-bit key.
-
-// Version 1.2 mitigates the hazard of cache-timing attacks by
-// a) compressing the S-boxes from 8KB to 2KB+256B, b) scheduling
-// references to the S-boxes for L2 cache latency, and c) prefetching
-// T[ed]4 prior to the last round. As a result, performance dropped to
-// (26 + 15*rounds) ticks per block, or 11 cycles per byte processed
-// with a 128-bit key. This is a ~16% deterioration. For reference,
-// the Itanium 2 L1 cache has a 64-byte line size and the L2 a
-// 128-byte one...
-
-.ident "aes-ia64.S, version 1.2"
-.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
-.explicit
-.text
-
-rk0=r8; rk1=r9;
-
-pfssave=r2;
-lcsave=r10;
-prsave=r3;
-maskff=r11;
-twenty4=r14;
-sixteen=r15;
-
-te00=r16; te11=r17; te22=r18; te33=r19;
-te01=r20; te12=r21; te23=r22; te30=r23;
-te02=r24; te13=r25; te20=r26; te31=r27;
-te03=r28; te10=r29; te21=r30; te32=r31;
-
-// these are rotating...
-t0=r32; s0=r33;
-t1=r34; s1=r35;
-t2=r36; s2=r37;
-t3=r38; s3=r39;
-
-te0=r40; te1=r41; te2=r42; te3=r43;
-
-#if defined(_HPUX_SOURCE) && !defined(_LP64)
-# define ADDP addp4
-#else
-# define ADDP add
-#endif
-
-// Offsets from Te0
-#define TE0 0
-#define TE2 2
-#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
-#define TE1 3
-#define TE3 1
-#else
-#define TE1 1
-#define TE3 3
-#endif
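TE0..TE3 are byte offsets, not four separate tables: every AES_Te entry below is stored twice (the paired data4 values), so a 4-byte load at byte offset 0..3 inside the 8-byte pair returns the same word byte-rotated, and a single 2KB table serves all four lookups. Which offset yields which rotation depends on endianness, hence the #if just above; it is also why the code later clears um.ac, making the misaligned 4-byte loads legal. A C sketch of the trick (illustrative only):

#include <stdint.h>
#include <string.h>

/* one lookup into a doubled {v,v} table; rot = 0..3 picks the rotation,
 * i.e. which of the four logical tables comes back */
static uint32_t te_lookup(const unsigned char *te, unsigned idx, unsigned rot)
{
    uint32_t v;
    memcpy(&v, te + 8 * idx + rot, sizeof v);  /* misaligned 4-byte read */
    return v;
}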
-
-// This implies that AES_KEY comprises 32-bit key schedule elements
-// even on LP64 platforms.
-#ifndef KSZ
-# define KSZ 4
-# define LDKEY ld4
-#endif
-
-.proc _ia64_AES_encrypt#
-// Input: rk0-rk1
-// te0
-// te3 as AES_KEY->rounds!!!
-// s0-s3
-// maskff,twenty4,sixteen
-// Output: r16,r20,r24,r28 as s0-s3
-// Clobber: r16-r31,rk0-rk1,r32-r43
-.align 32
-_ia64_AES_encrypt:
- .prologue
- .altrp b6
- .body
-{ .mmi; alloc r16=ar.pfs,12,0,0,8
- LDKEY t0=[rk0],2*KSZ
- mov pr.rot=1<<16 }
-{ .mmi; LDKEY t1=[rk1],2*KSZ
- add te1=TE1,te0
- add te3=-3,te3 };;
-{ .mib; LDKEY t2=[rk0],2*KSZ
- mov ar.ec=2 }
-{ .mib; LDKEY t3=[rk1],2*KSZ
- add te2=TE2,te0
- brp.loop.imp .Le_top,.Le_end-16 };;
-
-{ .mmi; xor s0=s0,t0
- xor s1=s1,t1
- mov ar.lc=te3 }
-{ .mmi; xor s2=s2,t2
- xor s3=s3,t3
- add te3=TE3,te0 };;
-
-.align 32
-.Le_top:
-{ .mmi; (p0) LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
- (p0) and te33=s3,maskff // 0/0:s3&0xff
- (p0) extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
-{ .mmi; (p0) LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
- (p0) and te30=s0,maskff // 0/1:s0&0xff
- (p0) shr.u te00=s0,twenty4 };; // 0/0:s0>>24
-{ .mmi; (p0) LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
- (p0) shladd te33=te33,3,te3 // 1/0:te0+s0>>24
- (p0) extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
-{ .mmi; (p0) LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
- (p0) shladd te30=te30,3,te3 // 1/1:te3+s0
- (p0) shr.u te01=s1,twenty4 };; // 1/1:s1>>24
-{ .mmi; (p0) ld4 te33=[te33] // 2/0:te3[s3&0xff]
- (p0) shladd te22=te22,3,te2 // 2/0:te2+s2>>8&0xff
- (p0) extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
-{ .mmi; (p0) ld4 te30=[te30] // 2/1:te3[s0]
- (p0) shladd te23=te23,3,te2 // 2/1:te2+s3>>8
- (p0) shr.u te02=s2,twenty4 };; // 2/2:s2>>24
-{ .mmi; (p0) ld4 te22=[te22] // 3/0:te2[s2>>8]
- (p0) shladd te20=te20,3,te2 // 3/2:te2+s0>>8
- (p0) extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
-{ .mmi; (p0) ld4 te23=[te23] // 3/1:te2[s3>>8]
- (p0) shladd te00=te00,3,te0 // 3/0:te0+s0>>24
- (p0) shr.u te03=s3,twenty4 };; // 3/3:s3>>24
-{ .mmi; (p0) ld4 te20=[te20] // 4/2:te2[s0>>8]
- (p0) shladd te21=te21,3,te2 // 4/3:te3+s2
- (p0) extr.u te11=s1,16,8 } // 4/0:s1>>16&0xff
-{ .mmi; (p0) ld4 te00=[te00] // 4/0:te0[s0>>24]
- (p0) shladd te01=te01,3,te0 // 4/1:te0+s1>>24
- (p0) shr.u te13=s3,sixteen };; // 4/2:s3>>16
-{ .mmi; (p0) ld4 te21=[te21] // 5/3:te2[s1>>8]
- (p0) shladd te11=te11,3,te1 // 5/0:te1+s1>>16
- (p0) extr.u te12=s2,16,8 } // 5/1:s2>>16&0xff
-{ .mmi; (p0) ld4 te01=[te01] // 5/1:te0[s1>>24]
- (p0) shladd te02=te02,3,te0 // 5/2:te0+s2>>24
- (p0) and te31=s1,maskff };; // 5/2:s1&0xff
-{ .mmi; (p0) ld4 te11=[te11] // 6/0:te1[s1>>16]
- (p0) shladd te12=te12,3,te1 // 6/1:te1+s2>>16
- (p0) extr.u te10=s0,16,8 } // 6/3:s0>>16&0xff
-{ .mmi; (p0) ld4 te02=[te02] // 6/2:te0[s2>>24]
- (p0) shladd te03=te03,3,te0 // 6/3:te1+s0>>16
- (p0) and te32=s2,maskff };; // 6/3:s2&0xff
-
-{ .mmi; (p0) ld4 te12=[te12] // 7/1:te1[s2>>16]
- (p0) shladd te31=te31,3,te3 // 7/2:te3+s1&0xff
- (p0) and te13=te13,maskff} // 7/2:s3>>16&0xff
-{ .mmi; (p0) ld4 te03=[te03] // 7/3:te0[s3>>24]
- (p0) shladd te32=te32,3,te3 // 7/3:te3+s2
- (p0) xor t0=t0,te33 };; // 7/0:
-{ .mmi; (p0) ld4 te31=[te31] // 8/2:te3[s1]
- (p0) shladd te13=te13,3,te1 // 8/2:te1+s3>>16
- (p0) xor t0=t0,te22 } // 8/0:
-{ .mmi; (p0) ld4 te32=[te32] // 8/3:te3[s2]
- (p0) shladd te10=te10,3,te1 // 8/3:te1+s0>>16
- (p0) xor t1=t1,te30 };; // 8/1:
-{ .mmi; (p0) ld4 te13=[te13] // 9/2:te1[s3>>16]
- (p0) ld4 te10=[te10] // 9/3:te1[s0>>16]
- (p0) xor t0=t0,te00 };; // 9/0: !L2 scheduling
-{ .mmi; (p0) xor t1=t1,te23 // 10[9]/1:
- (p0) xor t2=t2,te20 // 10[9]/2:
- (p0) xor t3=t3,te21 };; // 10[9]/3:
-{ .mmi; (p0) xor t0=t0,te11 // 11[10]/0:done!
- (p0) xor t1=t1,te01 // 11[10]/1:
- (p0) xor t2=t2,te02 };; // 11[10]/2: !L2 scheduling
-{ .mmi; (p0) xor t3=t3,te03 // 12[10]/3:
- (p16) cmp.eq p0,p17=r0,r0 };; // 12[10]/clear (p17)
-{ .mmi; (p0) xor t1=t1,te12 // 13[11]/1:done!
- (p0) xor t2=t2,te31 // 13[11]/2:
- (p0) xor t3=t3,te32 } // 13[11]/3:
-{ .mmi; (p17) add te0=2048,te0 // 13[11]/
- (p17) add te1=2048+64-TE1,te1};; // 13[11]/
-{ .mib; (p0) xor t2=t2,te13 // 14[12]/2:done!
- (p17) add te2=2048+128-TE2,te2} // 14[12]/
-{ .mib; (p0) xor t3=t3,te10 // 14[12]/3:done!
- (p17) add te3=2048+192-TE3,te3 // 14[12]/
- br.ctop.sptk .Le_top };;
-.Le_end:
-
-
-{ .mmi; ld8 te12=[te0] // prefetch Te4
- ld8 te31=[te1] }
-{ .mmi; ld8 te10=[te2]
- ld8 te32=[te3] }
-
-{ .mmi; LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
- and te33=s3,maskff // 0/0:s3&0xff
- extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
-{ .mmi; LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
- and te30=s0,maskff // 0/1:s0&0xff
- shr.u te00=s0,twenty4 };; // 0/0:s0>>24
-{ .mmi; LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
- add te33=te33,te0 // 1/0:te0+s0>>24
- extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
-{ .mmi; LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
- add te30=te30,te0 // 1/1:te0+s0
- shr.u te01=s1,twenty4 };; // 1/1:s1>>24
-{ .mmi; ld1 te33=[te33] // 2/0:te0[s3&0xff]
- add te22=te22,te0 // 2/0:te0+s2>>8&0xff
- extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
-{ .mmi; ld1 te30=[te30] // 2/1:te0[s0]
- add te23=te23,te0 // 2/1:te0+s3>>8
- shr.u te02=s2,twenty4 };; // 2/2:s2>>24
-{ .mmi; ld1 te22=[te22] // 3/0:te0[s2>>8]
- add te20=te20,te0 // 3/2:te0+s0>>8
- extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
-{ .mmi; ld1 te23=[te23] // 3/1:te0[s3>>8]
- add te00=te00,te0 // 3/0:te0+s0>>24
- shr.u te03=s3,twenty4 };; // 3/3:s3>>24
-{ .mmi; ld1 te20=[te20] // 4/2:te0[s0>>8]
- add te21=te21,te0 // 4/3:te0+s2
- extr.u te11=s1,16,8 } // 4/0:s1>>16&0xff
-{ .mmi; ld1 te00=[te00] // 4/0:te0[s0>>24]
- add te01=te01,te0 // 4/1:te0+s1>>24
- shr.u te13=s3,sixteen };; // 4/2:s3>>16
-{ .mmi; ld1 te21=[te21] // 5/3:te0[s1>>8]
- add te11=te11,te0 // 5/0:te0+s1>>16
- extr.u te12=s2,16,8 } // 5/1:s2>>16&0xff
-{ .mmi; ld1 te01=[te01] // 5/1:te0[s1>>24]
- add te02=te02,te0 // 5/2:te0+s2>>24
- and te31=s1,maskff };; // 5/2:s1&0xff
-{ .mmi; ld1 te11=[te11] // 6/0:te0[s1>>16]
- add te12=te12,te0 // 6/1:te0+s2>>16
- extr.u te10=s0,16,8 } // 6/3:s0>>16&0xff
-{ .mmi; ld1 te02=[te02] // 6/2:te0[s2>>24]
- add te03=te03,te0 // 6/3:te0+s0>>16
- and te32=s2,maskff };; // 6/3:s2&0xff
-
-{ .mmi; ld1 te12=[te12] // 7/1:te0[s2>>16]
- add te31=te31,te0 // 7/2:te0+s1&0xff
- dep te33=te22,te33,8,8} // 7/0:
-{ .mmi; ld1 te03=[te03] // 7/3:te0[s3>>24]
- add te32=te32,te0 // 7/3:te0+s2
- and te13=te13,maskff};; // 7/2:s3>>16&0xff
-{ .mmi; ld1 te31=[te31] // 8/2:te0[s1]
- add te13=te13,te0 // 8/2:te0+s3>>16
- dep te30=te23,te30,8,8} // 8/1:
-{ .mmi; ld1 te32=[te32] // 8/3:te0[s2]
- add te10=te10,te0 // 8/3:te0+s0>>16
- shl te00=te00,twenty4};; // 8/0:
-{ .mii; ld1 te13=[te13] // 9/2:te0[s3>>16]
- dep te33=te11,te33,16,8 // 9/0:
- shl te01=te01,twenty4};; // 9/1:
-{ .mii; ld1 te10=[te10] // 10/3:te0[s0>>16]
- dep te31=te20,te31,8,8 // 10/2:
- shl te02=te02,twenty4};; // 10/2:
-{ .mii; xor t0=t0,te33 // 11/0:
- dep te32=te21,te32,8,8 // 11/3:
- shl te12=te12,sixteen};; // 11/1:
-{ .mii; xor r16=t0,te00 // 12/0:done!
- dep te31=te13,te31,16,8 // 12/2:
- shl te03=te03,twenty4};; // 12/3:
-{ .mmi; xor t1=t1,te01 // 13/1:
- xor t2=t2,te02 // 13/2:
- dep te32=te10,te32,16,8};; // 13/3:
-{ .mmi; xor t1=t1,te30 // 14/1:
- xor r24=t2,te31 // 14/2:done!
- xor t3=t3,te32 };; // 14/3:
-{ .mib; xor r20=t1,te12 // 15/1:done!
- xor r28=t3,te03 // 15/3:done!
- br.ret.sptk b6 };;
-.endp _ia64_AES_encrypt#
-
-// void AES_encrypt (const void *in,void *out,const AES_KEY *key);
-.global AES_encrypt#
-.proc AES_encrypt#
-.align 32
-AES_encrypt:
- .prologue
- .save ar.pfs,pfssave
-{ .mmi; alloc pfssave=ar.pfs,3,1,12,0
- and out0=3,in0
- mov r3=ip }
-{ .mmi; ADDP in0=0,in0
- mov loc0=psr.um
- ADDP out11=KSZ*60,in2 };; // &AES_KEY->rounds
-
-{ .mmi; ld4 out11=[out11] // AES_KEY->rounds
- add out8=(AES_Te#-AES_encrypt#),r3 // Te0
- .save pr,prsave
- mov prsave=pr }
-{ .mmi; rum 1<<3 // clear um.ac
- .save ar.lc,lcsave
- mov lcsave=ar.lc };;
-
- .body
-#if defined(_HPUX_SOURCE) // HPUX is big-endian, cut 15+15 cycles...
-{ .mib; cmp.ne p6,p0=out0,r0
- add out0=4,in0
-(p6) br.dpnt.many .Le_i_unaligned };;
-
-{ .mmi; ld4 out1=[in0],8 // s0
- and out9=3,in1
- mov twenty4=24 }
-{ .mmi; ld4 out3=[out0],8 // s1
- ADDP rk0=0,in2
- mov sixteen=16 };;
-{ .mmi; ld4 out5=[in0] // s2
- cmp.ne p6,p0=out9,r0
- mov maskff=0xff }
-{ .mmb; ld4 out7=[out0] // s3
- ADDP rk1=KSZ,in2
- br.call.sptk.many b6=_ia64_AES_encrypt };;
-
-{ .mib; ADDP in0=4,in1
- ADDP in1=0,in1
-(p6) br.spnt .Le_o_unaligned };;
-
-{ .mii; mov psr.um=loc0
- mov ar.pfs=pfssave
- mov ar.lc=lcsave };;
-{ .mmi; st4 [in1]=r16,8 // s0
- st4 [in0]=r20,8 // s1
- mov pr=prsave,0x1ffff };;
-{ .mmb; st4 [in1]=r24 // s2
- st4 [in0]=r28 // s3
- br.ret.sptk.many b0 };;
-#endif
-
-.align 32
-.Le_i_unaligned:
-{ .mmi; add out0=1,in0
- add out2=2,in0
- add out4=3,in0 };;
-{ .mmi; ld1 r16=[in0],4
- ld1 r17=[out0],4 }//;;
-{ .mmi; ld1 r18=[out2],4
- ld1 out1=[out4],4 };; // s0
-{ .mmi; ld1 r20=[in0],4
- ld1 r21=[out0],4 }//;;
-{ .mmi; ld1 r22=[out2],4
- ld1 out3=[out4],4 };; // s1
-{ .mmi; ld1 r24=[in0],4
- ld1 r25=[out0],4 }//;;
-{ .mmi; ld1 r26=[out2],4
- ld1 out5=[out4],4 };; // s2
-{ .mmi; ld1 r28=[in0]
- ld1 r29=[out0] }//;;
-{ .mmi; ld1 r30=[out2]
- ld1 out7=[out4] };; // s3
-
-{ .mii;
- dep out1=r16,out1,24,8 //;;
- dep out3=r20,out3,24,8 }//;;
-{ .mii; ADDP rk0=0,in2
- dep out5=r24,out5,24,8 //;;
- dep out7=r28,out7,24,8 };;
-{ .mii; ADDP rk1=KSZ,in2
- dep out1=r17,out1,16,8 //;;
- dep out3=r21,out3,16,8 }//;;
-{ .mii; mov twenty4=24
- dep out5=r25,out5,16,8 //;;
- dep out7=r29,out7,16,8 };;
-{ .mii; mov sixteen=16
- dep out1=r18,out1,8,8 //;;
- dep out3=r22,out3,8,8 }//;;
-{ .mii; mov maskff=0xff
- dep out5=r26,out5,8,8 //;;
- dep out7=r30,out7,8,8 };;
-
-{ .mib; br.call.sptk.many b6=_ia64_AES_encrypt };;
-
-.Le_o_unaligned:
-{ .mii; ADDP out0=0,in1
- extr.u r17=r16,8,8 // s0
- shr.u r19=r16,twenty4 }//;;
-{ .mii; ADDP out1=1,in1
- extr.u r18=r16,16,8
- shr.u r23=r20,twenty4 }//;; // s1
-{ .mii; ADDP out2=2,in1
- extr.u r21=r20,8,8
- shr.u r22=r20,sixteen }//;;
-{ .mii; ADDP out3=3,in1
- extr.u r25=r24,8,8 // s2
- shr.u r27=r24,twenty4 };;
-{ .mii; st1 [out3]=r16,4
- extr.u r26=r24,16,8
- shr.u r31=r28,twenty4 }//;; // s3
-{ .mii; st1 [out2]=r17,4
- extr.u r29=r28,8,8
- shr.u r30=r28,sixteen }//;;
-
-{ .mmi; st1 [out1]=r18,4
- st1 [out0]=r19,4 };;
-{ .mmi; st1 [out3]=r20,4
- st1 [out2]=r21,4 }//;;
-{ .mmi; st1 [out1]=r22,4
- st1 [out0]=r23,4 };;
-{ .mmi; st1 [out3]=r24,4
- st1 [out2]=r25,4
- mov pr=prsave,0x1ffff }//;;
-{ .mmi; st1 [out1]=r26,4
- st1 [out0]=r27,4
- mov ar.pfs=pfssave };;
-{ .mmi; st1 [out3]=r28
- st1 [out2]=r29
- mov ar.lc=lcsave }//;;
-{ .mmi; st1 [out1]=r30
- st1 [out0]=r31 }
-{ .mfb; mov psr.um=loc0 // restore user mask
- br.ret.sptk.many b0 };;
-.endp AES_encrypt#
-
-// *AES_decrypt are autogenerated by the following script:
-#if 0
-#!/usr/bin/env perl
-print "// *AES_decrypt are autogenerated by the following script:\n#if 0\n";
-open(PROG,'<'.$0); while(<PROG>) { print; } close(PROG);
-print "#endif\n";
-while(<>) {
- $process=1 if (/\.proc\s+_ia64_AES_encrypt/);
- next if (!$process);
-
- #s/te00=s0/td00=s0/; s/te00/td00/g;
- s/te11=s1/td13=s3/; s/te11/td13/g;
- #s/te22=s2/td22=s2/; s/te22/td22/g;
- s/te33=s3/td31=s1/; s/te33/td31/g;
-
- #s/te01=s1/td01=s1/; s/te01/td01/g;
- s/te12=s2/td10=s0/; s/te12/td10/g;
- #s/te23=s3/td23=s3/; s/te23/td23/g;
- s/te30=s0/td32=s2/; s/te30/td32/g;
-
- #s/te02=s2/td02=s2/; s/te02/td02/g;
- s/te13=s3/td11=s1/; s/te13/td11/g;
- #s/te20=s0/td20=s0/; s/te20/td20/g;
- s/te31=s1/td33=s3/; s/te31/td33/g;
-
- #s/te03=s3/td03=s3/; s/te03/td03/g;
- s/te10=s0/td12=s2/; s/te10/td12/g;
- #s/te21=s1/td21=s1/; s/te21/td21/g;
- s/te32=s2/td30=s0/; s/te32/td30/g;
-
- s/td/te/g;
-
- s/AES_encrypt/AES_decrypt/g;
- s/\.Le_/.Ld_/g;
- s/AES_Te#/AES_Td#/g;
-
- print;
-
- exit if (/\.endp\s+AES_decrypt/);
-}
-#endif
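The rename table in the script above encodes which state word feeds which table in each direction: ShiftRows makes an encryption round gather its first output column from s0,s1,s2,s3, while InvShiftRows makes the decryption round gather it from s0,s3,s2,s1. In the usual table-lookup formulation (a sketch of the standard round structure, not code from this file):

#include <stdint.h>

/* first output column of an encryption round vs. a decryption round;
 * the round-key XOR is omitted for brevity */
static uint32_t enc_col0(const uint32_t Te0[256], const uint32_t Te1[256],
                         const uint32_t Te2[256], const uint32_t Te3[256],
                         uint32_t s0, uint32_t s1, uint32_t s2, uint32_t s3)
{
    return Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^
           Te2[(s2 >>  8) & 0xff] ^ Te3[s3 & 0xff];
}

static uint32_t dec_col0(const uint32_t Td0[256], const uint32_t Td1[256],
                         const uint32_t Td2[256], const uint32_t Td3[256],
                         uint32_t s0, uint32_t s1, uint32_t s2, uint32_t s3)
{
    return Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^
           Td2[(s2 >>  8) & 0xff] ^ Td3[s1 & 0xff];
}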
-.proc _ia64_AES_decrypt#
-// Input: rk0-rk1
-// te0
-// te3 as AES_KEY->rounds!!!
-// s0-s3
-// maskff,twenty4,sixteen
-// Output: r16,r20,r24,r28 as s0-s3
-// Clobber: r16-r31,rk0-rk1,r32-r43
-.align 32
-_ia64_AES_decrypt:
- .prologue
- .altrp b6
- .body
-{ .mmi; alloc r16=ar.pfs,12,0,0,8
- LDKEY t0=[rk0],2*KSZ
- mov pr.rot=1<<16 }
-{ .mmi; LDKEY t1=[rk1],2*KSZ
- add te1=TE1,te0
- add te3=-3,te3 };;
-{ .mib; LDKEY t2=[rk0],2*KSZ
- mov ar.ec=2 }
-{ .mib; LDKEY t3=[rk1],2*KSZ
- add te2=TE2,te0
- brp.loop.imp .Ld_top,.Ld_end-16 };;
-
-{ .mmi; xor s0=s0,t0
- xor s1=s1,t1
- mov ar.lc=te3 }
-{ .mmi; xor s2=s2,t2
- xor s3=s3,t3
- add te3=TE3,te0 };;
-
-.align 32
-.Ld_top:
-{ .mmi; (p0) LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
- (p0) and te31=s1,maskff // 0/0:s3&0xff
- (p0) extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
-{ .mmi; (p0) LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
- (p0) and te32=s2,maskff // 0/1:s0&0xff
- (p0) shr.u te00=s0,twenty4 };; // 0/0:s0>>24
-{ .mmi; (p0) LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
- (p0) shladd te31=te31,3,te3 // 1/0:te0+s0>>24
- (p0) extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
-{ .mmi; (p0) LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
- (p0) shladd te32=te32,3,te3 // 1/1:te3+s0
- (p0) shr.u te01=s1,twenty4 };; // 1/1:s1>>24
-{ .mmi; (p0) ld4 te31=[te31] // 2/0:te3[s3&0xff]
- (p0) shladd te22=te22,3,te2 // 2/0:te2+s2>>8&0xff
- (p0) extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
-{ .mmi; (p0) ld4 te32=[te32] // 2/1:te3[s0]
- (p0) shladd te23=te23,3,te2 // 2/1:te2+s3>>8
- (p0) shr.u te02=s2,twenty4 };; // 2/2:s2>>24
-{ .mmi; (p0) ld4 te22=[te22] // 3/0:te2[s2>>8]
- (p0) shladd te20=te20,3,te2 // 3/2:te2+s0>>8
- (p0) extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
-{ .mmi; (p0) ld4 te23=[te23] // 3/1:te2[s3>>8]
- (p0) shladd te00=te00,3,te0 // 3/0:te0+s0>>24
- (p0) shr.u te03=s3,twenty4 };; // 3/3:s3>>24
-{ .mmi; (p0) ld4 te20=[te20] // 4/2:te2[s0>>8]
- (p0) shladd te21=te21,3,te2 // 4/3:te3+s2
- (p0) extr.u te13=s3,16,8 } // 4/0:s1>>16&0xff
-{ .mmi; (p0) ld4 te00=[te00] // 4/0:te0[s0>>24]
- (p0) shladd te01=te01,3,te0 // 4/1:te0+s1>>24
- (p0) shr.u te11=s1,sixteen };; // 4/2:s3>>16
-{ .mmi; (p0) ld4 te21=[te21] // 5/3:te2[s1>>8]
- (p0) shladd te13=te13,3,te1 // 5/0:te1+s1>>16
- (p0) extr.u te10=s0,16,8 } // 5/1:s2>>16&0xff
-{ .mmi; (p0) ld4 te01=[te01] // 5/1:te0[s1>>24]
- (p0) shladd te02=te02,3,te0 // 5/2:te0+s2>>24
- (p0) and te33=s3,maskff };; // 5/2:s1&0xff
-{ .mmi; (p0) ld4 te13=[te13] // 6/0:te1[s1>>16]
- (p0) shladd te10=te10,3,te1 // 6/1:te1+s2>>16
- (p0) extr.u te12=s2,16,8 } // 6/3:s0>>16&0xff
-{ .mmi; (p0) ld4 te02=[te02] // 6/2:te0[s2>>24]
- (p0) shladd te03=te03,3,te0 // 6/3:te1+s0>>16
- (p0) and te30=s0,maskff };; // 6/3:s2&0xff
-
-{ .mmi; (p0) ld4 te10=[te10] // 7/1:te1[s2>>16]
- (p0) shladd te33=te33,3,te3 // 7/2:te3+s1&0xff
- (p0) and te11=te11,maskff} // 7/2:s3>>16&0xff
-{ .mmi; (p0) ld4 te03=[te03] // 7/3:te0[s3>>24]
- (p0) shladd te30=te30,3,te3 // 7/3:te3+s2
- (p0) xor t0=t0,te31 };; // 7/0:
-{ .mmi; (p0) ld4 te33=[te33] // 8/2:te3[s1]
- (p0) shladd te11=te11,3,te1 // 8/2:te1+s3>>16
- (p0) xor t0=t0,te22 } // 8/0:
-{ .mmi; (p0) ld4 te30=[te30] // 8/3:te3[s2]
- (p0) shladd te12=te12,3,te1 // 8/3:te1+s0>>16
- (p0) xor t1=t1,te32 };; // 8/1:
-{ .mmi; (p0) ld4 te11=[te11] // 9/2:te1[s3>>16]
- (p0) ld4 te12=[te12] // 9/3:te1[s0>>16]
- (p0) xor t0=t0,te00 };; // 9/0: !L2 scheduling
-{ .mmi; (p0) xor t1=t1,te23 // 10[9]/1:
- (p0) xor t2=t2,te20 // 10[9]/2:
- (p0) xor t3=t3,te21 };; // 10[9]/3:
-{ .mmi; (p0) xor t0=t0,te13 // 11[10]/0:done!
- (p0) xor t1=t1,te01 // 11[10]/1:
- (p0) xor t2=t2,te02 };; // 11[10]/2: !L2 scheduling
-{ .mmi; (p0) xor t3=t3,te03 // 12[10]/3:
- (p16) cmp.eq p0,p17=r0,r0 };; // 12[10]/clear (p17)
-{ .mmi; (p0) xor t1=t1,te10 // 13[11]/1:done!
- (p0) xor t2=t2,te33 // 13[11]/2:
- (p0) xor t3=t3,te30 } // 13[11]/3:
-{ .mmi; (p17) add te0=2048,te0 // 13[11]/
- (p17) add te1=2048+64-TE1,te1};; // 13[11]/
-{ .mib; (p0) xor t2=t2,te11 // 14[12]/2:done!
- (p17) add te2=2048+128-TE2,te2} // 14[12]/
-{ .mib; (p0) xor t3=t3,te12 // 14[12]/3:done!
- (p17) add te3=2048+192-TE3,te3 // 14[12]/
- br.ctop.sptk .Ld_top };;
-.Ld_end:
-
-
-{ .mmi; ld8 te10=[te0] // prefetch Td4
- ld8 te33=[te1] }
-{ .mmi; ld8 te12=[te2]
- ld8 te30=[te3] }
-
-{ .mmi; LDKEY t0=[rk0],2*KSZ // 0/0:rk[0]
- and te31=s1,maskff // 0/0:s3&0xff
- extr.u te22=s2,8,8 } // 0/0:s2>>8&0xff
-{ .mmi; LDKEY t1=[rk1],2*KSZ // 0/1:rk[1]
- and te32=s2,maskff // 0/1:s0&0xff
- shr.u te00=s0,twenty4 };; // 0/0:s0>>24
-{ .mmi; LDKEY t2=[rk0],2*KSZ // 1/2:rk[2]
- add te31=te31,te0 // 1/0:te0+s0>>24
- extr.u te23=s3,8,8 } // 1/1:s3>>8&0xff
-{ .mmi; LDKEY t3=[rk1],2*KSZ // 1/3:rk[3]
- add te32=te32,te0 // 1/1:te0+s0
- shr.u te01=s1,twenty4 };; // 1/1:s1>>24
-{ .mmi; ld1 te31=[te31] // 2/0:te0[s3&0xff]
- add te22=te22,te0 // 2/0:te0+s2>>8&0xff
- extr.u te20=s0,8,8 } // 2/2:s0>>8&0xff
-{ .mmi; ld1 te32=[te32] // 2/1:te0[s0]
- add te23=te23,te0 // 2/1:te0+s3>>8
- shr.u te02=s2,twenty4 };; // 2/2:s2>>24
-{ .mmi; ld1 te22=[te22] // 3/0:te0[s2>>8]
- add te20=te20,te0 // 3/2:te0+s0>>8
- extr.u te21=s1,8,8 } // 3/3:s1>>8&0xff
-{ .mmi; ld1 te23=[te23] // 3/1:te0[s3>>8]
- add te00=te00,te0 // 3/0:te0+s0>>24
- shr.u te03=s3,twenty4 };; // 3/3:s3>>24
-{ .mmi; ld1 te20=[te20] // 4/2:te0[s0>>8]
- add te21=te21,te0 // 4/3:te0+s2
- extr.u te13=s3,16,8 } // 4/0:s1>>16&0xff
-{ .mmi; ld1 te00=[te00] // 4/0:te0[s0>>24]
- add te01=te01,te0 // 4/1:te0+s1>>24
- shr.u te11=s1,sixteen };; // 4/2:s3>>16
-{ .mmi; ld1 te21=[te21] // 5/3:te0[s1>>8]
- add te13=te13,te0 // 5/0:te0+s1>>16
- extr.u te10=s0,16,8 } // 5/1:s2>>16&0xff
-{ .mmi; ld1 te01=[te01] // 5/1:te0[s1>>24]
- add te02=te02,te0 // 5/2:te0+s2>>24
- and te33=s3,maskff };; // 5/2:s1&0xff
-{ .mmi; ld1 te13=[te13] // 6/0:te0[s1>>16]
- add te10=te10,te0 // 6/1:te0+s2>>16
- extr.u te12=s2,16,8 } // 6/3:s0>>16&0xff
-{ .mmi; ld1 te02=[te02] // 6/2:te0[s2>>24]
- add te03=te03,te0 // 6/3:te0+s0>>16
- and te30=s0,maskff };; // 6/3:s2&0xff
-
-{ .mmi; ld1 te10=[te10] // 7/1:te0[s2>>16]
- add te33=te33,te0 // 7/2:te0+s1&0xff
- dep te31=te22,te31,8,8} // 7/0:
-{ .mmi; ld1 te03=[te03] // 7/3:te0[s3>>24]
- add te30=te30,te0 // 7/3:te0+s2
- and te11=te11,maskff};; // 7/2:s3>>16&0xff
-{ .mmi; ld1 te33=[te33] // 8/2:te0[s1]
- add te11=te11,te0 // 8/2:te0+s3>>16
- dep te32=te23,te32,8,8} // 8/1:
-{ .mmi; ld1 te30=[te30] // 8/3:te0[s2]
- add te12=te12,te0 // 8/3:te0+s0>>16
- shl te00=te00,twenty4};; // 8/0:
-{ .mii; ld1 te11=[te11] // 9/2:te0[s3>>16]
- dep te31=te13,te31,16,8 // 9/0:
- shl te01=te01,twenty4};; // 9/1:
-{ .mii; ld1 te12=[te12] // 10/3:te0[s0>>16]
- dep te33=te20,te33,8,8 // 10/2:
- shl te02=te02,twenty4};; // 10/2:
-{ .mii; xor t0=t0,te31 // 11/0:
- dep te30=te21,te30,8,8 // 11/3:
- shl te10=te10,sixteen};; // 11/1:
-{ .mii; xor r16=t0,te00 // 12/0:done!
- dep te33=te11,te33,16,8 // 12/2:
- shl te03=te03,twenty4};; // 12/3:
-{ .mmi; xor t1=t1,te01 // 13/1:
- xor t2=t2,te02 // 13/2:
- dep te30=te12,te30,16,8};; // 13/3:
-{ .mmi; xor t1=t1,te32 // 14/1:
- xor r24=t2,te33 // 14/2:done!
- xor t3=t3,te30 };; // 14/3:
-{ .mib; xor r20=t1,te10 // 15/1:done!
- xor r28=t3,te03 // 15/3:done!
- br.ret.sptk b6 };;
-.endp _ia64_AES_decrypt#
-
-// void AES_decrypt (const void *in,void *out,const AES_KEY *key);
-.global AES_decrypt#
-.proc AES_decrypt#
-.align 32
-AES_decrypt:
- .prologue
- .save ar.pfs,pfssave
-{ .mmi; alloc pfssave=ar.pfs,3,1,12,0
- and out0=3,in0
- mov r3=ip }
-{ .mmi; ADDP in0=0,in0
- mov loc0=psr.um
- ADDP out11=KSZ*60,in2 };; // &AES_KEY->rounds
-
-{ .mmi; ld4 out11=[out11] // AES_KEY->rounds
-	add	out8=(AES_Td#-AES_decrypt#),r3	// Td0
- .save pr,prsave
- mov prsave=pr }
-{ .mmi; rum 1<<3 // clear um.ac
- .save ar.lc,lcsave
- mov lcsave=ar.lc };;
-
- .body
-#if defined(_HPUX_SOURCE) // HPUX is big-endian, cut 15+15 cycles...
-{ .mib; cmp.ne p6,p0=out0,r0
- add out0=4,in0
-(p6) br.dpnt.many .Ld_i_unaligned };;
-
-{ .mmi; ld4 out1=[in0],8 // s0
- and out9=3,in1
- mov twenty4=24 }
-{ .mmi; ld4 out3=[out0],8 // s1
- ADDP rk0=0,in2
- mov sixteen=16 };;
-{ .mmi; ld4 out5=[in0] // s2
- cmp.ne p6,p0=out9,r0
- mov maskff=0xff }
-{ .mmb; ld4 out7=[out0] // s3
- ADDP rk1=KSZ,in2
- br.call.sptk.many b6=_ia64_AES_decrypt };;
-
-{ .mib; ADDP in0=4,in1
- ADDP in1=0,in1
-(p6) br.spnt .Ld_o_unaligned };;
-
-{ .mii; mov psr.um=loc0
- mov ar.pfs=pfssave
- mov ar.lc=lcsave };;
-{ .mmi; st4 [in1]=r16,8 // s0
- st4 [in0]=r20,8 // s1
- mov pr=prsave,0x1ffff };;
-{ .mmb; st4 [in1]=r24 // s2
- st4 [in0]=r28 // s3
- br.ret.sptk.many b0 };;
-#endif
-
-.align 32
-.Ld_i_unaligned:
-{ .mmi; add out0=1,in0
- add out2=2,in0
- add out4=3,in0 };;
-{ .mmi; ld1 r16=[in0],4
- ld1 r17=[out0],4 }//;;
-{ .mmi; ld1 r18=[out2],4
- ld1 out1=[out4],4 };; // s0
-{ .mmi; ld1 r20=[in0],4
- ld1 r21=[out0],4 }//;;
-{ .mmi; ld1 r22=[out2],4
- ld1 out3=[out4],4 };; // s1
-{ .mmi; ld1 r24=[in0],4
- ld1 r25=[out0],4 }//;;
-{ .mmi; ld1 r26=[out2],4
- ld1 out5=[out4],4 };; // s2
-{ .mmi; ld1 r28=[in0]
- ld1 r29=[out0] }//;;
-{ .mmi; ld1 r30=[out2]
- ld1 out7=[out4] };; // s3
-
-{ .mii;
- dep out1=r16,out1,24,8 //;;
- dep out3=r20,out3,24,8 }//;;
-{ .mii; ADDP rk0=0,in2
- dep out5=r24,out5,24,8 //;;
- dep out7=r28,out7,24,8 };;
-{ .mii; ADDP rk1=KSZ,in2
- dep out1=r17,out1,16,8 //;;
- dep out3=r21,out3,16,8 }//;;
-{ .mii; mov twenty4=24
- dep out5=r25,out5,16,8 //;;
- dep out7=r29,out7,16,8 };;
-{ .mii; mov sixteen=16
- dep out1=r18,out1,8,8 //;;
- dep out3=r22,out3,8,8 }//;;
-{ .mii; mov maskff=0xff
- dep out5=r26,out5,8,8 //;;
- dep out7=r30,out7,8,8 };;
-
-{ .mib; br.call.sptk.many b6=_ia64_AES_decrypt };;
-
-.Ld_o_unaligned:
-{ .mii; ADDP out0=0,in1
- extr.u r17=r16,8,8 // s0
- shr.u r19=r16,twenty4 }//;;
-{ .mii; ADDP out1=1,in1
- extr.u r18=r16,16,8
- shr.u r23=r20,twenty4 }//;; // s1
-{ .mii; ADDP out2=2,in1
- extr.u r21=r20,8,8
- shr.u r22=r20,sixteen }//;;
-{ .mii; ADDP out3=3,in1
- extr.u r25=r24,8,8 // s2
- shr.u r27=r24,twenty4 };;
-{ .mii; st1 [out3]=r16,4
- extr.u r26=r24,16,8
- shr.u r31=r28,twenty4 }//;; // s3
-{ .mii; st1 [out2]=r17,4
- extr.u r29=r28,8,8
- shr.u r30=r28,sixteen }//;;
-
-{ .mmi; st1 [out1]=r18,4
- st1 [out0]=r19,4 };;
-{ .mmi; st1 [out3]=r20,4
- st1 [out2]=r21,4 }//;;
-{ .mmi; st1 [out1]=r22,4
- st1 [out0]=r23,4 };;
-{ .mmi; st1 [out3]=r24,4
- st1 [out2]=r25,4
- mov pr=prsave,0x1ffff }//;;
-{ .mmi; st1 [out1]=r26,4
- st1 [out0]=r27,4
- mov ar.pfs=pfssave };;
-{ .mmi; st1 [out3]=r28
- st1 [out2]=r29
- mov ar.lc=lcsave }//;;
-{ .mmi; st1 [out1]=r30
- st1 [out0]=r31 }
-{ .mfb; mov psr.um=loc0 // restore user mask
- br.ret.sptk.many b0 };;
-.endp AES_decrypt#
-
-// leave it in .text segment...
-.align 64
-.global AES_Te#
-.type AES_Te#,@object
-AES_Te: data4 0xc66363a5,0xc66363a5, 0xf87c7c84,0xf87c7c84
- data4 0xee777799,0xee777799, 0xf67b7b8d,0xf67b7b8d
- data4 0xfff2f20d,0xfff2f20d, 0xd66b6bbd,0xd66b6bbd
- data4 0xde6f6fb1,0xde6f6fb1, 0x91c5c554,0x91c5c554
- data4 0x60303050,0x60303050, 0x02010103,0x02010103
- data4 0xce6767a9,0xce6767a9, 0x562b2b7d,0x562b2b7d
- data4 0xe7fefe19,0xe7fefe19, 0xb5d7d762,0xb5d7d762
- data4 0x4dababe6,0x4dababe6, 0xec76769a,0xec76769a
- data4 0x8fcaca45,0x8fcaca45, 0x1f82829d,0x1f82829d
- data4 0x89c9c940,0x89c9c940, 0xfa7d7d87,0xfa7d7d87
- data4 0xeffafa15,0xeffafa15, 0xb25959eb,0xb25959eb
- data4 0x8e4747c9,0x8e4747c9, 0xfbf0f00b,0xfbf0f00b
- data4 0x41adadec,0x41adadec, 0xb3d4d467,0xb3d4d467
- data4 0x5fa2a2fd,0x5fa2a2fd, 0x45afafea,0x45afafea
- data4 0x239c9cbf,0x239c9cbf, 0x53a4a4f7,0x53a4a4f7
- data4 0xe4727296,0xe4727296, 0x9bc0c05b,0x9bc0c05b
- data4 0x75b7b7c2,0x75b7b7c2, 0xe1fdfd1c,0xe1fdfd1c
- data4 0x3d9393ae,0x3d9393ae, 0x4c26266a,0x4c26266a
- data4 0x6c36365a,0x6c36365a, 0x7e3f3f41,0x7e3f3f41
- data4 0xf5f7f702,0xf5f7f702, 0x83cccc4f,0x83cccc4f
- data4 0x6834345c,0x6834345c, 0x51a5a5f4,0x51a5a5f4
- data4 0xd1e5e534,0xd1e5e534, 0xf9f1f108,0xf9f1f108
- data4 0xe2717193,0xe2717193, 0xabd8d873,0xabd8d873
- data4 0x62313153,0x62313153, 0x2a15153f,0x2a15153f
- data4 0x0804040c,0x0804040c, 0x95c7c752,0x95c7c752
- data4 0x46232365,0x46232365, 0x9dc3c35e,0x9dc3c35e
- data4 0x30181828,0x30181828, 0x379696a1,0x379696a1
- data4 0x0a05050f,0x0a05050f, 0x2f9a9ab5,0x2f9a9ab5
- data4 0x0e070709,0x0e070709, 0x24121236,0x24121236
- data4 0x1b80809b,0x1b80809b, 0xdfe2e23d,0xdfe2e23d
- data4 0xcdebeb26,0xcdebeb26, 0x4e272769,0x4e272769
- data4 0x7fb2b2cd,0x7fb2b2cd, 0xea75759f,0xea75759f
- data4 0x1209091b,0x1209091b, 0x1d83839e,0x1d83839e
- data4 0x582c2c74,0x582c2c74, 0x341a1a2e,0x341a1a2e
- data4 0x361b1b2d,0x361b1b2d, 0xdc6e6eb2,0xdc6e6eb2
- data4 0xb45a5aee,0xb45a5aee, 0x5ba0a0fb,0x5ba0a0fb
- data4 0xa45252f6,0xa45252f6, 0x763b3b4d,0x763b3b4d
- data4 0xb7d6d661,0xb7d6d661, 0x7db3b3ce,0x7db3b3ce
- data4 0x5229297b,0x5229297b, 0xdde3e33e,0xdde3e33e
- data4 0x5e2f2f71,0x5e2f2f71, 0x13848497,0x13848497
- data4 0xa65353f5,0xa65353f5, 0xb9d1d168,0xb9d1d168
- data4 0x00000000,0x00000000, 0xc1eded2c,0xc1eded2c
- data4 0x40202060,0x40202060, 0xe3fcfc1f,0xe3fcfc1f
- data4 0x79b1b1c8,0x79b1b1c8, 0xb65b5bed,0xb65b5bed
- data4 0xd46a6abe,0xd46a6abe, 0x8dcbcb46,0x8dcbcb46
- data4 0x67bebed9,0x67bebed9, 0x7239394b,0x7239394b
- data4 0x944a4ade,0x944a4ade, 0x984c4cd4,0x984c4cd4
- data4 0xb05858e8,0xb05858e8, 0x85cfcf4a,0x85cfcf4a
- data4 0xbbd0d06b,0xbbd0d06b, 0xc5efef2a,0xc5efef2a
- data4 0x4faaaae5,0x4faaaae5, 0xedfbfb16,0xedfbfb16
- data4 0x864343c5,0x864343c5, 0x9a4d4dd7,0x9a4d4dd7
- data4 0x66333355,0x66333355, 0x11858594,0x11858594
- data4 0x8a4545cf,0x8a4545cf, 0xe9f9f910,0xe9f9f910
- data4 0x04020206,0x04020206, 0xfe7f7f81,0xfe7f7f81
- data4 0xa05050f0,0xa05050f0, 0x783c3c44,0x783c3c44
- data4 0x259f9fba,0x259f9fba, 0x4ba8a8e3,0x4ba8a8e3
- data4 0xa25151f3,0xa25151f3, 0x5da3a3fe,0x5da3a3fe
- data4 0x804040c0,0x804040c0, 0x058f8f8a,0x058f8f8a
- data4 0x3f9292ad,0x3f9292ad, 0x219d9dbc,0x219d9dbc
- data4 0x70383848,0x70383848, 0xf1f5f504,0xf1f5f504
- data4 0x63bcbcdf,0x63bcbcdf, 0x77b6b6c1,0x77b6b6c1
- data4 0xafdada75,0xafdada75, 0x42212163,0x42212163
- data4 0x20101030,0x20101030, 0xe5ffff1a,0xe5ffff1a
- data4 0xfdf3f30e,0xfdf3f30e, 0xbfd2d26d,0xbfd2d26d
- data4 0x81cdcd4c,0x81cdcd4c, 0x180c0c14,0x180c0c14
- data4 0x26131335,0x26131335, 0xc3ecec2f,0xc3ecec2f
- data4 0xbe5f5fe1,0xbe5f5fe1, 0x359797a2,0x359797a2
- data4 0x884444cc,0x884444cc, 0x2e171739,0x2e171739
- data4 0x93c4c457,0x93c4c457, 0x55a7a7f2,0x55a7a7f2
- data4 0xfc7e7e82,0xfc7e7e82, 0x7a3d3d47,0x7a3d3d47
- data4 0xc86464ac,0xc86464ac, 0xba5d5de7,0xba5d5de7
- data4 0x3219192b,0x3219192b, 0xe6737395,0xe6737395
- data4 0xc06060a0,0xc06060a0, 0x19818198,0x19818198
- data4 0x9e4f4fd1,0x9e4f4fd1, 0xa3dcdc7f,0xa3dcdc7f
- data4 0x44222266,0x44222266, 0x542a2a7e,0x542a2a7e
- data4 0x3b9090ab,0x3b9090ab, 0x0b888883,0x0b888883
- data4 0x8c4646ca,0x8c4646ca, 0xc7eeee29,0xc7eeee29
- data4 0x6bb8b8d3,0x6bb8b8d3, 0x2814143c,0x2814143c
- data4 0xa7dede79,0xa7dede79, 0xbc5e5ee2,0xbc5e5ee2
- data4 0x160b0b1d,0x160b0b1d, 0xaddbdb76,0xaddbdb76
- data4 0xdbe0e03b,0xdbe0e03b, 0x64323256,0x64323256
- data4 0x743a3a4e,0x743a3a4e, 0x140a0a1e,0x140a0a1e
- data4 0x924949db,0x924949db, 0x0c06060a,0x0c06060a
- data4 0x4824246c,0x4824246c, 0xb85c5ce4,0xb85c5ce4
- data4 0x9fc2c25d,0x9fc2c25d, 0xbdd3d36e,0xbdd3d36e
- data4 0x43acacef,0x43acacef, 0xc46262a6,0xc46262a6
- data4 0x399191a8,0x399191a8, 0x319595a4,0x319595a4
- data4 0xd3e4e437,0xd3e4e437, 0xf279798b,0xf279798b
- data4 0xd5e7e732,0xd5e7e732, 0x8bc8c843,0x8bc8c843
- data4 0x6e373759,0x6e373759, 0xda6d6db7,0xda6d6db7
- data4 0x018d8d8c,0x018d8d8c, 0xb1d5d564,0xb1d5d564
- data4 0x9c4e4ed2,0x9c4e4ed2, 0x49a9a9e0,0x49a9a9e0
- data4 0xd86c6cb4,0xd86c6cb4, 0xac5656fa,0xac5656fa
- data4 0xf3f4f407,0xf3f4f407, 0xcfeaea25,0xcfeaea25
- data4 0xca6565af,0xca6565af, 0xf47a7a8e,0xf47a7a8e
- data4 0x47aeaee9,0x47aeaee9, 0x10080818,0x10080818
- data4 0x6fbabad5,0x6fbabad5, 0xf0787888,0xf0787888
- data4 0x4a25256f,0x4a25256f, 0x5c2e2e72,0x5c2e2e72
- data4 0x381c1c24,0x381c1c24, 0x57a6a6f1,0x57a6a6f1
- data4 0x73b4b4c7,0x73b4b4c7, 0x97c6c651,0x97c6c651
- data4 0xcbe8e823,0xcbe8e823, 0xa1dddd7c,0xa1dddd7c
- data4 0xe874749c,0xe874749c, 0x3e1f1f21,0x3e1f1f21
- data4 0x964b4bdd,0x964b4bdd, 0x61bdbddc,0x61bdbddc
- data4 0x0d8b8b86,0x0d8b8b86, 0x0f8a8a85,0x0f8a8a85
- data4 0xe0707090,0xe0707090, 0x7c3e3e42,0x7c3e3e42
- data4 0x71b5b5c4,0x71b5b5c4, 0xcc6666aa,0xcc6666aa
- data4 0x904848d8,0x904848d8, 0x06030305,0x06030305
- data4 0xf7f6f601,0xf7f6f601, 0x1c0e0e12,0x1c0e0e12
- data4 0xc26161a3,0xc26161a3, 0x6a35355f,0x6a35355f
- data4 0xae5757f9,0xae5757f9, 0x69b9b9d0,0x69b9b9d0
- data4 0x17868691,0x17868691, 0x99c1c158,0x99c1c158
- data4 0x3a1d1d27,0x3a1d1d27, 0x279e9eb9,0x279e9eb9
- data4 0xd9e1e138,0xd9e1e138, 0xebf8f813,0xebf8f813
- data4 0x2b9898b3,0x2b9898b3, 0x22111133,0x22111133
- data4 0xd26969bb,0xd26969bb, 0xa9d9d970,0xa9d9d970
- data4 0x078e8e89,0x078e8e89, 0x339494a7,0x339494a7
- data4 0x2d9b9bb6,0x2d9b9bb6, 0x3c1e1e22,0x3c1e1e22
- data4 0x15878792,0x15878792, 0xc9e9e920,0xc9e9e920
- data4 0x87cece49,0x87cece49, 0xaa5555ff,0xaa5555ff
- data4 0x50282878,0x50282878, 0xa5dfdf7a,0xa5dfdf7a
- data4 0x038c8c8f,0x038c8c8f, 0x59a1a1f8,0x59a1a1f8
- data4 0x09898980,0x09898980, 0x1a0d0d17,0x1a0d0d17
- data4 0x65bfbfda,0x65bfbfda, 0xd7e6e631,0xd7e6e631
- data4 0x844242c6,0x844242c6, 0xd06868b8,0xd06868b8
- data4 0x824141c3,0x824141c3, 0x299999b0,0x299999b0
- data4 0x5a2d2d77,0x5a2d2d77, 0x1e0f0f11,0x1e0f0f11
- data4 0x7bb0b0cb,0x7bb0b0cb, 0xa85454fc,0xa85454fc
- data4 0x6dbbbbd6,0x6dbbbbd6, 0x2c16163a,0x2c16163a
-// Te4:
- data1 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
- data1 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
- data1 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
- data1 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
- data1 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
- data1 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
- data1 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
- data1 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
- data1 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
- data1 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
- data1 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
- data1 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
- data1 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
- data1 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
- data1 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
- data1 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
- data1 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
- data1 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
- data1 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
- data1 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
- data1 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
- data1 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
- data1 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
- data1 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
- data1 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
- data1 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
- data1 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
- data1 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
- data1 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
- data1 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
- data1 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
- data1 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-.size	AES_Te#,2048+256	// HP-UX assembler fails on ".-AES_Te#"
-
-.align 64
-.global AES_Td#
-.type AES_Td#,@object
-AES_Td: data4 0x51f4a750,0x51f4a750, 0x7e416553,0x7e416553
- data4 0x1a17a4c3,0x1a17a4c3, 0x3a275e96,0x3a275e96
- data4 0x3bab6bcb,0x3bab6bcb, 0x1f9d45f1,0x1f9d45f1
- data4 0xacfa58ab,0xacfa58ab, 0x4be30393,0x4be30393
- data4 0x2030fa55,0x2030fa55, 0xad766df6,0xad766df6
- data4 0x88cc7691,0x88cc7691, 0xf5024c25,0xf5024c25
- data4 0x4fe5d7fc,0x4fe5d7fc, 0xc52acbd7,0xc52acbd7
- data4 0x26354480,0x26354480, 0xb562a38f,0xb562a38f
- data4 0xdeb15a49,0xdeb15a49, 0x25ba1b67,0x25ba1b67
- data4 0x45ea0e98,0x45ea0e98, 0x5dfec0e1,0x5dfec0e1
- data4 0xc32f7502,0xc32f7502, 0x814cf012,0x814cf012
- data4 0x8d4697a3,0x8d4697a3, 0x6bd3f9c6,0x6bd3f9c6
- data4 0x038f5fe7,0x038f5fe7, 0x15929c95,0x15929c95
- data4 0xbf6d7aeb,0xbf6d7aeb, 0x955259da,0x955259da
- data4 0xd4be832d,0xd4be832d, 0x587421d3,0x587421d3
- data4 0x49e06929,0x49e06929, 0x8ec9c844,0x8ec9c844
- data4 0x75c2896a,0x75c2896a, 0xf48e7978,0xf48e7978
- data4 0x99583e6b,0x99583e6b, 0x27b971dd,0x27b971dd
- data4 0xbee14fb6,0xbee14fb6, 0xf088ad17,0xf088ad17
- data4 0xc920ac66,0xc920ac66, 0x7dce3ab4,0x7dce3ab4
- data4 0x63df4a18,0x63df4a18, 0xe51a3182,0xe51a3182
- data4 0x97513360,0x97513360, 0x62537f45,0x62537f45
- data4 0xb16477e0,0xb16477e0, 0xbb6bae84,0xbb6bae84
- data4 0xfe81a01c,0xfe81a01c, 0xf9082b94,0xf9082b94
- data4 0x70486858,0x70486858, 0x8f45fd19,0x8f45fd19
- data4 0x94de6c87,0x94de6c87, 0x527bf8b7,0x527bf8b7
- data4 0xab73d323,0xab73d323, 0x724b02e2,0x724b02e2
- data4 0xe31f8f57,0xe31f8f57, 0x6655ab2a,0x6655ab2a
- data4 0xb2eb2807,0xb2eb2807, 0x2fb5c203,0x2fb5c203
- data4 0x86c57b9a,0x86c57b9a, 0xd33708a5,0xd33708a5
- data4 0x302887f2,0x302887f2, 0x23bfa5b2,0x23bfa5b2
- data4 0x02036aba,0x02036aba, 0xed16825c,0xed16825c
- data4 0x8acf1c2b,0x8acf1c2b, 0xa779b492,0xa779b492
- data4 0xf307f2f0,0xf307f2f0, 0x4e69e2a1,0x4e69e2a1
- data4 0x65daf4cd,0x65daf4cd, 0x0605bed5,0x0605bed5
- data4 0xd134621f,0xd134621f, 0xc4a6fe8a,0xc4a6fe8a
- data4 0x342e539d,0x342e539d, 0xa2f355a0,0xa2f355a0
- data4 0x058ae132,0x058ae132, 0xa4f6eb75,0xa4f6eb75
- data4 0x0b83ec39,0x0b83ec39, 0x4060efaa,0x4060efaa
- data4 0x5e719f06,0x5e719f06, 0xbd6e1051,0xbd6e1051
- data4 0x3e218af9,0x3e218af9, 0x96dd063d,0x96dd063d
- data4 0xdd3e05ae,0xdd3e05ae, 0x4de6bd46,0x4de6bd46
- data4 0x91548db5,0x91548db5, 0x71c45d05,0x71c45d05
- data4 0x0406d46f,0x0406d46f, 0x605015ff,0x605015ff
- data4 0x1998fb24,0x1998fb24, 0xd6bde997,0xd6bde997
- data4 0x894043cc,0x894043cc, 0x67d99e77,0x67d99e77
- data4 0xb0e842bd,0xb0e842bd, 0x07898b88,0x07898b88
- data4 0xe7195b38,0xe7195b38, 0x79c8eedb,0x79c8eedb
- data4 0xa17c0a47,0xa17c0a47, 0x7c420fe9,0x7c420fe9
- data4 0xf8841ec9,0xf8841ec9, 0x00000000,0x00000000
- data4 0x09808683,0x09808683, 0x322bed48,0x322bed48
- data4 0x1e1170ac,0x1e1170ac, 0x6c5a724e,0x6c5a724e
- data4 0xfd0efffb,0xfd0efffb, 0x0f853856,0x0f853856
- data4 0x3daed51e,0x3daed51e, 0x362d3927,0x362d3927
- data4 0x0a0fd964,0x0a0fd964, 0x685ca621,0x685ca621
- data4 0x9b5b54d1,0x9b5b54d1, 0x24362e3a,0x24362e3a
- data4 0x0c0a67b1,0x0c0a67b1, 0x9357e70f,0x9357e70f
- data4 0xb4ee96d2,0xb4ee96d2, 0x1b9b919e,0x1b9b919e
- data4 0x80c0c54f,0x80c0c54f, 0x61dc20a2,0x61dc20a2
- data4 0x5a774b69,0x5a774b69, 0x1c121a16,0x1c121a16
- data4 0xe293ba0a,0xe293ba0a, 0xc0a02ae5,0xc0a02ae5
- data4 0x3c22e043,0x3c22e043, 0x121b171d,0x121b171d
- data4 0x0e090d0b,0x0e090d0b, 0xf28bc7ad,0xf28bc7ad
- data4 0x2db6a8b9,0x2db6a8b9, 0x141ea9c8,0x141ea9c8
- data4 0x57f11985,0x57f11985, 0xaf75074c,0xaf75074c
- data4 0xee99ddbb,0xee99ddbb, 0xa37f60fd,0xa37f60fd
- data4 0xf701269f,0xf701269f, 0x5c72f5bc,0x5c72f5bc
- data4 0x44663bc5,0x44663bc5, 0x5bfb7e34,0x5bfb7e34
- data4 0x8b432976,0x8b432976, 0xcb23c6dc,0xcb23c6dc
- data4 0xb6edfc68,0xb6edfc68, 0xb8e4f163,0xb8e4f163
- data4 0xd731dcca,0xd731dcca, 0x42638510,0x42638510
- data4 0x13972240,0x13972240, 0x84c61120,0x84c61120
- data4 0x854a247d,0x854a247d, 0xd2bb3df8,0xd2bb3df8
- data4 0xaef93211,0xaef93211, 0xc729a16d,0xc729a16d
- data4 0x1d9e2f4b,0x1d9e2f4b, 0xdcb230f3,0xdcb230f3
- data4 0x0d8652ec,0x0d8652ec, 0x77c1e3d0,0x77c1e3d0
- data4 0x2bb3166c,0x2bb3166c, 0xa970b999,0xa970b999
- data4 0x119448fa,0x119448fa, 0x47e96422,0x47e96422
- data4 0xa8fc8cc4,0xa8fc8cc4, 0xa0f03f1a,0xa0f03f1a
- data4 0x567d2cd8,0x567d2cd8, 0x223390ef,0x223390ef
- data4 0x87494ec7,0x87494ec7, 0xd938d1c1,0xd938d1c1
- data4 0x8ccaa2fe,0x8ccaa2fe, 0x98d40b36,0x98d40b36
- data4 0xa6f581cf,0xa6f581cf, 0xa57ade28,0xa57ade28
- data4 0xdab78e26,0xdab78e26, 0x3fadbfa4,0x3fadbfa4
- data4 0x2c3a9de4,0x2c3a9de4, 0x5078920d,0x5078920d
- data4 0x6a5fcc9b,0x6a5fcc9b, 0x547e4662,0x547e4662
- data4 0xf68d13c2,0xf68d13c2, 0x90d8b8e8,0x90d8b8e8
- data4 0x2e39f75e,0x2e39f75e, 0x82c3aff5,0x82c3aff5
- data4 0x9f5d80be,0x9f5d80be, 0x69d0937c,0x69d0937c
- data4 0x6fd52da9,0x6fd52da9, 0xcf2512b3,0xcf2512b3
- data4 0xc8ac993b,0xc8ac993b, 0x10187da7,0x10187da7
- data4 0xe89c636e,0xe89c636e, 0xdb3bbb7b,0xdb3bbb7b
- data4 0xcd267809,0xcd267809, 0x6e5918f4,0x6e5918f4
- data4 0xec9ab701,0xec9ab701, 0x834f9aa8,0x834f9aa8
- data4 0xe6956e65,0xe6956e65, 0xaaffe67e,0xaaffe67e
- data4 0x21bccf08,0x21bccf08, 0xef15e8e6,0xef15e8e6
- data4 0xbae79bd9,0xbae79bd9, 0x4a6f36ce,0x4a6f36ce
- data4 0xea9f09d4,0xea9f09d4, 0x29b07cd6,0x29b07cd6
- data4 0x31a4b2af,0x31a4b2af, 0x2a3f2331,0x2a3f2331
- data4 0xc6a59430,0xc6a59430, 0x35a266c0,0x35a266c0
- data4 0x744ebc37,0x744ebc37, 0xfc82caa6,0xfc82caa6
- data4 0xe090d0b0,0xe090d0b0, 0x33a7d815,0x33a7d815
- data4 0xf104984a,0xf104984a, 0x41ecdaf7,0x41ecdaf7
- data4 0x7fcd500e,0x7fcd500e, 0x1791f62f,0x1791f62f
- data4 0x764dd68d,0x764dd68d, 0x43efb04d,0x43efb04d
- data4 0xccaa4d54,0xccaa4d54, 0xe49604df,0xe49604df
- data4 0x9ed1b5e3,0x9ed1b5e3, 0x4c6a881b,0x4c6a881b
- data4 0xc12c1fb8,0xc12c1fb8, 0x4665517f,0x4665517f
- data4 0x9d5eea04,0x9d5eea04, 0x018c355d,0x018c355d
- data4 0xfa877473,0xfa877473, 0xfb0b412e,0xfb0b412e
- data4 0xb3671d5a,0xb3671d5a, 0x92dbd252,0x92dbd252
- data4 0xe9105633,0xe9105633, 0x6dd64713,0x6dd64713
- data4 0x9ad7618c,0x9ad7618c, 0x37a10c7a,0x37a10c7a
- data4 0x59f8148e,0x59f8148e, 0xeb133c89,0xeb133c89
- data4 0xcea927ee,0xcea927ee, 0xb761c935,0xb761c935
- data4 0xe11ce5ed,0xe11ce5ed, 0x7a47b13c,0x7a47b13c
- data4 0x9cd2df59,0x9cd2df59, 0x55f2733f,0x55f2733f
- data4 0x1814ce79,0x1814ce79, 0x73c737bf,0x73c737bf
- data4 0x53f7cdea,0x53f7cdea, 0x5ffdaa5b,0x5ffdaa5b
- data4 0xdf3d6f14,0xdf3d6f14, 0x7844db86,0x7844db86
- data4 0xcaaff381,0xcaaff381, 0xb968c43e,0xb968c43e
- data4 0x3824342c,0x3824342c, 0xc2a3405f,0xc2a3405f
- data4 0x161dc372,0x161dc372, 0xbce2250c,0xbce2250c
- data4 0x283c498b,0x283c498b, 0xff0d9541,0xff0d9541
- data4 0x39a80171,0x39a80171, 0x080cb3de,0x080cb3de
- data4 0xd8b4e49c,0xd8b4e49c, 0x6456c190,0x6456c190
- data4 0x7bcb8461,0x7bcb8461, 0xd532b670,0xd532b670
- data4 0x486c5c74,0x486c5c74, 0xd0b85742,0xd0b85742
-// Td4:
- data1 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
- data1 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
- data1 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
- data1 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
- data1 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
- data1 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
- data1 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
- data1 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
- data1 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
- data1 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
- data1 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
- data1 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
- data1 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
- data1 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
- data1 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
- data1 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
- data1 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
- data1 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
- data1 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
- data1 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
- data1 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
- data1 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
- data1 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
- data1 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
- data1 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
- data1 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
- data1 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
- data1 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
- data1 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
- data1 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
- data1 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
- data1 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.size AES_Td#,2048+256 // HP-UX assembler fails to ".-AES_Td#"
diff --git a/app/openssl/crypto/aes/asm/aes-mips.S b/app/openssl/crypto/aes/asm/aes-mips.S
deleted file mode 100644
index f5750bf8..00000000
--- a/app/openssl/crypto/aes/asm/aes-mips.S
+++ /dev/null
@@ -1,1337 +0,0 @@
-.text
-#ifdef OPENSSL_FIPSCANISTER
-# include <openssl/fipssyms.h>
-#endif
-
-#if !defined(__vxworks) || defined(__pic__)
-.option pic2
-#endif
-.set noat
-.align 5
-.ent _mips_AES_encrypt
-_mips_AES_encrypt:
- .frame $29,0,$31
- .set reorder
- lw $12,0($6)
- lw $13,4($6)
- lw $14,8($6)
- lw $15,12($6)
- lw $30,240($6)
- add $3,$6,16
-
- xor $8,$12
- xor $9,$13
- xor $10,$14
- xor $11,$15
-
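-	# each shift/0x3fc-mask pair below extracts one state byte
-	# pre-multiplied by 4, i.e. a ready-made index into the
-	# word-wide Te table at $7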
- sub $30,1
- srl $1,$9,6
-.Loop_enc:
- srl $2,$10,6
- srl $24,$11,6
- srl $25,$8,6
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lwl $12,2($1) # Te1[s1>>16]
- lwl $13,2($2) # Te1[s2>>16]
- lwl $14,2($24) # Te1[s3>>16]
- lwl $15,2($25) # Te1[s0>>16]
- lwr $12,3($1) # Te1[s1>>16]
- lwr $13,3($2) # Te1[s2>>16]
- lwr $14,3($24) # Te1[s3>>16]
- lwr $15,3($25) # Te1[s0>>16]
-
- srl $1,$10,14
- srl $2,$11,14
- srl $24,$8,14
- srl $25,$9,14
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lwl $16,1($1) # Te2[s2>>8]
- lwl $17,1($2) # Te2[s3>>8]
- lwl $18,1($24) # Te2[s0>>8]
- lwl $19,1($25) # Te2[s1>>8]
- lwr $16,2($1) # Te2[s2>>8]
- lwr $17,2($2) # Te2[s3>>8]
- lwr $18,2($24) # Te2[s0>>8]
- lwr $19,2($25) # Te2[s1>>8]
-
- srl $1,$11,22
- srl $2,$8,22
- srl $24,$9,22
- srl $25,$10,22
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lwl $20,0($1) # Te3[s3]
- lwl $21,0($2) # Te3[s0]
- lwl $22,0($24) # Te3[s1]
- lwl $23,0($25) # Te3[s2]
- lwr $20,1($1) # Te3[s3]
- lwr $21,1($2) # Te3[s0]
- lwr $22,1($24) # Te3[s1]
- lwr $23,1($25) # Te3[s2]
-
- sll $1,$8,2
- sll $2,$9,2
- sll $24,$10,2
- sll $25,$11,2
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
- lw $16,0($1) # Te0[s0>>24]
- lw $17,0($2) # Te0[s1>>24]
- lw $18,0($24) # Te0[s2>>24]
- lw $19,0($25) # Te0[s3>>24]
-
- lw $8,0($3)
- lw $9,4($3)
- lw $10,8($3)
- lw $11,12($3)
-
- xor $12,$20
- xor $13,$21
- xor $14,$22
- xor $15,$23
-
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
-
- sub $30,1
- add $3,16
- xor $8,$12
- xor $9,$13
- xor $10,$14
- xor $11,$15
- .set noreorder
- bnez $30,.Loop_enc
- srl $1,$9,6
-
- .set reorder
- srl $2,$10,6
- srl $24,$11,6
- srl $25,$8,6
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $12,2($1) # Te4[s1>>16]
- lbu $13,2($2) # Te4[s2>>16]
- lbu $14,2($24) # Te4[s3>>16]
- lbu $15,2($25) # Te4[s0>>16]
-
- srl $1,$10,14
- srl $2,$11,14
- srl $24,$8,14
- srl $25,$9,14
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $16,2($1) # Te4[s2>>8]
- lbu $17,2($2) # Te4[s3>>8]
- lbu $18,2($24) # Te4[s0>>8]
- lbu $19,2($25) # Te4[s1>>8]
-
- sll $1,$8,2
- sll $2,$9,2
- sll $24,$10,2
- sll $25,$11,2
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $20,2($1) # Te4[s0>>24]
- lbu $21,2($2) # Te4[s1>>24]
- lbu $22,2($24) # Te4[s2>>24]
- lbu $23,2($25) # Te4[s3>>24]
-
- srl $1,$11,22
- srl $2,$8,22
- srl $24,$9,22
- srl $25,$10,22
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
-
- sll $12,$12,8
- sll $13,$13,8
- sll $14,$14,8
- sll $15,$15,8
-
- sll $16,$16,16
- sll $17,$17,16
- sll $18,$18,16
- sll $19,$19,16
-
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
-
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $16,2($1) # Te4[s3]
- lbu $17,2($2) # Te4[s0]
- lbu $18,2($24) # Te4[s1]
- lbu $19,2($25) # Te4[s2]
-
- #sll $20,$20,0
- #sll $21,$21,0
- #sll $22,$22,0
- #sll $23,$23,0
-
- lw $8,0($3)
- lw $9,4($3)
- lw $10,8($3)
- lw $11,12($3)
-
- xor $12,$20
- xor $13,$21
- xor $14,$22
- xor $15,$23
-
- sll $16,$16,24
- sll $17,$17,24
- sll $18,$18,24
- sll $19,$19,24
-
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
-
- xor $8,$12
- xor $9,$13
- xor $10,$14
- xor $11,$15
-
- jr $31
-.end _mips_AES_encrypt
-
-.align 5
-.globl AES_encrypt
-.ent AES_encrypt
-AES_encrypt:
- .frame $29,64,$31
- .mask 3237937152,-4
- .set noreorder
- .cpload $25
- sub $29,64
- sw $31,64-1*4($29)
- sw $30,64-2*4($29)
- sw $23,64-3*4($29)
- sw $22,64-4*4($29)
- sw $21,64-5*4($29)
- sw $20,64-6*4($29)
- sw $19,64-7*4($29)
- sw $18,64-8*4($29)
- sw $17,64-9*4($29)
- sw $16,64-10*4($29)
- .set reorder
- la $7,AES_Te # PIC-ified 'load address'
-
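-	# lwl/lwr (and the swl/swr stores below) handle the input and
-	# output blocks even when they are not word-aligned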
- lwl $8,0+3($4)
- lwl $9,4+3($4)
- lwl $10,8+3($4)
- lwl $11,12+3($4)
- lwr $8,0+0($4)
- lwr $9,4+0($4)
- lwr $10,8+0($4)
- lwr $11,12+0($4)
-
- bal _mips_AES_encrypt
-
- swr $8,0+0($5)
- swr $9,4+0($5)
- swr $10,8+0($5)
- swr $11,12+0($5)
- swl $8,0+3($5)
- swl $9,4+3($5)
- swl $10,8+3($5)
- swl $11,12+3($5)
-
- .set noreorder
- lw $31,64-1*4($29)
- lw $30,64-2*4($29)
- lw $23,64-3*4($29)
- lw $22,64-4*4($29)
- lw $21,64-5*4($29)
- lw $20,64-6*4($29)
- lw $19,64-7*4($29)
- lw $18,64-8*4($29)
- lw $17,64-9*4($29)
- lw $16,64-10*4($29)
- jr $31
- add $29,64
-.end AES_encrypt
-.align 5
-.ent _mips_AES_decrypt
-_mips_AES_decrypt:
- .frame $29,0,$31
- .set reorder
- lw $12,0($6)
- lw $13,4($6)
- lw $14,8($6)
- lw $15,12($6)
- lw $30,240($6)
- add $3,$6,16
-
- xor $8,$12
- xor $9,$13
- xor $10,$14
- xor $11,$15
-
- sub $30,1
- srl $1,$11,6
-.Loop_dec:
- srl $2,$8,6
- srl $24,$9,6
- srl $25,$10,6
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lwl $12,2($1) # Td1[s3>>16]
- lwl $13,2($2) # Td1[s0>>16]
- lwl $14,2($24) # Td1[s1>>16]
- lwl $15,2($25) # Td1[s2>>16]
- lwr $12,3($1) # Td1[s3>>16]
- lwr $13,3($2) # Td1[s0>>16]
- lwr $14,3($24) # Td1[s1>>16]
- lwr $15,3($25) # Td1[s2>>16]
-
- srl $1,$10,14
- srl $2,$11,14
- srl $24,$8,14
- srl $25,$9,14
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lwl $16,1($1) # Td2[s2>>8]
- lwl $17,1($2) # Td2[s3>>8]
- lwl $18,1($24) # Td2[s0>>8]
- lwl $19,1($25) # Td2[s1>>8]
- lwr $16,2($1) # Td2[s2>>8]
- lwr $17,2($2) # Td2[s3>>8]
- lwr $18,2($24) # Td2[s0>>8]
- lwr $19,2($25) # Td2[s1>>8]
-
- srl $1,$9,22
- srl $2,$10,22
- srl $24,$11,22
- srl $25,$8,22
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lwl $20,0($1) # Td3[s1]
- lwl $21,0($2) # Td3[s2]
- lwl $22,0($24) # Td3[s3]
- lwl $23,0($25) # Td3[s0]
- lwr $20,1($1) # Td3[s1]
- lwr $21,1($2) # Td3[s2]
- lwr $22,1($24) # Td3[s3]
- lwr $23,1($25) # Td3[s0]
-
- sll $1,$8,2
- sll $2,$9,2
- sll $24,$10,2
- sll $25,$11,2
- and $1,0x3fc
- and $2,0x3fc
- and $24,0x3fc
- and $25,0x3fc
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
-
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
-
-
- lw $16,0($1) # Td0[s0>>24]
- lw $17,0($2) # Td0[s1>>24]
- lw $18,0($24) # Td0[s2>>24]
- lw $19,0($25) # Td0[s3>>24]
-
- lw $8,0($3)
- lw $9,4($3)
- lw $10,8($3)
- lw $11,12($3)
-
- xor $12,$20
- xor $13,$21
- xor $14,$22
- xor $15,$23
-
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
-
- sub $30,1
- add $3,16
- xor $8,$12
- xor $9,$13
- xor $10,$14
- xor $11,$15
- .set noreorder
- bnez $30,.Loop_dec
- srl $1,$11,6
-
- .set reorder
- lw $16,1024($7) # prefetch Td4
- lw $17,1024+32($7)
- lw $18,1024+64($7)
- lw $19,1024+96($7)
- lw $20,1024+128($7)
- lw $21,1024+160($7)
- lw $22,1024+192($7)
- lw $23,1024+224($7)
-
- srl $1,$11,8
- srl $2,$8,8
- srl $24,$9,8
- srl $25,$10,8
- and $1,0xff
- and $2,0xff
- and $24,0xff
- and $25,0xff
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $12,1024($1) # Td4[s3>>16]
- lbu $13,1024($2) # Td4[s0>>16]
- lbu $14,1024($24) # Td4[s1>>16]
- lbu $15,1024($25) # Td4[s2>>16]
-
- srl $1,$10,16
- srl $2,$11,16
- srl $24,$8,16
- srl $25,$9,16
- and $1,0xff
- and $2,0xff
- and $24,0xff
- and $25,0xff
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $16,1024($1) # Td4[s2>>8]
- lbu $17,1024($2) # Td4[s3>>8]
- lbu $18,1024($24) # Td4[s0>>8]
- lbu $19,1024($25) # Td4[s1>>8]
-
- and $1,$8,0xff
- and $2,$9,0xff
- and $24,$10,0xff
- and $25,$11,0xff
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $20,1024($1) # Td4[s0>>24]
- lbu $21,1024($2) # Td4[s1>>24]
- lbu $22,1024($24) # Td4[s2>>24]
- lbu $23,1024($25) # Td4[s3>>24]
-
- srl $1,$9,24
- srl $2,$10,24
- srl $24,$11,24
- srl $25,$8,24
-
- sll $12,$12,8
- sll $13,$13,8
- sll $14,$14,8
- sll $15,$15,8
-
- sll $16,$16,16
- sll $17,$17,16
- sll $18,$18,16
- sll $19,$19,16
-
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
-
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $16,1024($1) # Td4[s1]
- lbu $17,1024($2) # Td4[s2]
- lbu $18,1024($24) # Td4[s3]
- lbu $19,1024($25) # Td4[s0]
-
- #sll $20,$20,0
- #sll $21,$21,0
- #sll $22,$22,0
- #sll $23,$23,0
-
- lw $8,0($3)
- lw $9,4($3)
- lw $10,8($3)
- lw $11,12($3)
-
- sll $16,$16,24
- sll $17,$17,24
- sll $18,$18,24
- sll $19,$19,24
-
-
- xor $12,$20
- xor $13,$21
- xor $14,$22
- xor $15,$23
-
- xor $12,$16
- xor $13,$17
- xor $14,$18
- xor $15,$19
-
- xor $8,$12
- xor $9,$13
- xor $10,$14
- xor $11,$15
-
- jr $31
-.end _mips_AES_decrypt
-
-.align 5
-.globl AES_decrypt
-.ent AES_decrypt
-AES_decrypt:
- .frame $29,64,$31
- .mask 3237937152,-4
- .set noreorder
- .cpload $25
- sub $29,64
- sw $31,64-1*4($29)
- sw $30,64-2*4($29)
- sw $23,64-3*4($29)
- sw $22,64-4*4($29)
- sw $21,64-5*4($29)
- sw $20,64-6*4($29)
- sw $19,64-7*4($29)
- sw $18,64-8*4($29)
- sw $17,64-9*4($29)
- sw $16,64-10*4($29)
- .set reorder
- la $7,AES_Td # PIC-ified 'load address'
-
- lwl $8,0+3($4)
- lwl $9,4+3($4)
- lwl $10,8+3($4)
- lwl $11,12+3($4)
- lwr $8,0+0($4)
- lwr $9,4+0($4)
- lwr $10,8+0($4)
- lwr $11,12+0($4)
-
- bal _mips_AES_decrypt
-
- swr $8,0+0($5)
- swr $9,4+0($5)
- swr $10,8+0($5)
- swr $11,12+0($5)
- swl $8,0+3($5)
- swl $9,4+3($5)
- swl $10,8+3($5)
- swl $11,12+3($5)
-
- .set noreorder
- lw $31,64-1*4($29)
- lw $30,64-2*4($29)
- lw $23,64-3*4($29)
- lw $22,64-4*4($29)
- lw $21,64-5*4($29)
- lw $20,64-6*4($29)
- lw $19,64-7*4($29)
- lw $18,64-8*4($29)
- lw $17,64-9*4($29)
- lw $16,64-10*4($29)
- jr $31
- add $29,64
-.end AES_decrypt
-.align 5
-.ent _mips_AES_set_encrypt_key
-_mips_AES_set_encrypt_key:
- .frame $29,0,$31
- .set noreorder
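-	# arguments: $4=userKey, $5=bits, $6=key schedule, $7=AES_Te
-	# (Te4 at 1024($7), rcon at 1024+256($7)); returns -1 in $2 for
-	# a NULL pointer and -2 for an unsupported key length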
- beqz $4,.Lekey_done
- li $2,-1
- beqz $6,.Lekey_done
- add $3,$7,1024+256
-
- .set reorder
- lwl $8,0+3($4) # load 128 bits
- lwl $9,4+3($4)
- lwl $10,8+3($4)
- lwl $11,12+3($4)
- li $1,128
- lwr $8,0+0($4)
- lwr $9,4+0($4)
- lwr $10,8+0($4)
- lwr $11,12+0($4)
- .set noreorder
- beq $5,$1,.L128bits
- li $30,10
-
- .set reorder
- lwl $12,16+3($4) # load 192 bits
- lwl $13,20+3($4)
- li $1,192
- lwr $12,16+0($4)
- lwr $13,20+0($4)
- .set noreorder
- beq $5,$1,.L192bits
- li $30,8
-
- .set reorder
- lwl $14,24+3($4) # load 256 bits
- lwl $15,28+3($4)
- li $1,256
- lwr $14,24+0($4)
- lwr $15,28+0($4)
- .set noreorder
- beq $5,$1,.L256bits
- li $30,7
-
- b .Lekey_done
- li $2,-2
-
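-# one key-schedule round: $8 ^= SubWord(RotWord($11)) ^ rcon, with the
-# remaining words chained by xor; the lbu lookups below read the S-box
-# (Te4) at 1024($7)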
-.align 4
-.L128bits:
- .set reorder
- srl $1,$11,16
- srl $2,$11,8
- and $1,0xff
- and $2,0xff
- and $24,$11,0xff
- srl $25,$11,24
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $1,1024($1)
- lbu $2,1024($2)
- lbu $24,1024($24)
- lbu $25,1024($25)
-
- sw $8,0($6)
- sw $9,4($6)
- sw $10,8($6)
- sw $11,12($6)
- sub $30,1
- add $6,16
-
- sll $1,$1,8
- #sll $2,$2,0
- sll $24,$24,24
- sll $25,$25,16
-
- xor $8,$1
- lw $1,0($3)
- xor $8,$2
- xor $8,$24
- xor $8,$25
- xor $8,$1
-
- xor $9,$8
- xor $10,$9
- xor $11,$10
-
- .set noreorder
- bnez $30,.L128bits
- add $3,4
-
- sw $8,0($6)
- sw $9,4($6)
- sw $10,8($6)
- li $30,10
- sw $11,12($6)
- li $2,0
- sw $30,80($6)
- b .Lekey_done
- sub $6,10*16
-
-.align 4
-.L192bits:
- .set reorder
- srl $1,$13,16
- srl $2,$13,8
- and $1,0xff
- and $2,0xff
- and $24,$13,0xff
- srl $25,$13,24
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $1,1024($1)
- lbu $2,1024($2)
- lbu $24,1024($24)
- lbu $25,1024($25)
-
- sw $8,0($6)
- sw $9,4($6)
- sw $10,8($6)
- sw $11,12($6)
- sw $12,16($6)
- sw $13,20($6)
- sub $30,1
- add $6,24
-
- sll $1,$1,8
- #sll $2,$2,0
- sll $24,$24,24
- sll $25,$25,16
-
- xor $8,$1
- lw $1,0($3)
- xor $8,$2
- xor $8,$24
- xor $8,$25
- xor $8,$1
-
- xor $9,$8
- xor $10,$9
- xor $11,$10
- xor $12,$11
- xor $13,$12
-
- .set noreorder
- bnez $30,.L192bits
- add $3,4
-
- sw $8,0($6)
- sw $9,4($6)
- sw $10,8($6)
- li $30,12
- sw $11,12($6)
- li $2,0
- sw $30,48($6)
- b .Lekey_done
- sub $6,12*16
-
-.align 4
-.L256bits:
- .set reorder
- srl $1,$15,16
- srl $2,$15,8
- and $1,0xff
- and $2,0xff
- and $24,$15,0xff
- srl $25,$15,24
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $1,1024($1)
- lbu $2,1024($2)
- lbu $24,1024($24)
- lbu $25,1024($25)
-
- sw $8,0($6)
- sw $9,4($6)
- sw $10,8($6)
- sw $11,12($6)
- sw $12,16($6)
- sw $13,20($6)
- sw $14,24($6)
- sw $15,28($6)
- sub $30,1
-
- sll $1,$1,8
- #sll $2,$2,0
- sll $24,$24,24
- sll $25,$25,16
-
- xor $8,$1
- lw $1,0($3)
- xor $8,$2
- xor $8,$24
- xor $8,$25
- xor $8,$1
-
- xor $9,$8
- xor $10,$9
- xor $11,$10
- beqz $30,.L256bits_done
-
- srl $1,$11,24
- srl $2,$11,16
- srl $24,$11,8
- and $25,$11,0xff
- and $2,0xff
- and $24,0xff
- add $1,$7
- add $2,$7
- add $24,$7
- add $25,$7
- lbu $1,1024($1)
- lbu $2,1024($2)
- lbu $24,1024($24)
- lbu $25,1024($25)
- sll $1,24
- sll $2,16
- sll $24,8
-
- xor $12,$1
- xor $12,$2
- xor $12,$24
- xor $12,$25
-
- xor $13,$12
- xor $14,$13
- xor $15,$14
-
- add $6,32
- .set noreorder
- b .L256bits
- add $3,4
-
-.L256bits_done:
- sw $8,32($6)
- sw $9,36($6)
- sw $10,40($6)
- li $30,14
- sw $11,44($6)
- li $2,0
- sw $30,48($6)
- sub $6,12*16
-
-.Lekey_done:
- jr $31
- nop
-.end _mips_AES_set_encrypt_key
-
-.globl private_AES_set_encrypt_key
-.ent private_AES_set_encrypt_key
-private_AES_set_encrypt_key:
- .frame $29,32,$31
- .mask 3221225472,-4
- .set noreorder
- .cpload $25
- sub $29,32
- sw $31,32-1*4($29)
- sw $30,32-2*4($29)
- .set reorder
- la $7,AES_Te # PIC-ified 'load address'
-
- bal _mips_AES_set_encrypt_key
-
- .set noreorder
- move $4,$2
- lw $31,32-1*4($29)
- lw $30,32-2*4($29)
- jr $31
- add $29,32
-.end private_AES_set_encrypt_key
-.align 5
-.globl private_AES_set_decrypt_key
-.ent private_AES_set_decrypt_key
-private_AES_set_decrypt_key:
- .frame $29,32,$31
- .mask 3221225472,-4
- .set noreorder
- .cpload $25
- sub $29,32
- sw $31,32-1*4($29)
- sw $30,32-2*4($29)
- .set reorder
- la $7,AES_Te # PIC-ified 'load address'
-
- bal _mips_AES_set_encrypt_key
-
- bltz $2,.Ldkey_done
-
- sll $1,$30,4
- add $4,$6,0
- add $5,$6,$1
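-	# reverse the order of the round keys: swap 16-byte entries
-	# from both ends of the schedule toward the middle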
-.align 4
-.Lswap:
- lw $8,0($4)
- lw $9,4($4)
- lw $10,8($4)
- lw $11,12($4)
- lw $12,0($5)
- lw $13,4($5)
- lw $14,8($5)
- lw $15,12($5)
- sw $8,0($5)
- sw $9,4($5)
- sw $10,8($5)
- sw $11,12($5)
- add $4,16
- sub $5,16
- sw $12,-16($4)
- sw $13,-12($4)
- sw $14,-8($4)
- sw $15,-4($4)
- bne $4,$5,.Lswap
-
- lw $8,16($6) # modulo-scheduled
- lui $2,0x8080
- sub $30,1
- or $2,0x8080
- sll $30,2
- add $6,16
- lui $25,0x1b1b
- nor $24,$0,$2
- or $25,0x1b1b
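-	# apply InvMixColumns to each round-key word, four GF(2^8)
-	# bytes at a time: $2=0x80808080, $24=0x7f7f7f7f and
-	# $25=0x1b1b1b1b implement the packed xtime (doubling) step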
-.align 4
-.Lmix:
- and $1,$8,$2
- and $9,$8,$24
- srl $10,$1,7
- addu $9,$9 # tp2<<1
- subu $1,$10
- and $1,$25
- xor $9,$1
-
- and $1,$9,$2
- and $10,$9,$24
- srl $11,$1,7
- addu $10,$10 # tp4<<1
- subu $1,$11
- and $1,$25
- xor $10,$1
-
- and $1,$10,$2
- and $11,$10,$24
- srl $12,$1,7
- addu $11,$11 # tp8<<1
- subu $1,$12
- and $1,$25
- xor $11,$1
-
- xor $12,$11,$8
- xor $15,$11,$10
- xor $13,$12,$9
- xor $14,$12,$10
-
- sll $8,$14,16
- xor $15,$9
- srl $9,$14,16
- xor $15,$8
- sll $8,$12,8
- xor $15,$9
- srl $9,$12,24
- xor $15,$8
- sll $8,$13,24
- xor $15,$9
- srl $9,$13,8
- xor $15,$8
- lw $8,4($6) # modulo-scheduled
- xor $15,$9
- sub $30,1
- sw $15,0($6)
- add $6,4
- bnez $30,.Lmix
-
- li $2,0
-.Ldkey_done:
- .set noreorder
- move $4,$2
- lw $31,32-1*4($29)
- lw $30,32-2*4($29)
- jr $31
- add $29,32
-.end private_AES_set_decrypt_key
-.rdata
-.align 6
-AES_Te:
-.byte 0xc6,0x63,0x63,0xa5, 0xf8,0x7c,0x7c,0x84 # Te0
-.byte 0xee,0x77,0x77,0x99, 0xf6,0x7b,0x7b,0x8d
-.byte 0xff,0xf2,0xf2,0x0d, 0xd6,0x6b,0x6b,0xbd
-.byte 0xde,0x6f,0x6f,0xb1, 0x91,0xc5,0xc5,0x54
-.byte 0x60,0x30,0x30,0x50, 0x02,0x01,0x01,0x03
-.byte 0xce,0x67,0x67,0xa9, 0x56,0x2b,0x2b,0x7d
-.byte 0xe7,0xfe,0xfe,0x19, 0xb5,0xd7,0xd7,0x62
-.byte 0x4d,0xab,0xab,0xe6, 0xec,0x76,0x76,0x9a
-.byte 0x8f,0xca,0xca,0x45, 0x1f,0x82,0x82,0x9d
-.byte 0x89,0xc9,0xc9,0x40, 0xfa,0x7d,0x7d,0x87
-.byte 0xef,0xfa,0xfa,0x15, 0xb2,0x59,0x59,0xeb
-.byte 0x8e,0x47,0x47,0xc9, 0xfb,0xf0,0xf0,0x0b
-.byte 0x41,0xad,0xad,0xec, 0xb3,0xd4,0xd4,0x67
-.byte 0x5f,0xa2,0xa2,0xfd, 0x45,0xaf,0xaf,0xea
-.byte 0x23,0x9c,0x9c,0xbf, 0x53,0xa4,0xa4,0xf7
-.byte 0xe4,0x72,0x72,0x96, 0x9b,0xc0,0xc0,0x5b
-.byte 0x75,0xb7,0xb7,0xc2, 0xe1,0xfd,0xfd,0x1c
-.byte 0x3d,0x93,0x93,0xae, 0x4c,0x26,0x26,0x6a
-.byte 0x6c,0x36,0x36,0x5a, 0x7e,0x3f,0x3f,0x41
-.byte 0xf5,0xf7,0xf7,0x02, 0x83,0xcc,0xcc,0x4f
-.byte 0x68,0x34,0x34,0x5c, 0x51,0xa5,0xa5,0xf4
-.byte 0xd1,0xe5,0xe5,0x34, 0xf9,0xf1,0xf1,0x08
-.byte 0xe2,0x71,0x71,0x93, 0xab,0xd8,0xd8,0x73
-.byte 0x62,0x31,0x31,0x53, 0x2a,0x15,0x15,0x3f
-.byte 0x08,0x04,0x04,0x0c, 0x95,0xc7,0xc7,0x52
-.byte 0x46,0x23,0x23,0x65, 0x9d,0xc3,0xc3,0x5e
-.byte 0x30,0x18,0x18,0x28, 0x37,0x96,0x96,0xa1
-.byte 0x0a,0x05,0x05,0x0f, 0x2f,0x9a,0x9a,0xb5
-.byte 0x0e,0x07,0x07,0x09, 0x24,0x12,0x12,0x36
-.byte 0x1b,0x80,0x80,0x9b, 0xdf,0xe2,0xe2,0x3d
-.byte 0xcd,0xeb,0xeb,0x26, 0x4e,0x27,0x27,0x69
-.byte 0x7f,0xb2,0xb2,0xcd, 0xea,0x75,0x75,0x9f
-.byte 0x12,0x09,0x09,0x1b, 0x1d,0x83,0x83,0x9e
-.byte 0x58,0x2c,0x2c,0x74, 0x34,0x1a,0x1a,0x2e
-.byte 0x36,0x1b,0x1b,0x2d, 0xdc,0x6e,0x6e,0xb2
-.byte 0xb4,0x5a,0x5a,0xee, 0x5b,0xa0,0xa0,0xfb
-.byte 0xa4,0x52,0x52,0xf6, 0x76,0x3b,0x3b,0x4d
-.byte 0xb7,0xd6,0xd6,0x61, 0x7d,0xb3,0xb3,0xce
-.byte 0x52,0x29,0x29,0x7b, 0xdd,0xe3,0xe3,0x3e
-.byte 0x5e,0x2f,0x2f,0x71, 0x13,0x84,0x84,0x97
-.byte 0xa6,0x53,0x53,0xf5, 0xb9,0xd1,0xd1,0x68
-.byte 0x00,0x00,0x00,0x00, 0xc1,0xed,0xed,0x2c
-.byte 0x40,0x20,0x20,0x60, 0xe3,0xfc,0xfc,0x1f
-.byte 0x79,0xb1,0xb1,0xc8, 0xb6,0x5b,0x5b,0xed
-.byte 0xd4,0x6a,0x6a,0xbe, 0x8d,0xcb,0xcb,0x46
-.byte 0x67,0xbe,0xbe,0xd9, 0x72,0x39,0x39,0x4b
-.byte 0x94,0x4a,0x4a,0xde, 0x98,0x4c,0x4c,0xd4
-.byte 0xb0,0x58,0x58,0xe8, 0x85,0xcf,0xcf,0x4a
-.byte 0xbb,0xd0,0xd0,0x6b, 0xc5,0xef,0xef,0x2a
-.byte 0x4f,0xaa,0xaa,0xe5, 0xed,0xfb,0xfb,0x16
-.byte 0x86,0x43,0x43,0xc5, 0x9a,0x4d,0x4d,0xd7
-.byte 0x66,0x33,0x33,0x55, 0x11,0x85,0x85,0x94
-.byte 0x8a,0x45,0x45,0xcf, 0xe9,0xf9,0xf9,0x10
-.byte 0x04,0x02,0x02,0x06, 0xfe,0x7f,0x7f,0x81
-.byte 0xa0,0x50,0x50,0xf0, 0x78,0x3c,0x3c,0x44
-.byte 0x25,0x9f,0x9f,0xba, 0x4b,0xa8,0xa8,0xe3
-.byte 0xa2,0x51,0x51,0xf3, 0x5d,0xa3,0xa3,0xfe
-.byte 0x80,0x40,0x40,0xc0, 0x05,0x8f,0x8f,0x8a
-.byte 0x3f,0x92,0x92,0xad, 0x21,0x9d,0x9d,0xbc
-.byte 0x70,0x38,0x38,0x48, 0xf1,0xf5,0xf5,0x04
-.byte 0x63,0xbc,0xbc,0xdf, 0x77,0xb6,0xb6,0xc1
-.byte 0xaf,0xda,0xda,0x75, 0x42,0x21,0x21,0x63
-.byte 0x20,0x10,0x10,0x30, 0xe5,0xff,0xff,0x1a
-.byte 0xfd,0xf3,0xf3,0x0e, 0xbf,0xd2,0xd2,0x6d
-.byte 0x81,0xcd,0xcd,0x4c, 0x18,0x0c,0x0c,0x14
-.byte 0x26,0x13,0x13,0x35, 0xc3,0xec,0xec,0x2f
-.byte 0xbe,0x5f,0x5f,0xe1, 0x35,0x97,0x97,0xa2
-.byte 0x88,0x44,0x44,0xcc, 0x2e,0x17,0x17,0x39
-.byte 0x93,0xc4,0xc4,0x57, 0x55,0xa7,0xa7,0xf2
-.byte 0xfc,0x7e,0x7e,0x82, 0x7a,0x3d,0x3d,0x47
-.byte 0xc8,0x64,0x64,0xac, 0xba,0x5d,0x5d,0xe7
-.byte 0x32,0x19,0x19,0x2b, 0xe6,0x73,0x73,0x95
-.byte 0xc0,0x60,0x60,0xa0, 0x19,0x81,0x81,0x98
-.byte 0x9e,0x4f,0x4f,0xd1, 0xa3,0xdc,0xdc,0x7f
-.byte 0x44,0x22,0x22,0x66, 0x54,0x2a,0x2a,0x7e
-.byte 0x3b,0x90,0x90,0xab, 0x0b,0x88,0x88,0x83
-.byte 0x8c,0x46,0x46,0xca, 0xc7,0xee,0xee,0x29
-.byte 0x6b,0xb8,0xb8,0xd3, 0x28,0x14,0x14,0x3c
-.byte 0xa7,0xde,0xde,0x79, 0xbc,0x5e,0x5e,0xe2
-.byte 0x16,0x0b,0x0b,0x1d, 0xad,0xdb,0xdb,0x76
-.byte 0xdb,0xe0,0xe0,0x3b, 0x64,0x32,0x32,0x56
-.byte 0x74,0x3a,0x3a,0x4e, 0x14,0x0a,0x0a,0x1e
-.byte 0x92,0x49,0x49,0xdb, 0x0c,0x06,0x06,0x0a
-.byte 0x48,0x24,0x24,0x6c, 0xb8,0x5c,0x5c,0xe4
-.byte 0x9f,0xc2,0xc2,0x5d, 0xbd,0xd3,0xd3,0x6e
-.byte 0x43,0xac,0xac,0xef, 0xc4,0x62,0x62,0xa6
-.byte 0x39,0x91,0x91,0xa8, 0x31,0x95,0x95,0xa4
-.byte 0xd3,0xe4,0xe4,0x37, 0xf2,0x79,0x79,0x8b
-.byte 0xd5,0xe7,0xe7,0x32, 0x8b,0xc8,0xc8,0x43
-.byte 0x6e,0x37,0x37,0x59, 0xda,0x6d,0x6d,0xb7
-.byte 0x01,0x8d,0x8d,0x8c, 0xb1,0xd5,0xd5,0x64
-.byte 0x9c,0x4e,0x4e,0xd2, 0x49,0xa9,0xa9,0xe0
-.byte 0xd8,0x6c,0x6c,0xb4, 0xac,0x56,0x56,0xfa
-.byte 0xf3,0xf4,0xf4,0x07, 0xcf,0xea,0xea,0x25
-.byte 0xca,0x65,0x65,0xaf, 0xf4,0x7a,0x7a,0x8e
-.byte 0x47,0xae,0xae,0xe9, 0x10,0x08,0x08,0x18
-.byte 0x6f,0xba,0xba,0xd5, 0xf0,0x78,0x78,0x88
-.byte 0x4a,0x25,0x25,0x6f, 0x5c,0x2e,0x2e,0x72
-.byte 0x38,0x1c,0x1c,0x24, 0x57,0xa6,0xa6,0xf1
-.byte 0x73,0xb4,0xb4,0xc7, 0x97,0xc6,0xc6,0x51
-.byte 0xcb,0xe8,0xe8,0x23, 0xa1,0xdd,0xdd,0x7c
-.byte 0xe8,0x74,0x74,0x9c, 0x3e,0x1f,0x1f,0x21
-.byte 0x96,0x4b,0x4b,0xdd, 0x61,0xbd,0xbd,0xdc
-.byte 0x0d,0x8b,0x8b,0x86, 0x0f,0x8a,0x8a,0x85
-.byte 0xe0,0x70,0x70,0x90, 0x7c,0x3e,0x3e,0x42
-.byte 0x71,0xb5,0xb5,0xc4, 0xcc,0x66,0x66,0xaa
-.byte 0x90,0x48,0x48,0xd8, 0x06,0x03,0x03,0x05
-.byte 0xf7,0xf6,0xf6,0x01, 0x1c,0x0e,0x0e,0x12
-.byte 0xc2,0x61,0x61,0xa3, 0x6a,0x35,0x35,0x5f
-.byte 0xae,0x57,0x57,0xf9, 0x69,0xb9,0xb9,0xd0
-.byte 0x17,0x86,0x86,0x91, 0x99,0xc1,0xc1,0x58
-.byte 0x3a,0x1d,0x1d,0x27, 0x27,0x9e,0x9e,0xb9
-.byte 0xd9,0xe1,0xe1,0x38, 0xeb,0xf8,0xf8,0x13
-.byte 0x2b,0x98,0x98,0xb3, 0x22,0x11,0x11,0x33
-.byte 0xd2,0x69,0x69,0xbb, 0xa9,0xd9,0xd9,0x70
-.byte 0x07,0x8e,0x8e,0x89, 0x33,0x94,0x94,0xa7
-.byte 0x2d,0x9b,0x9b,0xb6, 0x3c,0x1e,0x1e,0x22
-.byte 0x15,0x87,0x87,0x92, 0xc9,0xe9,0xe9,0x20
-.byte 0x87,0xce,0xce,0x49, 0xaa,0x55,0x55,0xff
-.byte 0x50,0x28,0x28,0x78, 0xa5,0xdf,0xdf,0x7a
-.byte 0x03,0x8c,0x8c,0x8f, 0x59,0xa1,0xa1,0xf8
-.byte 0x09,0x89,0x89,0x80, 0x1a,0x0d,0x0d,0x17
-.byte 0x65,0xbf,0xbf,0xda, 0xd7,0xe6,0xe6,0x31
-.byte 0x84,0x42,0x42,0xc6, 0xd0,0x68,0x68,0xb8
-.byte 0x82,0x41,0x41,0xc3, 0x29,0x99,0x99,0xb0
-.byte 0x5a,0x2d,0x2d,0x77, 0x1e,0x0f,0x0f,0x11
-.byte 0x7b,0xb0,0xb0,0xcb, 0xa8,0x54,0x54,0xfc
-.byte 0x6d,0xbb,0xbb,0xd6, 0x2c,0x16,0x16,0x3a
-
-.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 # Te4
-.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-
-.byte 0x01,0x00,0x00,0x00, 0x02,0x00,0x00,0x00 # rcon
-.byte 0x04,0x00,0x00,0x00, 0x08,0x00,0x00,0x00
-.byte 0x10,0x00,0x00,0x00, 0x20,0x00,0x00,0x00
-.byte 0x40,0x00,0x00,0x00, 0x80,0x00,0x00,0x00
-.byte 0x1B,0x00,0x00,0x00, 0x36,0x00,0x00,0x00
-
-.align 6
-AES_Td:
-.byte 0x51,0xf4,0xa7,0x50, 0x7e,0x41,0x65,0x53 # Td0
-.byte 0x1a,0x17,0xa4,0xc3, 0x3a,0x27,0x5e,0x96
-.byte 0x3b,0xab,0x6b,0xcb, 0x1f,0x9d,0x45,0xf1
-.byte 0xac,0xfa,0x58,0xab, 0x4b,0xe3,0x03,0x93
-.byte 0x20,0x30,0xfa,0x55, 0xad,0x76,0x6d,0xf6
-.byte 0x88,0xcc,0x76,0x91, 0xf5,0x02,0x4c,0x25
-.byte 0x4f,0xe5,0xd7,0xfc, 0xc5,0x2a,0xcb,0xd7
-.byte 0x26,0x35,0x44,0x80, 0xb5,0x62,0xa3,0x8f
-.byte 0xde,0xb1,0x5a,0x49, 0x25,0xba,0x1b,0x67
-.byte 0x45,0xea,0x0e,0x98, 0x5d,0xfe,0xc0,0xe1
-.byte 0xc3,0x2f,0x75,0x02, 0x81,0x4c,0xf0,0x12
-.byte 0x8d,0x46,0x97,0xa3, 0x6b,0xd3,0xf9,0xc6
-.byte 0x03,0x8f,0x5f,0xe7, 0x15,0x92,0x9c,0x95
-.byte 0xbf,0x6d,0x7a,0xeb, 0x95,0x52,0x59,0xda
-.byte 0xd4,0xbe,0x83,0x2d, 0x58,0x74,0x21,0xd3
-.byte 0x49,0xe0,0x69,0x29, 0x8e,0xc9,0xc8,0x44
-.byte 0x75,0xc2,0x89,0x6a, 0xf4,0x8e,0x79,0x78
-.byte 0x99,0x58,0x3e,0x6b, 0x27,0xb9,0x71,0xdd
-.byte 0xbe,0xe1,0x4f,0xb6, 0xf0,0x88,0xad,0x17
-.byte 0xc9,0x20,0xac,0x66, 0x7d,0xce,0x3a,0xb4
-.byte 0x63,0xdf,0x4a,0x18, 0xe5,0x1a,0x31,0x82
-.byte 0x97,0x51,0x33,0x60, 0x62,0x53,0x7f,0x45
-.byte 0xb1,0x64,0x77,0xe0, 0xbb,0x6b,0xae,0x84
-.byte 0xfe,0x81,0xa0,0x1c, 0xf9,0x08,0x2b,0x94
-.byte 0x70,0x48,0x68,0x58, 0x8f,0x45,0xfd,0x19
-.byte 0x94,0xde,0x6c,0x87, 0x52,0x7b,0xf8,0xb7
-.byte 0xab,0x73,0xd3,0x23, 0x72,0x4b,0x02,0xe2
-.byte 0xe3,0x1f,0x8f,0x57, 0x66,0x55,0xab,0x2a
-.byte 0xb2,0xeb,0x28,0x07, 0x2f,0xb5,0xc2,0x03
-.byte 0x86,0xc5,0x7b,0x9a, 0xd3,0x37,0x08,0xa5
-.byte 0x30,0x28,0x87,0xf2, 0x23,0xbf,0xa5,0xb2
-.byte 0x02,0x03,0x6a,0xba, 0xed,0x16,0x82,0x5c
-.byte 0x8a,0xcf,0x1c,0x2b, 0xa7,0x79,0xb4,0x92
-.byte 0xf3,0x07,0xf2,0xf0, 0x4e,0x69,0xe2,0xa1
-.byte 0x65,0xda,0xf4,0xcd, 0x06,0x05,0xbe,0xd5
-.byte 0xd1,0x34,0x62,0x1f, 0xc4,0xa6,0xfe,0x8a
-.byte 0x34,0x2e,0x53,0x9d, 0xa2,0xf3,0x55,0xa0
-.byte 0x05,0x8a,0xe1,0x32, 0xa4,0xf6,0xeb,0x75
-.byte 0x0b,0x83,0xec,0x39, 0x40,0x60,0xef,0xaa
-.byte 0x5e,0x71,0x9f,0x06, 0xbd,0x6e,0x10,0x51
-.byte 0x3e,0x21,0x8a,0xf9, 0x96,0xdd,0x06,0x3d
-.byte 0xdd,0x3e,0x05,0xae, 0x4d,0xe6,0xbd,0x46
-.byte 0x91,0x54,0x8d,0xb5, 0x71,0xc4,0x5d,0x05
-.byte 0x04,0x06,0xd4,0x6f, 0x60,0x50,0x15,0xff
-.byte 0x19,0x98,0xfb,0x24, 0xd6,0xbd,0xe9,0x97
-.byte 0x89,0x40,0x43,0xcc, 0x67,0xd9,0x9e,0x77
-.byte 0xb0,0xe8,0x42,0xbd, 0x07,0x89,0x8b,0x88
-.byte 0xe7,0x19,0x5b,0x38, 0x79,0xc8,0xee,0xdb
-.byte 0xa1,0x7c,0x0a,0x47, 0x7c,0x42,0x0f,0xe9
-.byte 0xf8,0x84,0x1e,0xc9, 0x00,0x00,0x00,0x00
-.byte 0x09,0x80,0x86,0x83, 0x32,0x2b,0xed,0x48
-.byte 0x1e,0x11,0x70,0xac, 0x6c,0x5a,0x72,0x4e
-.byte 0xfd,0x0e,0xff,0xfb, 0x0f,0x85,0x38,0x56
-.byte 0x3d,0xae,0xd5,0x1e, 0x36,0x2d,0x39,0x27
-.byte 0x0a,0x0f,0xd9,0x64, 0x68,0x5c,0xa6,0x21
-.byte 0x9b,0x5b,0x54,0xd1, 0x24,0x36,0x2e,0x3a
-.byte 0x0c,0x0a,0x67,0xb1, 0x93,0x57,0xe7,0x0f
-.byte 0xb4,0xee,0x96,0xd2, 0x1b,0x9b,0x91,0x9e
-.byte 0x80,0xc0,0xc5,0x4f, 0x61,0xdc,0x20,0xa2
-.byte 0x5a,0x77,0x4b,0x69, 0x1c,0x12,0x1a,0x16
-.byte 0xe2,0x93,0xba,0x0a, 0xc0,0xa0,0x2a,0xe5
-.byte 0x3c,0x22,0xe0,0x43, 0x12,0x1b,0x17,0x1d
-.byte 0x0e,0x09,0x0d,0x0b, 0xf2,0x8b,0xc7,0xad
-.byte 0x2d,0xb6,0xa8,0xb9, 0x14,0x1e,0xa9,0xc8
-.byte 0x57,0xf1,0x19,0x85, 0xaf,0x75,0x07,0x4c
-.byte 0xee,0x99,0xdd,0xbb, 0xa3,0x7f,0x60,0xfd
-.byte 0xf7,0x01,0x26,0x9f, 0x5c,0x72,0xf5,0xbc
-.byte 0x44,0x66,0x3b,0xc5, 0x5b,0xfb,0x7e,0x34
-.byte 0x8b,0x43,0x29,0x76, 0xcb,0x23,0xc6,0xdc
-.byte 0xb6,0xed,0xfc,0x68, 0xb8,0xe4,0xf1,0x63
-.byte 0xd7,0x31,0xdc,0xca, 0x42,0x63,0x85,0x10
-.byte 0x13,0x97,0x22,0x40, 0x84,0xc6,0x11,0x20
-.byte 0x85,0x4a,0x24,0x7d, 0xd2,0xbb,0x3d,0xf8
-.byte 0xae,0xf9,0x32,0x11, 0xc7,0x29,0xa1,0x6d
-.byte 0x1d,0x9e,0x2f,0x4b, 0xdc,0xb2,0x30,0xf3
-.byte 0x0d,0x86,0x52,0xec, 0x77,0xc1,0xe3,0xd0
-.byte 0x2b,0xb3,0x16,0x6c, 0xa9,0x70,0xb9,0x99
-.byte 0x11,0x94,0x48,0xfa, 0x47,0xe9,0x64,0x22
-.byte 0xa8,0xfc,0x8c,0xc4, 0xa0,0xf0,0x3f,0x1a
-.byte 0x56,0x7d,0x2c,0xd8, 0x22,0x33,0x90,0xef
-.byte 0x87,0x49,0x4e,0xc7, 0xd9,0x38,0xd1,0xc1
-.byte 0x8c,0xca,0xa2,0xfe, 0x98,0xd4,0x0b,0x36
-.byte 0xa6,0xf5,0x81,0xcf, 0xa5,0x7a,0xde,0x28
-.byte 0xda,0xb7,0x8e,0x26, 0x3f,0xad,0xbf,0xa4
-.byte 0x2c,0x3a,0x9d,0xe4, 0x50,0x78,0x92,0x0d
-.byte 0x6a,0x5f,0xcc,0x9b, 0x54,0x7e,0x46,0x62
-.byte 0xf6,0x8d,0x13,0xc2, 0x90,0xd8,0xb8,0xe8
-.byte 0x2e,0x39,0xf7,0x5e, 0x82,0xc3,0xaf,0xf5
-.byte 0x9f,0x5d,0x80,0xbe, 0x69,0xd0,0x93,0x7c
-.byte 0x6f,0xd5,0x2d,0xa9, 0xcf,0x25,0x12,0xb3
-.byte 0xc8,0xac,0x99,0x3b, 0x10,0x18,0x7d,0xa7
-.byte 0xe8,0x9c,0x63,0x6e, 0xdb,0x3b,0xbb,0x7b
-.byte 0xcd,0x26,0x78,0x09, 0x6e,0x59,0x18,0xf4
-.byte 0xec,0x9a,0xb7,0x01, 0x83,0x4f,0x9a,0xa8
-.byte 0xe6,0x95,0x6e,0x65, 0xaa,0xff,0xe6,0x7e
-.byte 0x21,0xbc,0xcf,0x08, 0xef,0x15,0xe8,0xe6
-.byte 0xba,0xe7,0x9b,0xd9, 0x4a,0x6f,0x36,0xce
-.byte 0xea,0x9f,0x09,0xd4, 0x29,0xb0,0x7c,0xd6
-.byte 0x31,0xa4,0xb2,0xaf, 0x2a,0x3f,0x23,0x31
-.byte 0xc6,0xa5,0x94,0x30, 0x35,0xa2,0x66,0xc0
-.byte 0x74,0x4e,0xbc,0x37, 0xfc,0x82,0xca,0xa6
-.byte 0xe0,0x90,0xd0,0xb0, 0x33,0xa7,0xd8,0x15
-.byte 0xf1,0x04,0x98,0x4a, 0x41,0xec,0xda,0xf7
-.byte 0x7f,0xcd,0x50,0x0e, 0x17,0x91,0xf6,0x2f
-.byte 0x76,0x4d,0xd6,0x8d, 0x43,0xef,0xb0,0x4d
-.byte 0xcc,0xaa,0x4d,0x54, 0xe4,0x96,0x04,0xdf
-.byte 0x9e,0xd1,0xb5,0xe3, 0x4c,0x6a,0x88,0x1b
-.byte 0xc1,0x2c,0x1f,0xb8, 0x46,0x65,0x51,0x7f
-.byte 0x9d,0x5e,0xea,0x04, 0x01,0x8c,0x35,0x5d
-.byte 0xfa,0x87,0x74,0x73, 0xfb,0x0b,0x41,0x2e
-.byte 0xb3,0x67,0x1d,0x5a, 0x92,0xdb,0xd2,0x52
-.byte 0xe9,0x10,0x56,0x33, 0x6d,0xd6,0x47,0x13
-.byte 0x9a,0xd7,0x61,0x8c, 0x37,0xa1,0x0c,0x7a
-.byte 0x59,0xf8,0x14,0x8e, 0xeb,0x13,0x3c,0x89
-.byte 0xce,0xa9,0x27,0xee, 0xb7,0x61,0xc9,0x35
-.byte 0xe1,0x1c,0xe5,0xed, 0x7a,0x47,0xb1,0x3c
-.byte 0x9c,0xd2,0xdf,0x59, 0x55,0xf2,0x73,0x3f
-.byte 0x18,0x14,0xce,0x79, 0x73,0xc7,0x37,0xbf
-.byte 0x53,0xf7,0xcd,0xea, 0x5f,0xfd,0xaa,0x5b
-.byte 0xdf,0x3d,0x6f,0x14, 0x78,0x44,0xdb,0x86
-.byte 0xca,0xaf,0xf3,0x81, 0xb9,0x68,0xc4,0x3e
-.byte 0x38,0x24,0x34,0x2c, 0xc2,0xa3,0x40,0x5f
-.byte 0x16,0x1d,0xc3,0x72, 0xbc,0xe2,0x25,0x0c
-.byte 0x28,0x3c,0x49,0x8b, 0xff,0x0d,0x95,0x41
-.byte 0x39,0xa8,0x01,0x71, 0x08,0x0c,0xb3,0xde
-.byte 0xd8,0xb4,0xe4,0x9c, 0x64,0x56,0xc1,0x90
-.byte 0x7b,0xcb,0x84,0x61, 0xd5,0x32,0xb6,0x70
-.byte 0x48,0x6c,0x5c,0x74, 0xd0,0xb8,0x57,0x42
-
-.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 # Td4
-.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
diff --git a/app/openssl/crypto/aes/asm/aes-mips.pl b/app/openssl/crypto/aes/asm/aes-mips.pl
deleted file mode 100644
index e5239542..00000000
--- a/app/openssl/crypto/aes/asm/aes-mips.pl
+++ /dev/null
@@ -1,1611 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for MIPS
-
-# October 2010
-#
-# Code uses a 1K[+256B] S-box and on a single-issue core [such as
-# R5000] spends ~68 cycles per byte processed with a 128-bit key. This
-# is ~16% faster than gcc-generated code, which is not very impressive.
-# But recall that the compressed S-box requires extra processing,
-# namely additional rotations. Rotations are implemented with lwl/lwr
-# pairs, which are normally used for loading unaligned data. Another
-# cool thing about this module is its endian neutrality, which means
-# that it processes data without ever changing byte order...
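-#
-# (Illustrative note, not part of the original commentary.) A plain
-# unaligned word load is composed as an lwl/lwr pair covering the
-# same four bytes, e.g.
-#
-#	lwl $s0,0+$MSB($inp)
-#	lwr $s0,0+$LSB($inp)
-#
-# while the table lookups below bias the two offsets against each
-# other ("lwl $t0,3($i0); lwr $t0,2($i0)"), which makes the load
-# itself deliver the table word byte-rotated, so the "additional
-# rotations" mentioned above cost no extra instructions.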
-
-######################################################################
-# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
-# widely used. Then there is a new contender: NUBI. It appears that if
-# one picks the latter, it's possible to arrange code in an ABI-neutral
-# manner. Therefore let's stick to the NUBI register layout:
-#
-($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
-($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
-($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
-($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
-#
-# The return value is placed in $a0. The following coding rules
-# facilitate interoperability:
-#
-# - never ever touch $tp, "thread pointer", former $gp;
-# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
-# old code];
-# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
-#
-# For reference here is register layout for N32/64 MIPS ABIs:
-#
-# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
-# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
-# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
-# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
-# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
-#
-$flavour = shift; # supported flavours are o32,n32,64,nubi32,nubi64
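-# (typical invocation, assuming the usual perlasm convention:
-# "perl aes-mips.pl o32 aes-mips.S")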
-
-if ($flavour =~ /64|n32/i) {
- $PTR_ADD="dadd"; # incidentally works even on n32
- $PTR_SUB="dsub"; # incidentally works even on n32
- $REG_S="sd";
- $REG_L="ld";
- $PTR_SLL="dsll"; # incidentally works even on n32
- $SZREG=8;
-} else {
- $PTR_ADD="add";
- $PTR_SUB="sub";
- $REG_S="sw";
- $REG_L="lw";
- $PTR_SLL="sll";
- $SZREG=4;
-}
-$pf = ($flavour =~ /nubi/i) ? $t0 : $t2;
-#
-# <appro@openssl.org>
-#
-######################################################################
-
-$big_endian=(`echo MIPSEL | $ENV{CC} -E -P -`=~/MIPSEL/)?1:0;
-
-for (@ARGV) { $output=$_ if (/^\w[\w\-]*\.\w+$/); }
-open STDOUT,">$output";
-
-my ($MSB,$LSB)=(0,3); # automatically converted to little-endian
-
-$code.=<<___;
-.text
-#ifdef OPENSSL_FIPSCANISTER
-# include <openssl/fipssyms.h>
-#endif
-
-#if !defined(__vxworks) || defined(__pic__)
-.option pic2
-#endif
-.set noat
-___
-
-{{{
-my $FRAMESIZE=16*$SZREG;
-my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc0fff008 : 0xc0ff0000;
-
-my ($inp,$out,$key,$Tbl,$s0,$s1,$s2,$s3)=($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7);
-my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
-my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
-my ($key0,$cnt)=($gp,$fp);
-
-# instruction ordering is "stolen" from the output of the MIPSpro
-# assembler invoked with -mips3 -O3 arguments...
-$code.=<<___;
-.align 5
-.ent _mips_AES_encrypt
-_mips_AES_encrypt:
- .frame $sp,0,$ra
- .set reorder
- lw $t0,0($key)
- lw $t1,4($key)
- lw $t2,8($key)
- lw $t3,12($key)
- lw $cnt,240($key)
- $PTR_ADD $key0,$key,16
-
- xor $s0,$t0
- xor $s1,$t1
- xor $s2,$t2
- xor $s3,$t3
-
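-	# _xtr below is a synthetic instruction that the script expands
-	# later according to endianness (cf. the srl/sll forms in the
-	# generated aes-mips.S above); it yields ((s>>n)&0xff)*4, a byte
-	# index pre-scaled for the word-wide Te table, hence the 0x3fc
-	# masks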
- sub $cnt,1
- _xtr $i0,$s1,16-2
-.Loop_enc:
- _xtr $i1,$s2,16-2
- _xtr $i2,$s3,16-2
- _xtr $i3,$s0,16-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lwl $t0,3($i0) # Te1[s1>>16]
- lwl $t1,3($i1) # Te1[s2>>16]
- lwl $t2,3($i2) # Te1[s3>>16]
- lwl $t3,3($i3) # Te1[s0>>16]
- lwr $t0,2($i0) # Te1[s1>>16]
- lwr $t1,2($i1) # Te1[s2>>16]
- lwr $t2,2($i2) # Te1[s3>>16]
- lwr $t3,2($i3) # Te1[s0>>16]
-
- _xtr $i0,$s2,8-2
- _xtr $i1,$s3,8-2
- _xtr $i2,$s0,8-2
- _xtr $i3,$s1,8-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lwl $t4,2($i0) # Te2[s2>>8]
- lwl $t5,2($i1) # Te2[s3>>8]
- lwl $t6,2($i2) # Te2[s0>>8]
- lwl $t7,2($i3) # Te2[s1>>8]
- lwr $t4,1($i0) # Te2[s2>>8]
- lwr $t5,1($i1) # Te2[s3>>8]
- lwr $t6,1($i2) # Te2[s0>>8]
- lwr $t7,1($i3) # Te2[s1>>8]
-
- _xtr $i0,$s3,0-2
- _xtr $i1,$s0,0-2
- _xtr $i2,$s1,0-2
- _xtr $i3,$s2,0-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lwl $t8,1($i0) # Te3[s3]
- lwl $t9,1($i1) # Te3[s0]
- lwl $t10,1($i2) # Te3[s1]
- lwl $t11,1($i3) # Te3[s2]
- lwr $t8,0($i0) # Te3[s3]
- lwr $t9,0($i1) # Te3[s0]
- lwr $t10,0($i2) # Te3[s1]
- lwr $t11,0($i3) # Te3[s2]
-
- _xtr $i0,$s0,24-2
- _xtr $i1,$s1,24-2
- _xtr $i2,$s2,24-2
- _xtr $i3,$s3,24-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
- lw $t4,0($i0) # Te0[s0>>24]
- lw $t5,0($i1) # Te0[s1>>24]
- lw $t6,0($i2) # Te0[s2>>24]
- lw $t7,0($i3) # Te0[s3>>24]
-
- lw $s0,0($key0)
- lw $s1,4($key0)
- lw $s2,8($key0)
- lw $s3,12($key0)
-
- xor $t0,$t8
- xor $t1,$t9
- xor $t2,$t10
- xor $t3,$t11
-
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
-
- sub $cnt,1
- $PTR_ADD $key0,16
- xor $s0,$t0
- xor $s1,$t1
- xor $s2,$t2
- xor $s3,$t3
- .set noreorder
- bnez $cnt,.Loop_enc
- _xtr $i0,$s1,16-2
-
- .set reorder
- _xtr $i1,$s2,16-2
- _xtr $i2,$s3,16-2
- _xtr $i3,$s0,16-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t0,2($i0) # Te4[s1>>16]
- lbu $t1,2($i1) # Te4[s2>>16]
- lbu $t2,2($i2) # Te4[s3>>16]
- lbu $t3,2($i3) # Te4[s0>>16]
-
- _xtr $i0,$s2,8-2
- _xtr $i1,$s3,8-2
- _xtr $i2,$s0,8-2
- _xtr $i3,$s1,8-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t4,2($i0) # Te4[s2>>8]
- lbu $t5,2($i1) # Te4[s3>>8]
- lbu $t6,2($i2) # Te4[s0>>8]
- lbu $t7,2($i3) # Te4[s1>>8]
-
- _xtr $i0,$s0,24-2
- _xtr $i1,$s1,24-2
- _xtr $i2,$s2,24-2
- _xtr $i3,$s3,24-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t8,2($i0) # Te4[s0>>24]
- lbu $t9,2($i1) # Te4[s1>>24]
- lbu $t10,2($i2) # Te4[s2>>24]
- lbu $t11,2($i3) # Te4[s3>>24]
-
- _xtr $i0,$s3,0-2
- _xtr $i1,$s0,0-2
- _xtr $i2,$s1,0-2
- _xtr $i3,$s2,0-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
-
- _ins $t0,16
- _ins $t1,16
- _ins $t2,16
- _ins $t3,16
-
- _ins $t4,8
- _ins $t5,8
- _ins $t6,8
- _ins $t7,8
-
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
-
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t4,2($i0) # Te4[s3]
- lbu $t5,2($i1) # Te4[s0]
- lbu $t6,2($i2) # Te4[s1]
- lbu $t7,2($i3) # Te4[s2]
-
- _ins $t8,24
- _ins $t9,24
- _ins $t10,24
- _ins $t11,24
-
- lw $s0,0($key0)
- lw $s1,4($key0)
- lw $s2,8($key0)
- lw $s3,12($key0)
-
- xor $t0,$t8
- xor $t1,$t9
- xor $t2,$t10
- xor $t3,$t11
-
- _ins $t4,0
- _ins $t5,0
- _ins $t6,0
- _ins $t7,0
-
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
-
- xor $s0,$t0
- xor $s1,$t1
- xor $s2,$t2
- xor $s3,$t3
-
- jr $ra
-.end _mips_AES_encrypt
-
-.align 5
-.globl AES_encrypt
-.ent AES_encrypt
-AES_encrypt:
- .frame $sp,$FRAMESIZE,$ra
- .mask $SAVED_REGS_MASK,-$SZREG
- .set noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
- .cpload $pf
-___
-$code.=<<___;
- $PTR_SUB $sp,$FRAMESIZE
- $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
- $REG_S $s11,$FRAMESIZE-3*$SZREG($sp)
- $REG_S $s10,$FRAMESIZE-4*$SZREG($sp)
- $REG_S $s9,$FRAMESIZE-5*$SZREG($sp)
- $REG_S $s8,$FRAMESIZE-6*$SZREG($sp)
- $REG_S $s7,$FRAMESIZE-7*$SZREG($sp)
- $REG_S $s6,$FRAMESIZE-8*$SZREG($sp)
- $REG_S $s5,$FRAMESIZE-9*$SZREG($sp)
- $REG_S $s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
- $REG_S \$15,$FRAMESIZE-11*$SZREG($sp)
- $REG_S \$14,$FRAMESIZE-12*$SZREG($sp)
- $REG_S \$13,$FRAMESIZE-13*$SZREG($sp)
- $REG_S \$12,$FRAMESIZE-14*$SZREG($sp)
- $REG_S $gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
- .cplocal $Tbl
- .cpsetup $pf,$zero,AES_encrypt
-___
-$code.=<<___;
- .set reorder
- la $Tbl,AES_Te # PIC-ified 'load address'
-
- lwl $s0,0+$MSB($inp)
- lwl $s1,4+$MSB($inp)
- lwl $s2,8+$MSB($inp)
- lwl $s3,12+$MSB($inp)
- lwr $s0,0+$LSB($inp)
- lwr $s1,4+$LSB($inp)
- lwr $s2,8+$LSB($inp)
- lwr $s3,12+$LSB($inp)
-
- bal _mips_AES_encrypt
-
- swr $s0,0+$LSB($out)
- swr $s1,4+$LSB($out)
- swr $s2,8+$LSB($out)
- swr $s3,12+$LSB($out)
- swl $s0,0+$MSB($out)
- swl $s1,4+$MSB($out)
- swl $s2,8+$MSB($out)
- swl $s3,12+$MSB($out)
-
- .set noreorder
- $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
- $REG_L $s11,$FRAMESIZE-3*$SZREG($sp)
- $REG_L $s10,$FRAMESIZE-4*$SZREG($sp)
- $REG_L $s9,$FRAMESIZE-5*$SZREG($sp)
- $REG_L $s8,$FRAMESIZE-6*$SZREG($sp)
- $REG_L $s7,$FRAMESIZE-7*$SZREG($sp)
- $REG_L $s6,$FRAMESIZE-8*$SZREG($sp)
- $REG_L $s5,$FRAMESIZE-9*$SZREG($sp)
- $REG_L $s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
- $REG_L \$15,$FRAMESIZE-11*$SZREG($sp)
- $REG_L \$14,$FRAMESIZE-12*$SZREG($sp)
- $REG_L \$13,$FRAMESIZE-13*$SZREG($sp)
- $REG_L \$12,$FRAMESIZE-14*$SZREG($sp)
- $REG_L $gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___;
- jr $ra
- $PTR_ADD $sp,$FRAMESIZE
-.end AES_encrypt
-___
-
-$code.=<<___;
-.align 5
-.ent _mips_AES_decrypt
-_mips_AES_decrypt:
- .frame $sp,0,$ra
- .set reorder
- lw $t0,0($key)
- lw $t1,4($key)
- lw $t2,8($key)
- lw $t3,12($key)
- lw $cnt,240($key)
- $PTR_ADD $key0,$key,16
-
- xor $s0,$t0
- xor $s1,$t1
- xor $s2,$t2
- xor $s3,$t3
-
- sub $cnt,1
- _xtr $i0,$s3,16-2
-.Loop_dec:
- _xtr $i1,$s0,16-2
- _xtr $i2,$s1,16-2
- _xtr $i3,$s2,16-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lwl $t0,3($i0) # Td1[s3>>16]
- lwl $t1,3($i1) # Td1[s0>>16]
- lwl $t2,3($i2) # Td1[s1>>16]
- lwl $t3,3($i3) # Td1[s2>>16]
- lwr $t0,2($i0) # Td1[s3>>16]
- lwr $t1,2($i1) # Td1[s0>>16]
- lwr $t2,2($i2) # Td1[s1>>16]
- lwr $t3,2($i3) # Td1[s2>>16]
-
- _xtr $i0,$s2,8-2
- _xtr $i1,$s3,8-2
- _xtr $i2,$s0,8-2
- _xtr $i3,$s1,8-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lwl $t4,2($i0) # Td2[s2>>8]
- lwl $t5,2($i1) # Td2[s3>>8]
- lwl $t6,2($i2) # Td2[s0>>8]
- lwl $t7,2($i3) # Td2[s1>>8]
- lwr $t4,1($i0) # Td2[s2>>8]
- lwr $t5,1($i1) # Td2[s3>>8]
- lwr $t6,1($i2) # Td2[s0>>8]
- lwr $t7,1($i3) # Td2[s1>>8]
-
- _xtr $i0,$s1,0-2
- _xtr $i1,$s2,0-2
- _xtr $i2,$s3,0-2
- _xtr $i3,$s0,0-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lwl $t8,1($i0) # Td3[s1]
- lwl $t9,1($i1) # Td3[s2]
- lwl $t10,1($i2) # Td3[s3]
- lwl $t11,1($i3) # Td3[s0]
- lwr $t8,0($i0) # Td3[s1]
- lwr $t9,0($i1) # Td3[s2]
- lwr $t10,0($i2) # Td3[s3]
- lwr $t11,0($i3) # Td3[s0]
-
- _xtr $i0,$s0,24-2
- _xtr $i1,$s1,24-2
- _xtr $i2,$s2,24-2
- _xtr $i3,$s3,24-2
- and $i0,0x3fc
- and $i1,0x3fc
- and $i2,0x3fc
- and $i3,0x3fc
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
-
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
-
-
- lw $t4,0($i0) # Td0[s0>>24]
- lw $t5,0($i1) # Td0[s1>>24]
- lw $t6,0($i2) # Td0[s2>>24]
- lw $t7,0($i3) # Td0[s3>>24]
-
- lw $s0,0($key0)
- lw $s1,4($key0)
- lw $s2,8($key0)
- lw $s3,12($key0)
-
- xor $t0,$t8
- xor $t1,$t9
- xor $t2,$t10
- xor $t3,$t11
-
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
-
- sub $cnt,1
- $PTR_ADD $key0,16
- xor $s0,$t0
- xor $s1,$t1
- xor $s2,$t2
- xor $s3,$t3
- .set noreorder
- bnez $cnt,.Loop_dec
- _xtr $i0,$s3,16-2
-
- .set reorder
- lw $t4,1024($Tbl) # prefetch Td4
- lw $t5,1024+32($Tbl)
- lw $t6,1024+64($Tbl)
- lw $t7,1024+96($Tbl)
- lw $t8,1024+128($Tbl)
- lw $t9,1024+160($Tbl)
- lw $t10,1024+192($Tbl)
- lw $t11,1024+224($Tbl)
-
- _xtr $i0,$s3,16
- _xtr $i1,$s0,16
- _xtr $i2,$s1,16
- _xtr $i3,$s2,16
- and $i0,0xff
- and $i1,0xff
- and $i2,0xff
- and $i3,0xff
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t0,1024($i0) # Td4[s3>>16]
- lbu $t1,1024($i1) # Td4[s0>>16]
- lbu $t2,1024($i2) # Td4[s1>>16]
- lbu $t3,1024($i3) # Td4[s2>>16]
-
- _xtr $i0,$s2,8
- _xtr $i1,$s3,8
- _xtr $i2,$s0,8
- _xtr $i3,$s1,8
- and $i0,0xff
- and $i1,0xff
- and $i2,0xff
- and $i3,0xff
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t4,1024($i0) # Td4[s2>>8]
- lbu $t5,1024($i1) # Td4[s3>>8]
- lbu $t6,1024($i2) # Td4[s0>>8]
- lbu $t7,1024($i3) # Td4[s1>>8]
-
- _xtr $i0,$s0,24
- _xtr $i1,$s1,24
- _xtr $i2,$s2,24
- _xtr $i3,$s3,24
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t8,1024($i0) # Td4[s0>>24]
- lbu $t9,1024($i1) # Td4[s1>>24]
- lbu $t10,1024($i2) # Td4[s2>>24]
- lbu $t11,1024($i3) # Td4[s3>>24]
-
- _xtr $i0,$s1,0
- _xtr $i1,$s2,0
- _xtr $i2,$s3,0
- _xtr $i3,$s0,0
-
- _ins $t0,16
- _ins $t1,16
- _ins $t2,16
- _ins $t3,16
-
- _ins $t4,8
- _ins $t5,8
- _ins $t6,8
- _ins $t7,8
-
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
-
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $t4,1024($i0) # Td4[s1]
- lbu $t5,1024($i1) # Td4[s2]
- lbu $t6,1024($i2) # Td4[s3]
- lbu $t7,1024($i3) # Td4[s0]
-
- _ins $t8,24
- _ins $t9,24
- _ins $t10,24
- _ins $t11,24
-
- lw $s0,0($key0)
- lw $s1,4($key0)
- lw $s2,8($key0)
- lw $s3,12($key0)
-
- _ins $t4,0
- _ins $t5,0
- _ins $t6,0
- _ins $t7,0
-
-
- xor $t0,$t8
- xor $t1,$t9
- xor $t2,$t10
- xor $t3,$t11
-
- xor $t0,$t4
- xor $t1,$t5
- xor $t2,$t6
- xor $t3,$t7
-
- xor $s0,$t0
- xor $s1,$t1
- xor $s2,$t2
- xor $s3,$t3
-
- jr $ra
-.end _mips_AES_decrypt
-
-.align 5
-.globl AES_decrypt
-.ent AES_decrypt
-AES_decrypt:
- .frame $sp,$FRAMESIZE,$ra
- .mask $SAVED_REGS_MASK,-$SZREG
- .set noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
- .cpload $pf
-___
-$code.=<<___;
- $PTR_SUB $sp,$FRAMESIZE
- $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
- $REG_S $s11,$FRAMESIZE-3*$SZREG($sp)
- $REG_S $s10,$FRAMESIZE-4*$SZREG($sp)
- $REG_S $s9,$FRAMESIZE-5*$SZREG($sp)
- $REG_S $s8,$FRAMESIZE-6*$SZREG($sp)
- $REG_S $s7,$FRAMESIZE-7*$SZREG($sp)
- $REG_S $s6,$FRAMESIZE-8*$SZREG($sp)
- $REG_S $s5,$FRAMESIZE-9*$SZREG($sp)
- $REG_S $s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
- $REG_S \$15,$FRAMESIZE-11*$SZREG($sp)
- $REG_S \$14,$FRAMESIZE-12*$SZREG($sp)
- $REG_S \$13,$FRAMESIZE-13*$SZREG($sp)
- $REG_S \$12,$FRAMESIZE-14*$SZREG($sp)
- $REG_S $gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
- .cplocal $Tbl
- .cpsetup $pf,$zero,AES_decrypt
-___
-$code.=<<___;
- .set reorder
- la $Tbl,AES_Td # PIC-ified 'load address'
-
- lwl $s0,0+$MSB($inp)
- lwl $s1,4+$MSB($inp)
- lwl $s2,8+$MSB($inp)
- lwl $s3,12+$MSB($inp)
- lwr $s0,0+$LSB($inp)
- lwr $s1,4+$LSB($inp)
- lwr $s2,8+$LSB($inp)
- lwr $s3,12+$LSB($inp)
-
- bal _mips_AES_decrypt
-
- swr $s0,0+$LSB($out)
- swr $s1,4+$LSB($out)
- swr $s2,8+$LSB($out)
- swr $s3,12+$LSB($out)
- swl $s0,0+$MSB($out)
- swl $s1,4+$MSB($out)
- swl $s2,8+$MSB($out)
- swl $s3,12+$MSB($out)
-
- .set noreorder
- $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
- $REG_L $s11,$FRAMESIZE-3*$SZREG($sp)
- $REG_L $s10,$FRAMESIZE-4*$SZREG($sp)
- $REG_L $s9,$FRAMESIZE-5*$SZREG($sp)
- $REG_L $s8,$FRAMESIZE-6*$SZREG($sp)
- $REG_L $s7,$FRAMESIZE-7*$SZREG($sp)
- $REG_L $s6,$FRAMESIZE-8*$SZREG($sp)
- $REG_L $s5,$FRAMESIZE-9*$SZREG($sp)
- $REG_L $s4,$FRAMESIZE-10*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
- $REG_L \$15,$FRAMESIZE-11*$SZREG($sp)
- $REG_L \$14,$FRAMESIZE-12*$SZREG($sp)
- $REG_L \$13,$FRAMESIZE-13*$SZREG($sp)
- $REG_L \$12,$FRAMESIZE-14*$SZREG($sp)
- $REG_L $gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___;
- jr $ra
- $PTR_ADD $sp,$FRAMESIZE
-.end AES_decrypt
-___
-}}}
-
-{{{
-my $FRAMESIZE=8*$SZREG;
-my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0xc000f008 : 0xc0000000;
-
-my ($inp,$bits,$key,$Tbl)=($a0,$a1,$a2,$a3);
-my ($rk0,$rk1,$rk2,$rk3,$rk4,$rk5,$rk6,$rk7)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
-my ($i0,$i1,$i2,$i3)=($at,$t0,$t1,$t2);
-my ($rcon,$cnt)=($gp,$fp);
-
-$code.=<<___;
-.align 5
-.ent _mips_AES_set_encrypt_key
-_mips_AES_set_encrypt_key:
- .frame $sp,0,$ra
- .set noreorder
- beqz $inp,.Lekey_done
- li $t0,-1
- beqz $key,.Lekey_done
- $PTR_ADD $rcon,$Tbl,1024+256
-
- .set reorder
- lwl $rk0,0+$MSB($inp) # load 128 bits
- lwl $rk1,4+$MSB($inp)
- lwl $rk2,8+$MSB($inp)
- lwl $rk3,12+$MSB($inp)
- li $at,128
- lwr $rk0,0+$LSB($inp)
- lwr $rk1,4+$LSB($inp)
- lwr $rk2,8+$LSB($inp)
- lwr $rk3,12+$LSB($inp)
- .set noreorder
- beq $bits,$at,.L128bits
- li $cnt,10
-
- .set reorder
- lwl $rk4,16+$MSB($inp) # load 192 bits
- lwl $rk5,20+$MSB($inp)
- li $at,192
- lwr $rk4,16+$LSB($inp)
- lwr $rk5,20+$LSB($inp)
- .set noreorder
- beq $bits,$at,.L192bits
- li $cnt,8
-
- .set reorder
- lwl $rk6,24+$MSB($inp) # load 256 bits
- lwl $rk7,28+$MSB($inp)
- li $at,256
- lwr $rk6,24+$LSB($inp)
- lwr $rk7,28+$LSB($inp)
- .set noreorder
- beq $bits,$at,.L256bits
- li $cnt,7
-
- b .Lekey_done
- li $t0,-2
-
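-# one key-schedule round: $rk0 ^= SubWord(RotWord($rk3)) ^ rcon, with
-# the remaining words chained by xor (Te4, the S-box, sits at
-# 1024($Tbl))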
-.align 4
-.L128bits:
- .set reorder
- srl $i0,$rk3,16
- srl $i1,$rk3,8
- and $i0,0xff
- and $i1,0xff
- and $i2,$rk3,0xff
- srl $i3,$rk3,24
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $i0,1024($i0)
- lbu $i1,1024($i1)
- lbu $i2,1024($i2)
- lbu $i3,1024($i3)
-
- sw $rk0,0($key)
- sw $rk1,4($key)
- sw $rk2,8($key)
- sw $rk3,12($key)
- sub $cnt,1
- $PTR_ADD $key,16
-
- _bias $i0,24
- _bias $i1,16
- _bias $i2,8
- _bias $i3,0
-
- xor $rk0,$i0
- lw $i0,0($rcon)
- xor $rk0,$i1
- xor $rk0,$i2
- xor $rk0,$i3
- xor $rk0,$i0
-
- xor $rk1,$rk0
- xor $rk2,$rk1
- xor $rk3,$rk2
-
- .set noreorder
- bnez $cnt,.L128bits
- $PTR_ADD $rcon,4
-
- sw $rk0,0($key)
- sw $rk1,4($key)
- sw $rk2,8($key)
- li $cnt,10
- sw $rk3,12($key)
- li $t0,0
- sw $cnt,80($key)
- b .Lekey_done
- $PTR_SUB $key,10*16
-
-.align 4
-.L192bits:
- .set reorder
- srl $i0,$rk5,16
- srl $i1,$rk5,8
- and $i0,0xff
- and $i1,0xff
- and $i2,$rk5,0xff
- srl $i3,$rk5,24
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $i0,1024($i0)
- lbu $i1,1024($i1)
- lbu $i2,1024($i2)
- lbu $i3,1024($i3)
-
- sw $rk0,0($key)
- sw $rk1,4($key)
- sw $rk2,8($key)
- sw $rk3,12($key)
- sw $rk4,16($key)
- sw $rk5,20($key)
- sub $cnt,1
- $PTR_ADD $key,24
-
- _bias $i0,24
- _bias $i1,16
- _bias $i2,8
- _bias $i3,0
-
- xor $rk0,$i0
- lw $i0,0($rcon)
- xor $rk0,$i1
- xor $rk0,$i2
- xor $rk0,$i3
- xor $rk0,$i0
-
- xor $rk1,$rk0
- xor $rk2,$rk1
- xor $rk3,$rk2
- xor $rk4,$rk3
- xor $rk5,$rk4
-
- .set noreorder
- bnez $cnt,.L192bits
- $PTR_ADD $rcon,4
-
- sw $rk0,0($key)
- sw $rk1,4($key)
- sw $rk2,8($key)
- li $cnt,12
- sw $rk3,12($key)
- li $t0,0
- sw $cnt,48($key)
- b .Lekey_done
- $PTR_SUB $key,12*16
-
-.align 4
-.L256bits:
- .set reorder
- srl $i0,$rk7,16
- srl $i1,$rk7,8
- and $i0,0xff
- and $i1,0xff
- and $i2,$rk7,0xff
- srl $i3,$rk7,24
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $i0,1024($i0)
- lbu $i1,1024($i1)
- lbu $i2,1024($i2)
- lbu $i3,1024($i3)
-
- sw $rk0,0($key)
- sw $rk1,4($key)
- sw $rk2,8($key)
- sw $rk3,12($key)
- sw $rk4,16($key)
- sw $rk5,20($key)
- sw $rk6,24($key)
- sw $rk7,28($key)
- sub $cnt,1
-
- _bias $i0,24
- _bias $i1,16
- _bias $i2,8
- _bias $i3,0
-
- xor $rk0,$i0
- lw $i0,0($rcon)
- xor $rk0,$i1
- xor $rk0,$i2
- xor $rk0,$i3
- xor $rk0,$i0
-
- xor $rk1,$rk0
- xor $rk2,$rk1
- xor $rk3,$rk2
- beqz $cnt,.L256bits_done
-
- srl $i0,$rk3,24
- srl $i1,$rk3,16
- srl $i2,$rk3,8
- and $i3,$rk3,0xff
- and $i1,0xff
- and $i2,0xff
- $PTR_ADD $i0,$Tbl
- $PTR_ADD $i1,$Tbl
- $PTR_ADD $i2,$Tbl
- $PTR_ADD $i3,$Tbl
- lbu $i0,1024($i0)
- lbu $i1,1024($i1)
- lbu $i2,1024($i2)
- lbu $i3,1024($i3)
- sll $i0,24
- sll $i1,16
- sll $i2,8
-
- xor $rk4,$i0
- xor $rk4,$i1
- xor $rk4,$i2
- xor $rk4,$i3
-
- xor $rk5,$rk4
- xor $rk6,$rk5
- xor $rk7,$rk6
-
- $PTR_ADD $key,32
- .set noreorder
- b .L256bits
- $PTR_ADD $rcon,4
-
-.L256bits_done:
- sw $rk0,32($key)
- sw $rk1,36($key)
- sw $rk2,40($key)
- li $cnt,14
- sw $rk3,44($key)
- li $t0,0
- sw $cnt,48($key)
- $PTR_SUB $key,12*16
-
-.Lekey_done:
- jr $ra
- nop
-.end _mips_AES_set_encrypt_key
-
-.globl private_AES_set_encrypt_key
-.ent private_AES_set_encrypt_key
-private_AES_set_encrypt_key:
- .frame $sp,$FRAMESIZE,$ra
- .mask $SAVED_REGS_MASK,-$SZREG
- .set noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
- .cpload $pf
-___
-$code.=<<___;
- $PTR_SUB $sp,$FRAMESIZE
- $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
- $REG_S $s3,$FRAMESIZE-3*$SZREG($sp)
- $REG_S $s2,$FRAMESIZE-4*$SZREG($sp)
- $REG_S $s1,$FRAMESIZE-5*$SZREG($sp)
- $REG_S $s0,$FRAMESIZE-6*$SZREG($sp)
- $REG_S $gp,$FRAMESIZE-7*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
- .cplocal $Tbl
- .cpsetup $pf,$zero,private_AES_set_encrypt_key
-___
-$code.=<<___;
- .set reorder
- la $Tbl,AES_Te # PIC-ified 'load address'
-
- bal _mips_AES_set_encrypt_key
-
- .set noreorder
- move $a0,$t0
- $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
- $REG_L $s3,$FRAMESIZE-11*$SZREG($sp)
- $REG_L $s2,$FRAMESIZE-12*$SZREG($sp)
- $REG_L $s1,$FRAMESIZE-13*$SZREG($sp)
- $REG_L $s0,$FRAMESIZE-14*$SZREG($sp)
- $REG_L $gp,$FRAMESIZE-15*$SZREG($sp)
-___
-$code.=<<___;
- jr $ra
- $PTR_ADD $sp,$FRAMESIZE
-.end private_AES_set_encrypt_key
-___
-
-my ($head,$tail)=($inp,$bits);
-my ($tp1,$tp2,$tp4,$tp8,$tp9,$tpb,$tpd,$tpe)=($a4,$a5,$a6,$a7,$s0,$s1,$s2,$s3);
-my ($m,$x80808080,$x7f7f7f7f,$x1b1b1b1b)=($at,$t0,$t1,$t2);
-$code.=<<___;
-.align 5
-.globl private_AES_set_decrypt_key
-.ent private_AES_set_decrypt_key
-private_AES_set_decrypt_key:
- .frame $sp,$FRAMESIZE,$ra
- .mask $SAVED_REGS_MASK,-$SZREG
- .set noreorder
-___
-$code.=<<___ if ($flavour =~ /o32/i); # o32 PIC-ification
- .cpload $pf
-___
-$code.=<<___;
- $PTR_SUB $sp,$FRAMESIZE
- $REG_S $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_S $fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue
- $REG_S $s3,$FRAMESIZE-3*$SZREG($sp)
- $REG_S $s2,$FRAMESIZE-4*$SZREG($sp)
- $REG_S $s1,$FRAMESIZE-5*$SZREG($sp)
- $REG_S $s0,$FRAMESIZE-6*$SZREG($sp)
- $REG_S $gp,$FRAMESIZE-7*$SZREG($sp)
-___
-$code.=<<___ if ($flavour !~ /o32/i); # non-o32 PIC-ification
- .cplocal $Tbl
- .cpsetup $pf,$zero,private_AES_set_decrypt_key
-___
-$code.=<<___;
- .set reorder
- la $Tbl,AES_Te # PIC-ified 'load address'
-
- bal _mips_AES_set_encrypt_key
-
- bltz $t0,.Ldkey_done
-
- sll $at,$cnt,4
- $PTR_ADD $head,$key,0
- $PTR_ADD $tail,$key,$at
-.align 4
-.Lswap:
- lw $rk0,0($head)
- lw $rk1,4($head)
- lw $rk2,8($head)
- lw $rk3,12($head)
- lw $rk4,0($tail)
- lw $rk5,4($tail)
- lw $rk6,8($tail)
- lw $rk7,12($tail)
- sw $rk0,0($tail)
- sw $rk1,4($tail)
- sw $rk2,8($tail)
- sw $rk3,12($tail)
- $PTR_ADD $head,16
- $PTR_SUB $tail,16
- sw $rk4,-16($head)
- sw $rk5,-12($head)
- sw $rk6,-8($head)
- sw $rk7,-4($head)
- bne $head,$tail,.Lswap
-
- lw $tp1,16($key) # modulo-scheduled
- lui $x80808080,0x8080
- sub $cnt,1
- or $x80808080,0x8080
- sll $cnt,2
- $PTR_ADD $key,16
- lui $x1b1b1b1b,0x1b1b
- nor $x7f7f7f7f,$zero,$x80808080
- or $x1b1b1b1b,0x1b1b
-.align 4
-.Lmix:
- and $m,$tp1,$x80808080
- and $tp2,$tp1,$x7f7f7f7f
- srl $tp4,$m,7
- addu $tp2,$tp2 # tp2<<1
- subu $m,$tp4
- and $m,$x1b1b1b1b
- xor $tp2,$m
-
- and $m,$tp2,$x80808080
- and $tp4,$tp2,$x7f7f7f7f
- srl $tp8,$m,7
- addu $tp4,$tp4 # tp4<<1
- subu $m,$tp8
- and $m,$x1b1b1b1b
- xor $tp4,$m
-
- and $m,$tp4,$x80808080
- and $tp8,$tp4,$x7f7f7f7f
- srl $tp9,$m,7
- addu $tp8,$tp8 # tp8<<1
- subu $m,$tp9
- and $m,$x1b1b1b1b
- xor $tp8,$m
-
- xor $tp9,$tp8,$tp1
- xor $tpe,$tp8,$tp4
- xor $tpb,$tp9,$tp2
- xor $tpd,$tp9,$tp4
-
- _ror $tp1,$tpd,16
- xor $tpe,$tp2
- _ror $tp2,$tpd,-16
- xor $tpe,$tp1
- _ror $tp1,$tp9,8
- xor $tpe,$tp2
- _ror $tp2,$tp9,-24
- xor $tpe,$tp1
- _ror $tp1,$tpb,24
- xor $tpe,$tp2
- _ror $tp2,$tpb,-8
- xor $tpe,$tp1
- lw $tp1,4($key) # modulo-scheduled
- xor $tpe,$tp2
- sub $cnt,1
- sw $tpe,0($key)
- $PTR_ADD $key,4
- bnez $cnt,.Lmix
-
- li $t0,0
-.Ldkey_done:
- .set noreorder
- move $a0,$t0
- $REG_L $ra,$FRAMESIZE-1*$SZREG($sp)
- $REG_L $fp,$FRAMESIZE-2*$SZREG($sp)
-___
-$code.=<<___ if ($flavour =~ /nubi/i);
- $REG_L $s3,$FRAMESIZE-3*$SZREG($sp)
- $REG_L $s2,$FRAMESIZE-4*$SZREG($sp)
- $REG_L $s1,$FRAMESIZE-5*$SZREG($sp)
- $REG_L $s0,$FRAMESIZE-6*$SZREG($sp)
- $REG_L $gp,$FRAMESIZE-7*$SZREG($sp)
-___
-$code.=<<___;
- jr $ra
- $PTR_ADD $sp,$FRAMESIZE
-.end private_AES_set_decrypt_key
-___
-}}}
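
For reference, the .Lmix loop above applies InvMixColumns to each round-key word so that the schedule produced by the encryption key setup can drive the equivalent inverse cipher: tp2, tp4 and tp8 are successive GF(2^8) doublings of the word, computed on all four bytes at once with the 0x80808080/0x7f7f7f7f/0x1b1b1b1b masks, and the rotate-and-XOR tail combines them (together with tp1) into the 9-, b-, d- and e-multiples. Below is a minimal Perl model of one packed doubling step, mirroring the and/srl/addu/subu/and/xor groups; the helper is an illustration, not part of the module.

    sub xtime4 {                            # hypothetical model, not in the module
        my $x = shift;
        my $m = $x & 0x80808080;            # top bit of every byte
        $m = ($m - ($m >> 7)) & 0x1b1b1b1b; # 0x80 -> 0x7f -> 0x1b per marked byte
        return ((($x & 0x7f7f7f7f) << 1) ^ $m) & 0xffffffff;
    }
    printf "%08x\n", xtime4(0x80808080);    # prints 1b1b1b1b

The addu in .Lmix is the left shift by one; masking with 0x7f7f7f7f beforehand keeps the per-byte doubling from carrying across byte boundaries.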
-
-######################################################################
-# Tables are kept in an endian-neutral manner
-$code.=<<___;
-.rdata
-.align 6
-AES_Te:
-.byte 0xc6,0x63,0x63,0xa5, 0xf8,0x7c,0x7c,0x84 # Te0
-.byte 0xee,0x77,0x77,0x99, 0xf6,0x7b,0x7b,0x8d
-.byte 0xff,0xf2,0xf2,0x0d, 0xd6,0x6b,0x6b,0xbd
-.byte 0xde,0x6f,0x6f,0xb1, 0x91,0xc5,0xc5,0x54
-.byte 0x60,0x30,0x30,0x50, 0x02,0x01,0x01,0x03
-.byte 0xce,0x67,0x67,0xa9, 0x56,0x2b,0x2b,0x7d
-.byte 0xe7,0xfe,0xfe,0x19, 0xb5,0xd7,0xd7,0x62
-.byte 0x4d,0xab,0xab,0xe6, 0xec,0x76,0x76,0x9a
-.byte 0x8f,0xca,0xca,0x45, 0x1f,0x82,0x82,0x9d
-.byte 0x89,0xc9,0xc9,0x40, 0xfa,0x7d,0x7d,0x87
-.byte 0xef,0xfa,0xfa,0x15, 0xb2,0x59,0x59,0xeb
-.byte 0x8e,0x47,0x47,0xc9, 0xfb,0xf0,0xf0,0x0b
-.byte 0x41,0xad,0xad,0xec, 0xb3,0xd4,0xd4,0x67
-.byte 0x5f,0xa2,0xa2,0xfd, 0x45,0xaf,0xaf,0xea
-.byte 0x23,0x9c,0x9c,0xbf, 0x53,0xa4,0xa4,0xf7
-.byte 0xe4,0x72,0x72,0x96, 0x9b,0xc0,0xc0,0x5b
-.byte 0x75,0xb7,0xb7,0xc2, 0xe1,0xfd,0xfd,0x1c
-.byte 0x3d,0x93,0x93,0xae, 0x4c,0x26,0x26,0x6a
-.byte 0x6c,0x36,0x36,0x5a, 0x7e,0x3f,0x3f,0x41
-.byte 0xf5,0xf7,0xf7,0x02, 0x83,0xcc,0xcc,0x4f
-.byte 0x68,0x34,0x34,0x5c, 0x51,0xa5,0xa5,0xf4
-.byte 0xd1,0xe5,0xe5,0x34, 0xf9,0xf1,0xf1,0x08
-.byte 0xe2,0x71,0x71,0x93, 0xab,0xd8,0xd8,0x73
-.byte 0x62,0x31,0x31,0x53, 0x2a,0x15,0x15,0x3f
-.byte 0x08,0x04,0x04,0x0c, 0x95,0xc7,0xc7,0x52
-.byte 0x46,0x23,0x23,0x65, 0x9d,0xc3,0xc3,0x5e
-.byte 0x30,0x18,0x18,0x28, 0x37,0x96,0x96,0xa1
-.byte 0x0a,0x05,0x05,0x0f, 0x2f,0x9a,0x9a,0xb5
-.byte 0x0e,0x07,0x07,0x09, 0x24,0x12,0x12,0x36
-.byte 0x1b,0x80,0x80,0x9b, 0xdf,0xe2,0xe2,0x3d
-.byte 0xcd,0xeb,0xeb,0x26, 0x4e,0x27,0x27,0x69
-.byte 0x7f,0xb2,0xb2,0xcd, 0xea,0x75,0x75,0x9f
-.byte 0x12,0x09,0x09,0x1b, 0x1d,0x83,0x83,0x9e
-.byte 0x58,0x2c,0x2c,0x74, 0x34,0x1a,0x1a,0x2e
-.byte 0x36,0x1b,0x1b,0x2d, 0xdc,0x6e,0x6e,0xb2
-.byte 0xb4,0x5a,0x5a,0xee, 0x5b,0xa0,0xa0,0xfb
-.byte 0xa4,0x52,0x52,0xf6, 0x76,0x3b,0x3b,0x4d
-.byte 0xb7,0xd6,0xd6,0x61, 0x7d,0xb3,0xb3,0xce
-.byte 0x52,0x29,0x29,0x7b, 0xdd,0xe3,0xe3,0x3e
-.byte 0x5e,0x2f,0x2f,0x71, 0x13,0x84,0x84,0x97
-.byte 0xa6,0x53,0x53,0xf5, 0xb9,0xd1,0xd1,0x68
-.byte 0x00,0x00,0x00,0x00, 0xc1,0xed,0xed,0x2c
-.byte 0x40,0x20,0x20,0x60, 0xe3,0xfc,0xfc,0x1f
-.byte 0x79,0xb1,0xb1,0xc8, 0xb6,0x5b,0x5b,0xed
-.byte 0xd4,0x6a,0x6a,0xbe, 0x8d,0xcb,0xcb,0x46
-.byte 0x67,0xbe,0xbe,0xd9, 0x72,0x39,0x39,0x4b
-.byte 0x94,0x4a,0x4a,0xde, 0x98,0x4c,0x4c,0xd4
-.byte 0xb0,0x58,0x58,0xe8, 0x85,0xcf,0xcf,0x4a
-.byte 0xbb,0xd0,0xd0,0x6b, 0xc5,0xef,0xef,0x2a
-.byte 0x4f,0xaa,0xaa,0xe5, 0xed,0xfb,0xfb,0x16
-.byte 0x86,0x43,0x43,0xc5, 0x9a,0x4d,0x4d,0xd7
-.byte 0x66,0x33,0x33,0x55, 0x11,0x85,0x85,0x94
-.byte 0x8a,0x45,0x45,0xcf, 0xe9,0xf9,0xf9,0x10
-.byte 0x04,0x02,0x02,0x06, 0xfe,0x7f,0x7f,0x81
-.byte 0xa0,0x50,0x50,0xf0, 0x78,0x3c,0x3c,0x44
-.byte 0x25,0x9f,0x9f,0xba, 0x4b,0xa8,0xa8,0xe3
-.byte 0xa2,0x51,0x51,0xf3, 0x5d,0xa3,0xa3,0xfe
-.byte 0x80,0x40,0x40,0xc0, 0x05,0x8f,0x8f,0x8a
-.byte 0x3f,0x92,0x92,0xad, 0x21,0x9d,0x9d,0xbc
-.byte 0x70,0x38,0x38,0x48, 0xf1,0xf5,0xf5,0x04
-.byte 0x63,0xbc,0xbc,0xdf, 0x77,0xb6,0xb6,0xc1
-.byte 0xaf,0xda,0xda,0x75, 0x42,0x21,0x21,0x63
-.byte 0x20,0x10,0x10,0x30, 0xe5,0xff,0xff,0x1a
-.byte 0xfd,0xf3,0xf3,0x0e, 0xbf,0xd2,0xd2,0x6d
-.byte 0x81,0xcd,0xcd,0x4c, 0x18,0x0c,0x0c,0x14
-.byte 0x26,0x13,0x13,0x35, 0xc3,0xec,0xec,0x2f
-.byte 0xbe,0x5f,0x5f,0xe1, 0x35,0x97,0x97,0xa2
-.byte 0x88,0x44,0x44,0xcc, 0x2e,0x17,0x17,0x39
-.byte 0x93,0xc4,0xc4,0x57, 0x55,0xa7,0xa7,0xf2
-.byte 0xfc,0x7e,0x7e,0x82, 0x7a,0x3d,0x3d,0x47
-.byte 0xc8,0x64,0x64,0xac, 0xba,0x5d,0x5d,0xe7
-.byte 0x32,0x19,0x19,0x2b, 0xe6,0x73,0x73,0x95
-.byte 0xc0,0x60,0x60,0xa0, 0x19,0x81,0x81,0x98
-.byte 0x9e,0x4f,0x4f,0xd1, 0xa3,0xdc,0xdc,0x7f
-.byte 0x44,0x22,0x22,0x66, 0x54,0x2a,0x2a,0x7e
-.byte 0x3b,0x90,0x90,0xab, 0x0b,0x88,0x88,0x83
-.byte 0x8c,0x46,0x46,0xca, 0xc7,0xee,0xee,0x29
-.byte 0x6b,0xb8,0xb8,0xd3, 0x28,0x14,0x14,0x3c
-.byte 0xa7,0xde,0xde,0x79, 0xbc,0x5e,0x5e,0xe2
-.byte 0x16,0x0b,0x0b,0x1d, 0xad,0xdb,0xdb,0x76
-.byte 0xdb,0xe0,0xe0,0x3b, 0x64,0x32,0x32,0x56
-.byte 0x74,0x3a,0x3a,0x4e, 0x14,0x0a,0x0a,0x1e
-.byte 0x92,0x49,0x49,0xdb, 0x0c,0x06,0x06,0x0a
-.byte 0x48,0x24,0x24,0x6c, 0xb8,0x5c,0x5c,0xe4
-.byte 0x9f,0xc2,0xc2,0x5d, 0xbd,0xd3,0xd3,0x6e
-.byte 0x43,0xac,0xac,0xef, 0xc4,0x62,0x62,0xa6
-.byte 0x39,0x91,0x91,0xa8, 0x31,0x95,0x95,0xa4
-.byte 0xd3,0xe4,0xe4,0x37, 0xf2,0x79,0x79,0x8b
-.byte 0xd5,0xe7,0xe7,0x32, 0x8b,0xc8,0xc8,0x43
-.byte 0x6e,0x37,0x37,0x59, 0xda,0x6d,0x6d,0xb7
-.byte 0x01,0x8d,0x8d,0x8c, 0xb1,0xd5,0xd5,0x64
-.byte 0x9c,0x4e,0x4e,0xd2, 0x49,0xa9,0xa9,0xe0
-.byte 0xd8,0x6c,0x6c,0xb4, 0xac,0x56,0x56,0xfa
-.byte 0xf3,0xf4,0xf4,0x07, 0xcf,0xea,0xea,0x25
-.byte 0xca,0x65,0x65,0xaf, 0xf4,0x7a,0x7a,0x8e
-.byte 0x47,0xae,0xae,0xe9, 0x10,0x08,0x08,0x18
-.byte 0x6f,0xba,0xba,0xd5, 0xf0,0x78,0x78,0x88
-.byte 0x4a,0x25,0x25,0x6f, 0x5c,0x2e,0x2e,0x72
-.byte 0x38,0x1c,0x1c,0x24, 0x57,0xa6,0xa6,0xf1
-.byte 0x73,0xb4,0xb4,0xc7, 0x97,0xc6,0xc6,0x51
-.byte 0xcb,0xe8,0xe8,0x23, 0xa1,0xdd,0xdd,0x7c
-.byte 0xe8,0x74,0x74,0x9c, 0x3e,0x1f,0x1f,0x21
-.byte 0x96,0x4b,0x4b,0xdd, 0x61,0xbd,0xbd,0xdc
-.byte 0x0d,0x8b,0x8b,0x86, 0x0f,0x8a,0x8a,0x85
-.byte 0xe0,0x70,0x70,0x90, 0x7c,0x3e,0x3e,0x42
-.byte 0x71,0xb5,0xb5,0xc4, 0xcc,0x66,0x66,0xaa
-.byte 0x90,0x48,0x48,0xd8, 0x06,0x03,0x03,0x05
-.byte 0xf7,0xf6,0xf6,0x01, 0x1c,0x0e,0x0e,0x12
-.byte 0xc2,0x61,0x61,0xa3, 0x6a,0x35,0x35,0x5f
-.byte 0xae,0x57,0x57,0xf9, 0x69,0xb9,0xb9,0xd0
-.byte 0x17,0x86,0x86,0x91, 0x99,0xc1,0xc1,0x58
-.byte 0x3a,0x1d,0x1d,0x27, 0x27,0x9e,0x9e,0xb9
-.byte 0xd9,0xe1,0xe1,0x38, 0xeb,0xf8,0xf8,0x13
-.byte 0x2b,0x98,0x98,0xb3, 0x22,0x11,0x11,0x33
-.byte 0xd2,0x69,0x69,0xbb, 0xa9,0xd9,0xd9,0x70
-.byte 0x07,0x8e,0x8e,0x89, 0x33,0x94,0x94,0xa7
-.byte 0x2d,0x9b,0x9b,0xb6, 0x3c,0x1e,0x1e,0x22
-.byte 0x15,0x87,0x87,0x92, 0xc9,0xe9,0xe9,0x20
-.byte 0x87,0xce,0xce,0x49, 0xaa,0x55,0x55,0xff
-.byte 0x50,0x28,0x28,0x78, 0xa5,0xdf,0xdf,0x7a
-.byte 0x03,0x8c,0x8c,0x8f, 0x59,0xa1,0xa1,0xf8
-.byte 0x09,0x89,0x89,0x80, 0x1a,0x0d,0x0d,0x17
-.byte 0x65,0xbf,0xbf,0xda, 0xd7,0xe6,0xe6,0x31
-.byte 0x84,0x42,0x42,0xc6, 0xd0,0x68,0x68,0xb8
-.byte 0x82,0x41,0x41,0xc3, 0x29,0x99,0x99,0xb0
-.byte 0x5a,0x2d,0x2d,0x77, 0x1e,0x0f,0x0f,0x11
-.byte 0x7b,0xb0,0xb0,0xcb, 0xa8,0x54,0x54,0xfc
-.byte 0x6d,0xbb,0xbb,0xd6, 0x2c,0x16,0x16,0x3a
-
-.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5 # Te4
-.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-
-.byte 0x01,0x00,0x00,0x00, 0x02,0x00,0x00,0x00 # rcon
-.byte 0x04,0x00,0x00,0x00, 0x08,0x00,0x00,0x00
-.byte 0x10,0x00,0x00,0x00, 0x20,0x00,0x00,0x00
-.byte 0x40,0x00,0x00,0x00, 0x80,0x00,0x00,0x00
-.byte 0x1B,0x00,0x00,0x00, 0x36,0x00,0x00,0x00
-
-.align 6
-AES_Td:
-.byte 0x51,0xf4,0xa7,0x50, 0x7e,0x41,0x65,0x53 # Td0
-.byte 0x1a,0x17,0xa4,0xc3, 0x3a,0x27,0x5e,0x96
-.byte 0x3b,0xab,0x6b,0xcb, 0x1f,0x9d,0x45,0xf1
-.byte 0xac,0xfa,0x58,0xab, 0x4b,0xe3,0x03,0x93
-.byte 0x20,0x30,0xfa,0x55, 0xad,0x76,0x6d,0xf6
-.byte 0x88,0xcc,0x76,0x91, 0xf5,0x02,0x4c,0x25
-.byte 0x4f,0xe5,0xd7,0xfc, 0xc5,0x2a,0xcb,0xd7
-.byte 0x26,0x35,0x44,0x80, 0xb5,0x62,0xa3,0x8f
-.byte 0xde,0xb1,0x5a,0x49, 0x25,0xba,0x1b,0x67
-.byte 0x45,0xea,0x0e,0x98, 0x5d,0xfe,0xc0,0xe1
-.byte 0xc3,0x2f,0x75,0x02, 0x81,0x4c,0xf0,0x12
-.byte 0x8d,0x46,0x97,0xa3, 0x6b,0xd3,0xf9,0xc6
-.byte 0x03,0x8f,0x5f,0xe7, 0x15,0x92,0x9c,0x95
-.byte 0xbf,0x6d,0x7a,0xeb, 0x95,0x52,0x59,0xda
-.byte 0xd4,0xbe,0x83,0x2d, 0x58,0x74,0x21,0xd3
-.byte 0x49,0xe0,0x69,0x29, 0x8e,0xc9,0xc8,0x44
-.byte 0x75,0xc2,0x89,0x6a, 0xf4,0x8e,0x79,0x78
-.byte 0x99,0x58,0x3e,0x6b, 0x27,0xb9,0x71,0xdd
-.byte 0xbe,0xe1,0x4f,0xb6, 0xf0,0x88,0xad,0x17
-.byte 0xc9,0x20,0xac,0x66, 0x7d,0xce,0x3a,0xb4
-.byte 0x63,0xdf,0x4a,0x18, 0xe5,0x1a,0x31,0x82
-.byte 0x97,0x51,0x33,0x60, 0x62,0x53,0x7f,0x45
-.byte 0xb1,0x64,0x77,0xe0, 0xbb,0x6b,0xae,0x84
-.byte 0xfe,0x81,0xa0,0x1c, 0xf9,0x08,0x2b,0x94
-.byte 0x70,0x48,0x68,0x58, 0x8f,0x45,0xfd,0x19
-.byte 0x94,0xde,0x6c,0x87, 0x52,0x7b,0xf8,0xb7
-.byte 0xab,0x73,0xd3,0x23, 0x72,0x4b,0x02,0xe2
-.byte 0xe3,0x1f,0x8f,0x57, 0x66,0x55,0xab,0x2a
-.byte 0xb2,0xeb,0x28,0x07, 0x2f,0xb5,0xc2,0x03
-.byte 0x86,0xc5,0x7b,0x9a, 0xd3,0x37,0x08,0xa5
-.byte 0x30,0x28,0x87,0xf2, 0x23,0xbf,0xa5,0xb2
-.byte 0x02,0x03,0x6a,0xba, 0xed,0x16,0x82,0x5c
-.byte 0x8a,0xcf,0x1c,0x2b, 0xa7,0x79,0xb4,0x92
-.byte 0xf3,0x07,0xf2,0xf0, 0x4e,0x69,0xe2,0xa1
-.byte 0x65,0xda,0xf4,0xcd, 0x06,0x05,0xbe,0xd5
-.byte 0xd1,0x34,0x62,0x1f, 0xc4,0xa6,0xfe,0x8a
-.byte 0x34,0x2e,0x53,0x9d, 0xa2,0xf3,0x55,0xa0
-.byte 0x05,0x8a,0xe1,0x32, 0xa4,0xf6,0xeb,0x75
-.byte 0x0b,0x83,0xec,0x39, 0x40,0x60,0xef,0xaa
-.byte 0x5e,0x71,0x9f,0x06, 0xbd,0x6e,0x10,0x51
-.byte 0x3e,0x21,0x8a,0xf9, 0x96,0xdd,0x06,0x3d
-.byte 0xdd,0x3e,0x05,0xae, 0x4d,0xe6,0xbd,0x46
-.byte 0x91,0x54,0x8d,0xb5, 0x71,0xc4,0x5d,0x05
-.byte 0x04,0x06,0xd4,0x6f, 0x60,0x50,0x15,0xff
-.byte 0x19,0x98,0xfb,0x24, 0xd6,0xbd,0xe9,0x97
-.byte 0x89,0x40,0x43,0xcc, 0x67,0xd9,0x9e,0x77
-.byte 0xb0,0xe8,0x42,0xbd, 0x07,0x89,0x8b,0x88
-.byte 0xe7,0x19,0x5b,0x38, 0x79,0xc8,0xee,0xdb
-.byte 0xa1,0x7c,0x0a,0x47, 0x7c,0x42,0x0f,0xe9
-.byte 0xf8,0x84,0x1e,0xc9, 0x00,0x00,0x00,0x00
-.byte 0x09,0x80,0x86,0x83, 0x32,0x2b,0xed,0x48
-.byte 0x1e,0x11,0x70,0xac, 0x6c,0x5a,0x72,0x4e
-.byte 0xfd,0x0e,0xff,0xfb, 0x0f,0x85,0x38,0x56
-.byte 0x3d,0xae,0xd5,0x1e, 0x36,0x2d,0x39,0x27
-.byte 0x0a,0x0f,0xd9,0x64, 0x68,0x5c,0xa6,0x21
-.byte 0x9b,0x5b,0x54,0xd1, 0x24,0x36,0x2e,0x3a
-.byte 0x0c,0x0a,0x67,0xb1, 0x93,0x57,0xe7,0x0f
-.byte 0xb4,0xee,0x96,0xd2, 0x1b,0x9b,0x91,0x9e
-.byte 0x80,0xc0,0xc5,0x4f, 0x61,0xdc,0x20,0xa2
-.byte 0x5a,0x77,0x4b,0x69, 0x1c,0x12,0x1a,0x16
-.byte 0xe2,0x93,0xba,0x0a, 0xc0,0xa0,0x2a,0xe5
-.byte 0x3c,0x22,0xe0,0x43, 0x12,0x1b,0x17,0x1d
-.byte 0x0e,0x09,0x0d,0x0b, 0xf2,0x8b,0xc7,0xad
-.byte 0x2d,0xb6,0xa8,0xb9, 0x14,0x1e,0xa9,0xc8
-.byte 0x57,0xf1,0x19,0x85, 0xaf,0x75,0x07,0x4c
-.byte 0xee,0x99,0xdd,0xbb, 0xa3,0x7f,0x60,0xfd
-.byte 0xf7,0x01,0x26,0x9f, 0x5c,0x72,0xf5,0xbc
-.byte 0x44,0x66,0x3b,0xc5, 0x5b,0xfb,0x7e,0x34
-.byte 0x8b,0x43,0x29,0x76, 0xcb,0x23,0xc6,0xdc
-.byte 0xb6,0xed,0xfc,0x68, 0xb8,0xe4,0xf1,0x63
-.byte 0xd7,0x31,0xdc,0xca, 0x42,0x63,0x85,0x10
-.byte 0x13,0x97,0x22,0x40, 0x84,0xc6,0x11,0x20
-.byte 0x85,0x4a,0x24,0x7d, 0xd2,0xbb,0x3d,0xf8
-.byte 0xae,0xf9,0x32,0x11, 0xc7,0x29,0xa1,0x6d
-.byte 0x1d,0x9e,0x2f,0x4b, 0xdc,0xb2,0x30,0xf3
-.byte 0x0d,0x86,0x52,0xec, 0x77,0xc1,0xe3,0xd0
-.byte 0x2b,0xb3,0x16,0x6c, 0xa9,0x70,0xb9,0x99
-.byte 0x11,0x94,0x48,0xfa, 0x47,0xe9,0x64,0x22
-.byte 0xa8,0xfc,0x8c,0xc4, 0xa0,0xf0,0x3f,0x1a
-.byte 0x56,0x7d,0x2c,0xd8, 0x22,0x33,0x90,0xef
-.byte 0x87,0x49,0x4e,0xc7, 0xd9,0x38,0xd1,0xc1
-.byte 0x8c,0xca,0xa2,0xfe, 0x98,0xd4,0x0b,0x36
-.byte 0xa6,0xf5,0x81,0xcf, 0xa5,0x7a,0xde,0x28
-.byte 0xda,0xb7,0x8e,0x26, 0x3f,0xad,0xbf,0xa4
-.byte 0x2c,0x3a,0x9d,0xe4, 0x50,0x78,0x92,0x0d
-.byte 0x6a,0x5f,0xcc,0x9b, 0x54,0x7e,0x46,0x62
-.byte 0xf6,0x8d,0x13,0xc2, 0x90,0xd8,0xb8,0xe8
-.byte 0x2e,0x39,0xf7,0x5e, 0x82,0xc3,0xaf,0xf5
-.byte 0x9f,0x5d,0x80,0xbe, 0x69,0xd0,0x93,0x7c
-.byte 0x6f,0xd5,0x2d,0xa9, 0xcf,0x25,0x12,0xb3
-.byte 0xc8,0xac,0x99,0x3b, 0x10,0x18,0x7d,0xa7
-.byte 0xe8,0x9c,0x63,0x6e, 0xdb,0x3b,0xbb,0x7b
-.byte 0xcd,0x26,0x78,0x09, 0x6e,0x59,0x18,0xf4
-.byte 0xec,0x9a,0xb7,0x01, 0x83,0x4f,0x9a,0xa8
-.byte 0xe6,0x95,0x6e,0x65, 0xaa,0xff,0xe6,0x7e
-.byte 0x21,0xbc,0xcf,0x08, 0xef,0x15,0xe8,0xe6
-.byte 0xba,0xe7,0x9b,0xd9, 0x4a,0x6f,0x36,0xce
-.byte 0xea,0x9f,0x09,0xd4, 0x29,0xb0,0x7c,0xd6
-.byte 0x31,0xa4,0xb2,0xaf, 0x2a,0x3f,0x23,0x31
-.byte 0xc6,0xa5,0x94,0x30, 0x35,0xa2,0x66,0xc0
-.byte 0x74,0x4e,0xbc,0x37, 0xfc,0x82,0xca,0xa6
-.byte 0xe0,0x90,0xd0,0xb0, 0x33,0xa7,0xd8,0x15
-.byte 0xf1,0x04,0x98,0x4a, 0x41,0xec,0xda,0xf7
-.byte 0x7f,0xcd,0x50,0x0e, 0x17,0x91,0xf6,0x2f
-.byte 0x76,0x4d,0xd6,0x8d, 0x43,0xef,0xb0,0x4d
-.byte 0xcc,0xaa,0x4d,0x54, 0xe4,0x96,0x04,0xdf
-.byte 0x9e,0xd1,0xb5,0xe3, 0x4c,0x6a,0x88,0x1b
-.byte 0xc1,0x2c,0x1f,0xb8, 0x46,0x65,0x51,0x7f
-.byte 0x9d,0x5e,0xea,0x04, 0x01,0x8c,0x35,0x5d
-.byte 0xfa,0x87,0x74,0x73, 0xfb,0x0b,0x41,0x2e
-.byte 0xb3,0x67,0x1d,0x5a, 0x92,0xdb,0xd2,0x52
-.byte 0xe9,0x10,0x56,0x33, 0x6d,0xd6,0x47,0x13
-.byte 0x9a,0xd7,0x61,0x8c, 0x37,0xa1,0x0c,0x7a
-.byte 0x59,0xf8,0x14,0x8e, 0xeb,0x13,0x3c,0x89
-.byte 0xce,0xa9,0x27,0xee, 0xb7,0x61,0xc9,0x35
-.byte 0xe1,0x1c,0xe5,0xed, 0x7a,0x47,0xb1,0x3c
-.byte 0x9c,0xd2,0xdf,0x59, 0x55,0xf2,0x73,0x3f
-.byte 0x18,0x14,0xce,0x79, 0x73,0xc7,0x37,0xbf
-.byte 0x53,0xf7,0xcd,0xea, 0x5f,0xfd,0xaa,0x5b
-.byte 0xdf,0x3d,0x6f,0x14, 0x78,0x44,0xdb,0x86
-.byte 0xca,0xaf,0xf3,0x81, 0xb9,0x68,0xc4,0x3e
-.byte 0x38,0x24,0x34,0x2c, 0xc2,0xa3,0x40,0x5f
-.byte 0x16,0x1d,0xc3,0x72, 0xbc,0xe2,0x25,0x0c
-.byte 0x28,0x3c,0x49,0x8b, 0xff,0x0d,0x95,0x41
-.byte 0x39,0xa8,0x01,0x71, 0x08,0x0c,0xb3,0xde
-.byte 0xd8,0xb4,0xe4,0x9c, 0x64,0x56,0xc1,0x90
-.byte 0x7b,0xcb,0x84,0x61, 0xd5,0x32,0xb6,0x70
-.byte 0x48,0x6c,0x5c,0x74, 0xd0,0xb8,0x57,0x42
-
-.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38 # Td4
-.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-___
-
-foreach (split("\n",$code)) {
- s/\`([^\`]*)\`/eval $1/ge;
-
- # the made-up instructions _xtr, _ins, _ror and _bias cope
- # with byte-order dependencies...
- if (/^\s+_/) {
- s/(_[a-z]+\s+)(\$[0-9]+),([^,]+)(#.*)*$/$1$2,$2,$3/;
-
- s/_xtr\s+(\$[0-9]+),(\$[0-9]+),([0-9]+(\-2)*)/
- sprintf("srl\t$1,$2,%d",$big_endian ? eval($3)
- : eval("24-$3"))/e or
- s/_ins\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
- sprintf("sll\t$1,$2,%d",$big_endian ? eval($3)
- : eval("24-$3"))/e or
- s/_ror\s+(\$[0-9]+),(\$[0-9]+),(\-?[0-9]+)/
- sprintf("srl\t$1,$2,%d",$big_endian ? eval($3)
- : eval("$3*-1"))/e or
- s/_bias\s+(\$[0-9]+),(\$[0-9]+),([0-9]+)/
- sprintf("sll\t$1,$2,%d",$big_endian ? eval($3)
- : eval("($3-16)&31"))/e;
-
- s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/
- sprintf("sll\t$1,$2,$3")/e or
- s/srl\s+(\$[0-9]+),(\$[0-9]+),0/
- sprintf("and\t$1,$2,0xff")/e or
- s/(sll\s+\$[0-9]+,\$[0-9]+,0)/#$1/;
- }
-
- # convert lwl/lwr and swr/swl to little-endian order
- if (!$big_endian && /^\s+[sl]w[lr]\s+/) {
- s/([sl]wl.*)([0-9]+)\((\$[0-9]+)\)/
- sprintf("$1%d($3)",eval("$2-$2%4+($2%4-1)&3"))/e or
- s/([sl]wr.*)([0-9]+)\((\$[0-9]+)\)/
- sprintf("$1%d($3)",eval("$2-$2%4+($2%4+1)&3"))/e;
- }
-
- print $_,"\n";
-}
-
-close STDOUT;
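
The loop above post-processes the generated assembly: the made-up mnemonics are expanded into real MIPS instructions, with shift amounts mirrored on little-endian targets. A self-contained sketch of the _ror case follows; the sample operands and the $big_endian flag are assumptions for illustration. On a big-endian target _ror becomes a plain srl, while on a little-endian one the count is negated and the resulting negative srl is folded into an sll:

    my $big_endian = 0;                     # assumption: little-endian target
    for my $line ("\t_ror\t\$1,\$2,16", "\t_ror\t\$1,\$2,-16") {
        my $out = $line;
        $out =~ s/_ror\s+(\$[0-9]+),(\$[0-9]+),(\-?[0-9]+)/
            sprintf("srl\t$1,$2,%d",$big_endian ? eval($3)
                                                : eval("$3*-1"))/e;
        # a negative srl count is then folded into the opposite shift
        $out =~ s/srl\s+(\$[0-9]+),(\$[0-9]+),\-([0-9]+)/
            sprintf("sll\t$1,$2,$3")/e;
        print "$line  =>  $out\n";
    }

The two substitutions are lifted verbatim from the loop above; running the sketch shows the srl/sll swap between the positive and negative rotate counts.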
diff --git a/app/openssl/crypto/aes/asm/aes-parisc.pl b/app/openssl/crypto/aes/asm/aes-parisc.pl
deleted file mode 100644
index 714dcfbb..00000000
--- a/app/openssl/crypto/aes/asm/aes-parisc.pl
+++ /dev/null
@@ -1,1022 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for PA-RISC.
-#
-# June 2009.
-#
-# The module is a mechanical transliteration of aes-sparcv9.pl, but
-# with a twist: the S-boxes are compressed even further, down to
-# 1K+256B. On PA-7100LC performance is ~40% better than gcc 3.2
-# generated code, at about 33 cycles per byte processed with a 128-bit
-# key. Newer CPUs perform at 16 cycles per byte. It's not faster than
-# code generated by the vendor compiler, but recall that it uses
-# compressed S-boxes, which require extra processing.
-#
-# Special thanks to polarhome.com for providing an HP-UX account.
-
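The 1K+256B figure follows from storing only Te0 (256 words, i.e. 1KB) plus the raw 256-byte S-box for the last round; the other three round tables are byte rotations of Te0, which is why the looked-up words are _ror'ed before being XORed into the state. A small Perl check of that identity, where ror32 is a made-up helper and the constant is Te0[0] from the table further down:

    sub ror32 { my ($x, $n) = @_; (($x >> $n) | ($x << (32 - $n))) & 0xffffffff }

    my $te0 = 0xc66363a5;                   # Te0[0] from the L$AES_Te table below
    printf "Te1[0]=%08x Te2[0]=%08x Te3[0]=%08x\n",
           ror32($te0, 8), ror32($te0, 16), ror32($te0, 24);
    # prints Te1[0]=a5c66363 Te2[0]=63a5c663 Te3[0]=6363a5c6
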
-$flavour = shift;
-$output = shift;
-open STDOUT,">$output";
-
-if ($flavour =~ /64/) {
- $LEVEL ="2.0W";
- $SIZE_T =8;
- $FRAME_MARKER =80;
- $SAVED_RP =16;
- $PUSH ="std";
- $PUSHMA ="std,ma";
- $POP ="ldd";
- $POPMB ="ldd,mb";
-} else {
- $LEVEL ="1.0";
- $SIZE_T =4;
- $FRAME_MARKER =48;
- $SAVED_RP =20;
- $PUSH ="stw";
- $PUSHMA ="stwm";
- $POP ="ldw";
- $POPMB ="ldwm";
-}
-
-$FRAME=16*$SIZE_T+$FRAME_MARKER; # 16 saved regs + frame marker
- # [+ argument transfer]
-$inp="%r26"; # arg0
-$out="%r25"; # arg1
-$key="%r24"; # arg2
-
-($s0,$s1,$s2,$s3) = ("%r1","%r2","%r3","%r4");
-($t0,$t1,$t2,$t3) = ("%r5","%r6","%r7","%r8");
-
-($acc0, $acc1, $acc2, $acc3, $acc4, $acc5, $acc6, $acc7,
- $acc8, $acc9,$acc10,$acc11,$acc12,$acc13,$acc14,$acc15) =
-("%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16",
-"%r17","%r18","%r19","%r20","%r21","%r22","%r23","%r26");
-
-$tbl="%r28";
-$rounds="%r29";
-
-$code=<<___;
- .LEVEL $LEVEL
- .SPACE \$TEXT\$
- .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
-
- .EXPORT AES_encrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
- .ALIGN 64
-AES_encrypt
- .PROC
- .CALLINFO FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
- .ENTRY
- $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
- $PUSHMA %r3,$FRAME(%sp)
- $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
- $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
- $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
- $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
- $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
- $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
- $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
- $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp)
- $PUSH %r12,`-$FRAME+9*$SIZE_T`(%sp)
- $PUSH %r13,`-$FRAME+10*$SIZE_T`(%sp)
- $PUSH %r14,`-$FRAME+11*$SIZE_T`(%sp)
- $PUSH %r15,`-$FRAME+12*$SIZE_T`(%sp)
- $PUSH %r16,`-$FRAME+13*$SIZE_T`(%sp)
- $PUSH %r17,`-$FRAME+14*$SIZE_T`(%sp)
- $PUSH %r18,`-$FRAME+15*$SIZE_T`(%sp)
-
- blr %r0,$tbl
- ldi 3,$t0
-L\$enc_pic
- andcm $tbl,$t0,$tbl
- ldo L\$AES_Te-L\$enc_pic($tbl),$tbl
-
- and $inp,$t0,$t0
- sub $inp,$t0,$inp
- ldw 0($inp),$s0
- ldw 4($inp),$s1
- ldw 8($inp),$s2
- comib,= 0,$t0,L\$enc_inp_aligned
- ldw 12($inp),$s3
-
- sh3addl $t0,%r0,$t0
- subi 32,$t0,$t0
- mtctl $t0,%cr11
- ldw 16($inp),$t1
- vshd $s0,$s1,$s0
- vshd $s1,$s2,$s1
- vshd $s2,$s3,$s2
- vshd $s3,$t1,$s3
-
-L\$enc_inp_aligned
- bl _parisc_AES_encrypt,%r31
- nop
-
- extru,<> $out,31,2,%r0
- b L\$enc_out_aligned
- nop
-
- _srm $s0,24,$acc0
- _srm $s0,16,$acc1
- stb $acc0,0($out)
- _srm $s0,8,$acc2
- stb $acc1,1($out)
- _srm $s1,24,$acc4
- stb $acc2,2($out)
- _srm $s1,16,$acc5
- stb $s0,3($out)
- _srm $s1,8,$acc6
- stb $acc4,4($out)
- _srm $s2,24,$acc0
- stb $acc5,5($out)
- _srm $s2,16,$acc1
- stb $acc6,6($out)
- _srm $s2,8,$acc2
- stb $s1,7($out)
- _srm $s3,24,$acc4
- stb $acc0,8($out)
- _srm $s3,16,$acc5
- stb $acc1,9($out)
- _srm $s3,8,$acc6
- stb $acc2,10($out)
- stb $s2,11($out)
- stb $acc4,12($out)
- stb $acc5,13($out)
- stb $acc6,14($out)
- b L\$enc_done
- stb $s3,15($out)
-
-L\$enc_out_aligned
- stw $s0,0($out)
- stw $s1,4($out)
- stw $s2,8($out)
- stw $s3,12($out)
-
-L\$enc_done
- $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
- $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
- $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
- $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
- $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
- $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
- $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
- $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
- $POP `-$FRAME+8*$SIZE_T`(%sp),%r11
- $POP `-$FRAME+9*$SIZE_T`(%sp),%r12
- $POP `-$FRAME+10*$SIZE_T`(%sp),%r13
- $POP `-$FRAME+11*$SIZE_T`(%sp),%r14
- $POP `-$FRAME+12*$SIZE_T`(%sp),%r15
- $POP `-$FRAME+13*$SIZE_T`(%sp),%r16
- $POP `-$FRAME+14*$SIZE_T`(%sp),%r17
- $POP `-$FRAME+15*$SIZE_T`(%sp),%r18
- bv (%r2)
- .EXIT
- $POPMB -$FRAME(%sp),%r3
- .PROCEND
-
- .ALIGN 16
-_parisc_AES_encrypt
- .PROC
- .CALLINFO MILLICODE
- .ENTRY
- ldw 240($key),$rounds
- ldw 0($key),$t0
- ldw 4($key),$t1
- ldw 8($key),$t2
- _srm $rounds,1,$rounds
- xor $t0,$s0,$s0
- ldw 12($key),$t3
- _srm $s0,24,$acc0
- xor $t1,$s1,$s1
- ldw 16($key),$t0
- _srm $s1,16,$acc1
- xor $t2,$s2,$s2
- ldw 20($key),$t1
- xor $t3,$s3,$s3
- ldw 24($key),$t2
- ldw 28($key),$t3
-L\$enc_loop
- _srm $s2,8,$acc2
- ldwx,s $acc0($tbl),$acc0
- _srm $s3,0,$acc3
- ldwx,s $acc1($tbl),$acc1
- _srm $s1,24,$acc4
- ldwx,s $acc2($tbl),$acc2
- _srm $s2,16,$acc5
- ldwx,s $acc3($tbl),$acc3
- _srm $s3,8,$acc6
- ldwx,s $acc4($tbl),$acc4
- _srm $s0,0,$acc7
- ldwx,s $acc5($tbl),$acc5
- _srm $s2,24,$acc8
- ldwx,s $acc6($tbl),$acc6
- _srm $s3,16,$acc9
- ldwx,s $acc7($tbl),$acc7
- _srm $s0,8,$acc10
- ldwx,s $acc8($tbl),$acc8
- _srm $s1,0,$acc11
- ldwx,s $acc9($tbl),$acc9
- _srm $s3,24,$acc12
- ldwx,s $acc10($tbl),$acc10
- _srm $s0,16,$acc13
- ldwx,s $acc11($tbl),$acc11
- _srm $s1,8,$acc14
- ldwx,s $acc12($tbl),$acc12
- _srm $s2,0,$acc15
- ldwx,s $acc13($tbl),$acc13
- ldwx,s $acc14($tbl),$acc14
- ldwx,s $acc15($tbl),$acc15
- addib,= -1,$rounds,L\$enc_last
- ldo 32($key),$key
-
- _ror $acc1,8,$acc1
- xor $acc0,$t0,$t0
- ldw 0($key),$s0
- _ror $acc2,16,$acc2
- xor $acc1,$t0,$t0
- ldw 4($key),$s1
- _ror $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ldw 8($key),$s2
- _ror $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ldw 12($key),$s3
- _ror $acc6,16,$acc6
- xor $acc4,$t1,$t1
- _ror $acc7,24,$acc7
- xor $acc5,$t1,$t1
- _ror $acc9,8,$acc9
- xor $acc6,$t1,$t1
- _ror $acc10,16,$acc10
- xor $acc7,$t1,$t1
- _ror $acc11,24,$acc11
- xor $acc8,$t2,$t2
- _ror $acc13,8,$acc13
- xor $acc9,$t2,$t2
- _ror $acc14,16,$acc14
- xor $acc10,$t2,$t2
- _ror $acc15,24,$acc15
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- _srm $t0,24,$acc0
- xor $acc14,$t3,$t3
- _srm $t1,16,$acc1
- xor $acc15,$t3,$t3
-
- _srm $t2,8,$acc2
- ldwx,s $acc0($tbl),$acc0
- _srm $t3,0,$acc3
- ldwx,s $acc1($tbl),$acc1
- _srm $t1,24,$acc4
- ldwx,s $acc2($tbl),$acc2
- _srm $t2,16,$acc5
- ldwx,s $acc3($tbl),$acc3
- _srm $t3,8,$acc6
- ldwx,s $acc4($tbl),$acc4
- _srm $t0,0,$acc7
- ldwx,s $acc5($tbl),$acc5
- _srm $t2,24,$acc8
- ldwx,s $acc6($tbl),$acc6
- _srm $t3,16,$acc9
- ldwx,s $acc7($tbl),$acc7
- _srm $t0,8,$acc10
- ldwx,s $acc8($tbl),$acc8
- _srm $t1,0,$acc11
- ldwx,s $acc9($tbl),$acc9
- _srm $t3,24,$acc12
- ldwx,s $acc10($tbl),$acc10
- _srm $t0,16,$acc13
- ldwx,s $acc11($tbl),$acc11
- _srm $t1,8,$acc14
- ldwx,s $acc12($tbl),$acc12
- _srm $t2,0,$acc15
- ldwx,s $acc13($tbl),$acc13
- _ror $acc1,8,$acc1
- ldwx,s $acc14($tbl),$acc14
-
- _ror $acc2,16,$acc2
- xor $acc0,$s0,$s0
- ldwx,s $acc15($tbl),$acc15
- _ror $acc3,24,$acc3
- xor $acc1,$s0,$s0
- ldw 16($key),$t0
- _ror $acc5,8,$acc5
- xor $acc2,$s0,$s0
- ldw 20($key),$t1
- _ror $acc6,16,$acc6
- xor $acc3,$s0,$s0
- ldw 24($key),$t2
- _ror $acc7,24,$acc7
- xor $acc4,$s1,$s1
- ldw 28($key),$t3
- _ror $acc9,8,$acc9
- xor $acc5,$s1,$s1
- ldw 1024+0($tbl),%r0 ; prefetch te4
- _ror $acc10,16,$acc10
- xor $acc6,$s1,$s1
- ldw 1024+32($tbl),%r0 ; prefetch te4
- _ror $acc11,24,$acc11
- xor $acc7,$s1,$s1
- ldw 1024+64($tbl),%r0 ; prefetch te4
- _ror $acc13,8,$acc13
- xor $acc8,$s2,$s2
- ldw 1024+96($tbl),%r0 ; prefetch te4
- _ror $acc14,16,$acc14
- xor $acc9,$s2,$s2
- ldw 1024+128($tbl),%r0 ; prefetch te4
- _ror $acc15,24,$acc15
- xor $acc10,$s2,$s2
- ldw 1024+160($tbl),%r0 ; prefetch te4
- _srm $s0,24,$acc0
- xor $acc11,$s2,$s2
- ldw 1024+192($tbl),%r0 ; prefetch te4
- xor $acc12,$acc14,$acc14
- xor $acc13,$s3,$s3
- ldw 1024+224($tbl),%r0 ; prefetch te4
- _srm $s1,16,$acc1
- xor $acc14,$s3,$s3
- b L\$enc_loop
- xor $acc15,$s3,$s3
-
- .ALIGN 16
-L\$enc_last
- ldo 1024($tbl),$rounds
- _ror $acc1,8,$acc1
- xor $acc0,$t0,$t0
- ldw 0($key),$s0
- _ror $acc2,16,$acc2
- xor $acc1,$t0,$t0
- ldw 4($key),$s1
- _ror $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ldw 8($key),$s2
- _ror $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ldw 12($key),$s3
- _ror $acc6,16,$acc6
- xor $acc4,$t1,$t1
- _ror $acc7,24,$acc7
- xor $acc5,$t1,$t1
- _ror $acc9,8,$acc9
- xor $acc6,$t1,$t1
- _ror $acc10,16,$acc10
- xor $acc7,$t1,$t1
- _ror $acc11,24,$acc11
- xor $acc8,$t2,$t2
- _ror $acc13,8,$acc13
- xor $acc9,$t2,$t2
- _ror $acc14,16,$acc14
- xor $acc10,$t2,$t2
- _ror $acc15,24,$acc15
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- _srm $t0,24,$acc0
- xor $acc14,$t3,$t3
- _srm $t1,16,$acc1
- xor $acc15,$t3,$t3
-
- _srm $t2,8,$acc2
- ldbx $acc0($rounds),$acc0
- _srm $t1,24,$acc4
- ldbx $acc1($rounds),$acc1
- _srm $t2,16,$acc5
- _srm $t3,0,$acc3
- ldbx $acc2($rounds),$acc2
- ldbx $acc3($rounds),$acc3
- _srm $t3,8,$acc6
- ldbx $acc4($rounds),$acc4
- _srm $t2,24,$acc8
- ldbx $acc5($rounds),$acc5
- _srm $t3,16,$acc9
- _srm $t0,0,$acc7
- ldbx $acc6($rounds),$acc6
- ldbx $acc7($rounds),$acc7
- _srm $t0,8,$acc10
- ldbx $acc8($rounds),$acc8
- _srm $t3,24,$acc12
- ldbx $acc9($rounds),$acc9
- _srm $t0,16,$acc13
- _srm $t1,0,$acc11
- ldbx $acc10($rounds),$acc10
- _srm $t1,8,$acc14
- ldbx $acc11($rounds),$acc11
- ldbx $acc12($rounds),$acc12
- ldbx $acc13($rounds),$acc13
- _srm $t2,0,$acc15
- ldbx $acc14($rounds),$acc14
-
- dep $acc0,7,8,$acc3
- ldbx $acc15($rounds),$acc15
- dep $acc4,7,8,$acc7
- dep $acc1,15,8,$acc3
- dep $acc5,15,8,$acc7
- dep $acc2,23,8,$acc3
- dep $acc6,23,8,$acc7
- xor $acc3,$s0,$s0
- xor $acc7,$s1,$s1
- dep $acc8,7,8,$acc11
- dep $acc12,7,8,$acc15
- dep $acc9,15,8,$acc11
- dep $acc13,15,8,$acc15
- dep $acc10,23,8,$acc11
- dep $acc14,23,8,$acc15
- xor $acc11,$s2,$s2
-
- bv (%r31)
- .EXIT
- xor $acc15,$s3,$s3
- .PROCEND
-
- .ALIGN 64
-L\$AES_Te
- .WORD 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
- .WORD 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
- .WORD 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
- .WORD 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
- .WORD 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
- .WORD 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
- .WORD 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
- .WORD 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
- .WORD 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
- .WORD 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
- .WORD 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
- .WORD 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
- .WORD 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
- .WORD 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
- .WORD 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
- .WORD 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
- .WORD 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
- .WORD 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
- .WORD 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
- .WORD 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
- .WORD 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
- .WORD 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
- .WORD 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
- .WORD 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
- .WORD 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
- .WORD 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
- .WORD 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
- .WORD 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
- .WORD 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
- .WORD 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
- .WORD 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
- .WORD 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
- .WORD 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
- .WORD 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
- .WORD 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
- .WORD 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
- .WORD 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
- .WORD 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
- .WORD 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
- .WORD 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
- .WORD 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
- .WORD 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
- .WORD 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
- .WORD 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
- .WORD 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
- .WORD 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
- .WORD 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
- .WORD 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
- .WORD 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
- .WORD 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
- .WORD 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
- .WORD 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
- .WORD 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
- .WORD 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
- .WORD 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
- .WORD 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
- .WORD 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
- .WORD 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
- .WORD 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
- .WORD 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
- .WORD 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
- .WORD 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
- .WORD 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
- .WORD 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
- .BYTE 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
- .BYTE 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
- .BYTE 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
- .BYTE 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
- .BYTE 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
- .BYTE 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
- .BYTE 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
- .BYTE 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
- .BYTE 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
- .BYTE 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
- .BYTE 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
- .BYTE 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
- .BYTE 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
- .BYTE 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
- .BYTE 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
- .BYTE 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
- .BYTE 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
- .BYTE 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
- .BYTE 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
- .BYTE 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
- .BYTE 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
- .BYTE 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
- .BYTE 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
- .BYTE 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
- .BYTE 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
- .BYTE 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
- .BYTE 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
- .BYTE 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
- .BYTE 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
- .BYTE 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
- .BYTE 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
- .BYTE 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-___
-
-$code.=<<___;
- .EXPORT AES_decrypt,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR
- .ALIGN 16
-AES_decrypt
- .PROC
- .CALLINFO FRAME=`$FRAME-16*$SIZE_T`,NO_CALLS,SAVE_RP,ENTRY_GR=18
- .ENTRY
- $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
- $PUSHMA %r3,$FRAME(%sp)
- $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
- $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
- $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
- $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
- $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
- $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
- $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
- $PUSH %r11,`-$FRAME+8*$SIZE_T`(%sp)
- $PUSH %r12,`-$FRAME+9*$SIZE_T`(%sp)
- $PUSH %r13,`-$FRAME+10*$SIZE_T`(%sp)
- $PUSH %r14,`-$FRAME+11*$SIZE_T`(%sp)
- $PUSH %r15,`-$FRAME+12*$SIZE_T`(%sp)
- $PUSH %r16,`-$FRAME+13*$SIZE_T`(%sp)
- $PUSH %r17,`-$FRAME+14*$SIZE_T`(%sp)
- $PUSH %r18,`-$FRAME+15*$SIZE_T`(%sp)
-
- blr %r0,$tbl
- ldi 3,$t0
-L\$dec_pic
- andcm $tbl,$t0,$tbl
- ldo L\$AES_Td-L\$dec_pic($tbl),$tbl
-
- and $inp,$t0,$t0
- sub $inp,$t0,$inp
- ldw 0($inp),$s0
- ldw 4($inp),$s1
- ldw 8($inp),$s2
- comib,= 0,$t0,L\$dec_inp_aligned
- ldw 12($inp),$s3
-
- sh3addl $t0,%r0,$t0
- subi 32,$t0,$t0
- mtctl $t0,%cr11
- ldw 16($inp),$t1
- vshd $s0,$s1,$s0
- vshd $s1,$s2,$s1
- vshd $s2,$s3,$s2
- vshd $s3,$t1,$s3
-
-L\$dec_inp_aligned
- bl _parisc_AES_decrypt,%r31
- nop
-
- extru,<> $out,31,2,%r0
- b L\$dec_out_aligned
- nop
-
- _srm $s0,24,$acc0
- _srm $s0,16,$acc1
- stb $acc0,0($out)
- _srm $s0,8,$acc2
- stb $acc1,1($out)
- _srm $s1,24,$acc4
- stb $acc2,2($out)
- _srm $s1,16,$acc5
- stb $s0,3($out)
- _srm $s1,8,$acc6
- stb $acc4,4($out)
- _srm $s2,24,$acc0
- stb $acc5,5($out)
- _srm $s2,16,$acc1
- stb $acc6,6($out)
- _srm $s2,8,$acc2
- stb $s1,7($out)
- _srm $s3,24,$acc4
- stb $acc0,8($out)
- _srm $s3,16,$acc5
- stb $acc1,9($out)
- _srm $s3,8,$acc6
- stb $acc2,10($out)
- stb $s2,11($out)
- stb $acc4,12($out)
- stb $acc5,13($out)
- stb $acc6,14($out)
- b L\$dec_done
- stb $s3,15($out)
-
-L\$dec_out_aligned
- stw $s0,0($out)
- stw $s1,4($out)
- stw $s2,8($out)
- stw $s3,12($out)
-
-L\$dec_done
- $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
- $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
- $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
- $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
- $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
- $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
- $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
- $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
- $POP `-$FRAME+8*$SIZE_T`(%sp),%r11
- $POP `-$FRAME+9*$SIZE_T`(%sp),%r12
- $POP `-$FRAME+10*$SIZE_T`(%sp),%r13
- $POP `-$FRAME+11*$SIZE_T`(%sp),%r14
- $POP `-$FRAME+12*$SIZE_T`(%sp),%r15
- $POP `-$FRAME+13*$SIZE_T`(%sp),%r16
- $POP `-$FRAME+14*$SIZE_T`(%sp),%r17
- $POP `-$FRAME+15*$SIZE_T`(%sp),%r18
- bv (%r2)
- .EXIT
- $POPMB -$FRAME(%sp),%r3
- .PROCEND
-
- .ALIGN 16
-_parisc_AES_decrypt
- .PROC
- .CALLINFO MILLICODE
- .ENTRY
- ldw 240($key),$rounds
- ldw 0($key),$t0
- ldw 4($key),$t1
- ldw 8($key),$t2
- ldw 12($key),$t3
- _srm $rounds,1,$rounds
- xor $t0,$s0,$s0
- ldw 16($key),$t0
- xor $t1,$s1,$s1
- ldw 20($key),$t1
- _srm $s0,24,$acc0
- xor $t2,$s2,$s2
- ldw 24($key),$t2
- xor $t3,$s3,$s3
- ldw 28($key),$t3
- _srm $s3,16,$acc1
-L\$dec_loop
- _srm $s2,8,$acc2
- ldwx,s $acc0($tbl),$acc0
- _srm $s1,0,$acc3
- ldwx,s $acc1($tbl),$acc1
- _srm $s1,24,$acc4
- ldwx,s $acc2($tbl),$acc2
- _srm $s0,16,$acc5
- ldwx,s $acc3($tbl),$acc3
- _srm $s3,8,$acc6
- ldwx,s $acc4($tbl),$acc4
- _srm $s2,0,$acc7
- ldwx,s $acc5($tbl),$acc5
- _srm $s2,24,$acc8
- ldwx,s $acc6($tbl),$acc6
- _srm $s1,16,$acc9
- ldwx,s $acc7($tbl),$acc7
- _srm $s0,8,$acc10
- ldwx,s $acc8($tbl),$acc8
- _srm $s3,0,$acc11
- ldwx,s $acc9($tbl),$acc9
- _srm $s3,24,$acc12
- ldwx,s $acc10($tbl),$acc10
- _srm $s2,16,$acc13
- ldwx,s $acc11($tbl),$acc11
- _srm $s1,8,$acc14
- ldwx,s $acc12($tbl),$acc12
- _srm $s0,0,$acc15
- ldwx,s $acc13($tbl),$acc13
- ldwx,s $acc14($tbl),$acc14
- ldwx,s $acc15($tbl),$acc15
- addib,= -1,$rounds,L\$dec_last
- ldo 32($key),$key
-
- _ror $acc1,8,$acc1
- xor $acc0,$t0,$t0
- ldw 0($key),$s0
- _ror $acc2,16,$acc2
- xor $acc1,$t0,$t0
- ldw 4($key),$s1
- _ror $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ldw 8($key),$s2
- _ror $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ldw 12($key),$s3
- _ror $acc6,16,$acc6
- xor $acc4,$t1,$t1
- _ror $acc7,24,$acc7
- xor $acc5,$t1,$t1
- _ror $acc9,8,$acc9
- xor $acc6,$t1,$t1
- _ror $acc10,16,$acc10
- xor $acc7,$t1,$t1
- _ror $acc11,24,$acc11
- xor $acc8,$t2,$t2
- _ror $acc13,8,$acc13
- xor $acc9,$t2,$t2
- _ror $acc14,16,$acc14
- xor $acc10,$t2,$t2
- _ror $acc15,24,$acc15
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- _srm $t0,24,$acc0
- xor $acc14,$t3,$t3
- xor $acc15,$t3,$t3
- _srm $t3,16,$acc1
-
- _srm $t2,8,$acc2
- ldwx,s $acc0($tbl),$acc0
- _srm $t1,0,$acc3
- ldwx,s $acc1($tbl),$acc1
- _srm $t1,24,$acc4
- ldwx,s $acc2($tbl),$acc2
- _srm $t0,16,$acc5
- ldwx,s $acc3($tbl),$acc3
- _srm $t3,8,$acc6
- ldwx,s $acc4($tbl),$acc4
- _srm $t2,0,$acc7
- ldwx,s $acc5($tbl),$acc5
- _srm $t2,24,$acc8
- ldwx,s $acc6($tbl),$acc6
- _srm $t1,16,$acc9
- ldwx,s $acc7($tbl),$acc7
- _srm $t0,8,$acc10
- ldwx,s $acc8($tbl),$acc8
- _srm $t3,0,$acc11
- ldwx,s $acc9($tbl),$acc9
- _srm $t3,24,$acc12
- ldwx,s $acc10($tbl),$acc10
- _srm $t2,16,$acc13
- ldwx,s $acc11($tbl),$acc11
- _srm $t1,8,$acc14
- ldwx,s $acc12($tbl),$acc12
- _srm $t0,0,$acc15
- ldwx,s $acc13($tbl),$acc13
- _ror $acc1,8,$acc1
- ldwx,s $acc14($tbl),$acc14
-
- _ror $acc2,16,$acc2
- xor $acc0,$s0,$s0
- ldwx,s $acc15($tbl),$acc15
- _ror $acc3,24,$acc3
- xor $acc1,$s0,$s0
- ldw 16($key),$t0
- _ror $acc5,8,$acc5
- xor $acc2,$s0,$s0
- ldw 20($key),$t1
- _ror $acc6,16,$acc6
- xor $acc3,$s0,$s0
- ldw 24($key),$t2
- _ror $acc7,24,$acc7
- xor $acc4,$s1,$s1
- ldw 28($key),$t3
- _ror $acc9,8,$acc9
- xor $acc5,$s1,$s1
- ldw 1024+0($tbl),%r0 ; prefetch td4
- _ror $acc10,16,$acc10
- xor $acc6,$s1,$s1
- ldw 1024+32($tbl),%r0 ; prefetch td4
- _ror $acc11,24,$acc11
- xor $acc7,$s1,$s1
- ldw 1024+64($tbl),%r0 ; prefetch td4
- _ror $acc13,8,$acc13
- xor $acc8,$s2,$s2
- ldw 1024+96($tbl),%r0 ; prefetch td4
- _ror $acc14,16,$acc14
- xor $acc9,$s2,$s2
- ldw 1024+128($tbl),%r0 ; prefetch td4
- _ror $acc15,24,$acc15
- xor $acc10,$s2,$s2
- ldw 1024+160($tbl),%r0 ; prefetch td4
- _srm $s0,24,$acc0
- xor $acc11,$s2,$s2
- ldw 1024+192($tbl),%r0 ; prefetch td4
- xor $acc12,$acc14,$acc14
- xor $acc13,$s3,$s3
- ldw 1024+224($tbl),%r0 ; prefetch td4
- xor $acc14,$s3,$s3
- xor $acc15,$s3,$s3
- b L\$dec_loop
- _srm $s3,16,$acc1
-
- .ALIGN 16
-L\$dec_last
- ldo 1024($tbl),$rounds
- _ror $acc1,8,$acc1
- xor $acc0,$t0,$t0
- ldw 0($key),$s0
- _ror $acc2,16,$acc2
- xor $acc1,$t0,$t0
- ldw 4($key),$s1
- _ror $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ldw 8($key),$s2
- _ror $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ldw 12($key),$s3
- _ror $acc6,16,$acc6
- xor $acc4,$t1,$t1
- _ror $acc7,24,$acc7
- xor $acc5,$t1,$t1
- _ror $acc9,8,$acc9
- xor $acc6,$t1,$t1
- _ror $acc10,16,$acc10
- xor $acc7,$t1,$t1
- _ror $acc11,24,$acc11
- xor $acc8,$t2,$t2
- _ror $acc13,8,$acc13
- xor $acc9,$t2,$t2
- _ror $acc14,16,$acc14
- xor $acc10,$t2,$t2
- _ror $acc15,24,$acc15
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- _srm $t0,24,$acc0
- xor $acc14,$t3,$t3
- xor $acc15,$t3,$t3
- _srm $t3,16,$acc1
-
- _srm $t2,8,$acc2
- ldbx $acc0($rounds),$acc0
- _srm $t1,24,$acc4
- ldbx $acc1($rounds),$acc1
- _srm $t0,16,$acc5
- _srm $t1,0,$acc3
- ldbx $acc2($rounds),$acc2
- ldbx $acc3($rounds),$acc3
- _srm $t3,8,$acc6
- ldbx $acc4($rounds),$acc4
- _srm $t2,24,$acc8
- ldbx $acc5($rounds),$acc5
- _srm $t1,16,$acc9
- _srm $t2,0,$acc7
- ldbx $acc6($rounds),$acc6
- ldbx $acc7($rounds),$acc7
- _srm $t0,8,$acc10
- ldbx $acc8($rounds),$acc8
- _srm $t3,24,$acc12
- ldbx $acc9($rounds),$acc9
- _srm $t2,16,$acc13
- _srm $t3,0,$acc11
- ldbx $acc10($rounds),$acc10
- _srm $t1,8,$acc14
- ldbx $acc11($rounds),$acc11
- ldbx $acc12($rounds),$acc12
- ldbx $acc13($rounds),$acc13
- _srm $t0,0,$acc15
- ldbx $acc14($rounds),$acc14
-
- dep $acc0,7,8,$acc3
- ldbx $acc15($rounds),$acc15
- dep $acc4,7,8,$acc7
- dep $acc1,15,8,$acc3
- dep $acc5,15,8,$acc7
- dep $acc2,23,8,$acc3
- dep $acc6,23,8,$acc7
- xor $acc3,$s0,$s0
- xor $acc7,$s1,$s1
- dep $acc8,7,8,$acc11
- dep $acc12,7,8,$acc15
- dep $acc9,15,8,$acc11
- dep $acc13,15,8,$acc15
- dep $acc10,23,8,$acc11
- dep $acc14,23,8,$acc15
- xor $acc11,$s2,$s2
-
- bv (%r31)
- .EXIT
- xor $acc15,$s3,$s3
- .PROCEND
-
- .ALIGN 64
-L\$AES_Td
- .WORD 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
- .WORD 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
- .WORD 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
- .WORD 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
- .WORD 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
- .WORD 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
- .WORD 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
- .WORD 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
- .WORD 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
- .WORD 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
- .WORD 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
- .WORD 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
- .WORD 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
- .WORD 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
- .WORD 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
- .WORD 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
- .WORD 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
- .WORD 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
- .WORD 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
- .WORD 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
- .WORD 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
- .WORD 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
- .WORD 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
- .WORD 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
- .WORD 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
- .WORD 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
- .WORD 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
- .WORD 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
- .WORD 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
- .WORD 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
- .WORD 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
- .WORD 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
- .WORD 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
- .WORD 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
- .WORD 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
- .WORD 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
- .WORD 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
- .WORD 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
- .WORD 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
- .WORD 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
- .WORD 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
- .WORD 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
- .WORD 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
- .WORD 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
- .WORD 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
- .WORD 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
- .WORD 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
- .WORD 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
- .WORD 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
- .WORD 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
- .WORD 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
- .WORD 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
- .WORD 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
- .WORD 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
- .WORD 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
- .WORD 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
- .WORD 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
- .WORD 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
- .WORD 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
- .WORD 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
- .WORD 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
- .WORD 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
- .WORD 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
- .WORD 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
- .BYTE 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
- .BYTE 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
- .BYTE 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
- .BYTE 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
- .BYTE 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
- .BYTE 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
- .BYTE 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
- .BYTE 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
- .BYTE 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
- .BYTE 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
- .BYTE 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
- .BYTE 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
- .BYTE 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
- .BYTE 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
- .BYTE 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
- .BYTE 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
- .BYTE 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
- .BYTE 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
- .BYTE 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
- .BYTE 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
- .BYTE 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
- .BYTE 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
- .BYTE 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
- .BYTE 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
- .BYTE 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
- .BYTE 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
- .BYTE 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
- .BYTE 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
- .BYTE 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
- .BYTE 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
- .BYTE 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
- .BYTE 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
- .STRINGZ "AES for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
-___
-
-foreach (split("\n",$code)) {
- s/\`([^\`]*)\`/eval $1/ge;
-
- # translate the made-up instructions _ror and _srm
- s/_ror(\s+)(%r[0-9]+),/shd$1$2,$2,/ or
-
- s/_srm(\s+%r[0-9]+),([0-9]+),/
- $SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
- : sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
-
- s/,\*/,/ if ($SIZE_T==4);
- s/\bbv\b(.*\(%r2\))/bve$1/ if ($SIZE_T==8);
- print $_,"\n";
-}
-close STDOUT;
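
As in the MIPS module, the made-up mnemonics are expanded by the loop above: _ror becomes PA-RISC's shd with both source operands equal (i.e. a rotate), and _srm ("shift right and mask to a byte") becomes a bit-field extract, extru on 32-bit builds and extrd,u on 64-bit ones, with the shift count converted to a big-endian bit position. A self-contained sketch of the _srm case, reusing the substitution verbatim; the sample operands and the $SIZE_T setting are assumptions:

    my $SIZE_T = 4;                         # assumption: 32-bit build
    for my $line ("\t_srm\t%r8,24,%r9", "\t_srm\t%r8,0,%r9") {
        (my $out = $line) =~ s/_srm(\s+%r[0-9]+),([0-9]+),/
            $SIZE_T==4 ? sprintf("extru%s,%d,8,",$1,31-$2)
                       : sprintf("extrd,u%s,%d,8,",$1,63-$2)/e;
        print "$line  =>  $out\n";  # extru %r8,7,8,%r9 and extru %r8,31,8,%r9
    }

For a shift count n the field position is 31-n (63-n on 64-bit), so _srm x,24,y extracts the top byte and _srm x,0,y the bottom one.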
diff --git a/app/openssl/crypto/aes/asm/aes-ppc.pl b/app/openssl/crypto/aes/asm/aes-ppc.pl
deleted file mode 100644
index 7c52cbe5..00000000
--- a/app/openssl/crypto/aes/asm/aes-ppc.pl
+++ /dev/null
@@ -1,1365 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# Needs more work: key setup, CBC routine...
-#
-# ppc_AES_[en|de]crypt perform at 18 cycles per byte processed with a
-# 128-bit key, which is ~40% better than 64-bit code generated by gcc
-# 4.0. But these are not the ones currently used! Their "compact"
-# counterparts are used instead, for security reasons.
-# ppc_AES_encrypt_compact runs at 1/2 of ppc_AES_encrypt's speed,
-# while ppc_AES_decrypt_compact runs at 1/3 of ppc_AES_decrypt's.
-
-# February 2010
-#
-# Rescheduling instructions to favour the Power6 pipeline gave a 10%
-# performance improvement on the platform in question (and a marginal
-# improvement even on others). It should be noted that Power6 fails
-# to process a byte in 18 cycles, taking 23 instead, because it can
-# issue the 4 load instructions only in 3 cycles, not in two. As a
-# result the non-compact block subroutines are 25% slower than one
-# would expect. The compact functions scale better, because their
-# pure computational part scales perfectly with clock frequency. To
-# be specific, ppc_AES_encrypt_compact operates at 42 cycles per byte,
-# while ppc_AES_decrypt_compact operates at 55 (in a 64-bit build).
-
-$flavour = shift;
-
-if ($flavour =~ /64/) {
- $SIZE_T =8;
- $LRSAVE =2*$SIZE_T;
- $STU ="stdu";
- $POP ="ld";
- $PUSH ="std";
-} elsif ($flavour =~ /32/) {
- $SIZE_T =4;
- $LRSAVE =$SIZE_T;
- $STU ="stwu";
- $POP ="lwz";
- $PUSH ="stw";
-} else { die "nonsense $flavour"; }
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
-die "can't locate ppc-xlate.pl";
-
-open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
-
-$FRAME=32*$SIZE_T;
-
-sub _data_word()
-{ my $i;
- while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
-}
-
-$sp="r1";
-$toc="r2";
-$inp="r3";
-$out="r4";
-$key="r5";
-
-$Tbl0="r3";
-$Tbl1="r6";
-$Tbl2="r7";
-$Tbl3="r2";
-
-$s0="r8";
-$s1="r9";
-$s2="r10";
-$s3="r11";
-
-$t0="r12";
-$t1="r13";
-$t2="r14";
-$t3="r15";
-
-$acc00="r16";
-$acc01="r17";
-$acc02="r18";
-$acc03="r19";
-
-$acc04="r20";
-$acc05="r21";
-$acc06="r22";
-$acc07="r23";
-
-$acc08="r24";
-$acc09="r25";
-$acc10="r26";
-$acc11="r27";
-
-$acc12="r28";
-$acc13="r29";
-$acc14="r30";
-$acc15="r31";
-
-# stay away from TLS pointer
-if ($SIZE_T==8) { die if ($t1 ne "r13"); $t1="r0"; }
-else { die if ($Tbl3 ne "r2"); $Tbl3=$t0; $t0="r0"; }
-$mask80=$Tbl2;
-$mask1b=$Tbl3;
-
-$code.=<<___;
-.machine "any"
-.text
-
-.align 7
-LAES_Te:
- mflr r0
- bcl 20,31,\$+4
- mflr $Tbl0 ; vvvvv "distance" between . and 1st data entry
- addi $Tbl0,$Tbl0,`128-8`
- mtlr r0
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
- .space `64-9*4`
-LAES_Td:
- mflr r0
- bcl 20,31,\$+4
- mflr $Tbl0 ; vvvvvvvv "distance" between . and 1st data entry
- addi $Tbl0,$Tbl0,`128-64-8+2048+256`
- mtlr r0
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
- .space `128-64-9*4`
-___
-&_data_word(
- 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
- 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
- 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
- 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
- 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
- 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
- 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
- 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
- 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
- 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
- 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
- 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
- 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
- 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
- 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
- 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
- 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
- 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
- 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
- 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
- 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
- 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
- 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
- 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
- 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
- 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
- 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
- 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
- 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
- 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
- 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
- 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
- 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
- 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
- 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
- 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
- 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
- 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
- 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
- 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
- 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
- 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
- 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
- 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
- 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
- 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
- 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
- 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
- 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
- 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
- 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
- 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
- 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
- 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
- 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
- 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
- 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
- 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
- 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
- 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
- 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
- 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
- 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
- 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
-$code.=<<___;
-.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-___
-&_data_word(
- 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
- 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
- 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
- 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
- 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
- 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
- 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
- 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
- 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
- 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
- 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
- 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
- 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
- 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
- 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
- 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
- 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
- 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
- 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
- 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
- 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
- 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
- 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
- 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
- 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
- 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
- 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
- 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
- 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
- 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
- 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
- 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
- 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
- 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
- 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
- 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
- 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
- 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
- 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
- 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
- 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
- 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
- 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
- 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
- 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
- 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
- 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
- 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
- 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
- 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
- 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
- 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
- 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
- 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
- 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
- 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
- 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
- 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
- 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
- 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
- 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
- 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
- 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
- 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
-$code.=<<___;
-.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-
-
-.globl .AES_encrypt
-.align 7
-.AES_encrypt:
- $STU $sp,-$FRAME($sp)
- mflr r0
-
- $PUSH $toc,`$FRAME-$SIZE_T*20`($sp)
- $PUSH r13,`$FRAME-$SIZE_T*19`($sp)
- $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
- $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
- $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
- $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
- $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
- $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
- $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
- $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
- $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
- $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
- $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
- $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
- $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
- $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
- $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
- $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
- $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
- $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
- $PUSH r0,`$FRAME+$LRSAVE`($sp)
-
- andi. $t0,$inp,3
- andi. $t1,$out,3
- or. $t0,$t0,$t1
- bne Lenc_unaligned
-
-Lenc_unaligned_ok:
- lwz $s0,0($inp)
- lwz $s1,4($inp)
- lwz $s2,8($inp)
- lwz $s3,12($inp)
- bl LAES_Te
- bl Lppc_AES_encrypt_compact
- stw $s0,0($out)
- stw $s1,4($out)
- stw $s2,8($out)
- stw $s3,12($out)
- b Lenc_done
-
-Lenc_unaligned:
- subfic $t0,$inp,4096
- subfic $t1,$out,4096
- andi. $t0,$t0,4096-16
- beq Lenc_xpage
- andi. $t1,$t1,4096-16
- bne Lenc_unaligned_ok
-
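The fall-through logic above takes the byte-at-a-time Lenc_xpage path only when the 16-byte block would sit within 16 bytes of the end of a 4KB page, where an unaligned word access could fault on the following page. As a sketch, the subfic/andi. test amounts to this predicate in plain Perl (near_page_end is an illustrative name, not part of the generated code):

    # true when $addr is page-aligned or within 16 bytes of the end of
    # its 4KB page, mirroring `subfic rT,rA,4096; andi. rT,rT,4096-16`
    sub near_page_end { my ($addr) = @_; ((4096 - ($addr % 4096)) % 4096) < 16 }

    my $use_byte_path = near_page_end($inp) || near_page_end($out);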
-Lenc_xpage:
- lbz $acc00,0($inp)
- lbz $acc01,1($inp)
- lbz $acc02,2($inp)
- lbz $s0,3($inp)
- lbz $acc04,4($inp)
- lbz $acc05,5($inp)
- lbz $acc06,6($inp)
- lbz $s1,7($inp)
- lbz $acc08,8($inp)
- lbz $acc09,9($inp)
- lbz $acc10,10($inp)
- insrwi $s0,$acc00,8,0
- lbz $s2,11($inp)
- insrwi $s1,$acc04,8,0
- lbz $acc12,12($inp)
- insrwi $s0,$acc01,8,8
- lbz $acc13,13($inp)
- insrwi $s1,$acc05,8,8
- lbz $acc14,14($inp)
- insrwi $s0,$acc02,8,16
- lbz $s3,15($inp)
- insrwi $s1,$acc06,8,16
- insrwi $s2,$acc08,8,0
- insrwi $s3,$acc12,8,0
- insrwi $s2,$acc09,8,8
- insrwi $s3,$acc13,8,8
- insrwi $s2,$acc10,8,16
- insrwi $s3,$acc14,8,16
-
- bl LAES_Te
- bl Lppc_AES_encrypt_compact
-
- extrwi $acc00,$s0,8,0
- extrwi $acc01,$s0,8,8
- stb $acc00,0($out)
- extrwi $acc02,$s0,8,16
- stb $acc01,1($out)
- stb $acc02,2($out)
- extrwi $acc04,$s1,8,0
- stb $s0,3($out)
- extrwi $acc05,$s1,8,8
- stb $acc04,4($out)
- extrwi $acc06,$s1,8,16
- stb $acc05,5($out)
- stb $acc06,6($out)
- extrwi $acc08,$s2,8,0
- stb $s1,7($out)
- extrwi $acc09,$s2,8,8
- stb $acc08,8($out)
- extrwi $acc10,$s2,8,16
- stb $acc09,9($out)
- stb $acc10,10($out)
- extrwi $acc12,$s3,8,0
- stb $s2,11($out)
- extrwi $acc13,$s3,8,8
- stb $acc12,12($out)
- extrwi $acc14,$s3,8,16
- stb $acc13,13($out)
- stb $acc14,14($out)
- stb $s3,15($out)
-
-Lenc_done:
- $POP r0,`$FRAME+$LRSAVE`($sp)
- $POP $toc,`$FRAME-$SIZE_T*20`($sp)
- $POP r13,`$FRAME-$SIZE_T*19`($sp)
- $POP r14,`$FRAME-$SIZE_T*18`($sp)
- $POP r15,`$FRAME-$SIZE_T*17`($sp)
- $POP r16,`$FRAME-$SIZE_T*16`($sp)
- $POP r17,`$FRAME-$SIZE_T*15`($sp)
- $POP r18,`$FRAME-$SIZE_T*14`($sp)
- $POP r19,`$FRAME-$SIZE_T*13`($sp)
- $POP r20,`$FRAME-$SIZE_T*12`($sp)
- $POP r21,`$FRAME-$SIZE_T*11`($sp)
- $POP r22,`$FRAME-$SIZE_T*10`($sp)
- $POP r23,`$FRAME-$SIZE_T*9`($sp)
- $POP r24,`$FRAME-$SIZE_T*8`($sp)
- $POP r25,`$FRAME-$SIZE_T*7`($sp)
- $POP r26,`$FRAME-$SIZE_T*6`($sp)
- $POP r27,`$FRAME-$SIZE_T*5`($sp)
- $POP r28,`$FRAME-$SIZE_T*4`($sp)
- $POP r29,`$FRAME-$SIZE_T*3`($sp)
- $POP r30,`$FRAME-$SIZE_T*2`($sp)
- $POP r31,`$FRAME-$SIZE_T*1`($sp)
- mtlr r0
- addi $sp,$sp,$FRAME
- blr
- .long 0
- .byte 0,12,4,1,0x80,18,3,0
- .long 0
-
-.align 5
-Lppc_AES_encrypt:
- lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,3
- lwz $t0,0($key)
- addi $Tbl2,$Tbl0,2
- lwz $t1,4($key)
- addi $Tbl3,$Tbl0,1
- lwz $t2,8($key)
- addi $acc00,$acc00,-1
- lwz $t3,12($key)
- addi $key,$key,16
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
- mtctr $acc00
-.align 4
-Lenc_loop:
- rlwinm $acc00,$s0,`32-24+3`,21,28
- rlwinm $acc01,$s1,`32-24+3`,21,28
- rlwinm $acc02,$s2,`32-24+3`,21,28
- rlwinm $acc03,$s3,`32-24+3`,21,28
- lwz $t0,0($key)
- rlwinm $acc04,$s1,`32-16+3`,21,28
- lwz $t1,4($key)
- rlwinm $acc05,$s2,`32-16+3`,21,28
- lwz $t2,8($key)
- rlwinm $acc06,$s3,`32-16+3`,21,28
- lwz $t3,12($key)
- rlwinm $acc07,$s0,`32-16+3`,21,28
- lwzx $acc00,$Tbl0,$acc00
- rlwinm $acc08,$s2,`32-8+3`,21,28
- lwzx $acc01,$Tbl0,$acc01
- rlwinm $acc09,$s3,`32-8+3`,21,28
- lwzx $acc02,$Tbl0,$acc02
- rlwinm $acc10,$s0,`32-8+3`,21,28
- lwzx $acc03,$Tbl0,$acc03
- rlwinm $acc11,$s1,`32-8+3`,21,28
- lwzx $acc04,$Tbl1,$acc04
- rlwinm $acc12,$s3,`0+3`,21,28
- lwzx $acc05,$Tbl1,$acc05
- rlwinm $acc13,$s0,`0+3`,21,28
- lwzx $acc06,$Tbl1,$acc06
- rlwinm $acc14,$s1,`0+3`,21,28
- lwzx $acc07,$Tbl1,$acc07
- rlwinm $acc15,$s2,`0+3`,21,28
- lwzx $acc08,$Tbl2,$acc08
- xor $t0,$t0,$acc00
- lwzx $acc09,$Tbl2,$acc09
- xor $t1,$t1,$acc01
- lwzx $acc10,$Tbl2,$acc10
- xor $t2,$t2,$acc02
- lwzx $acc11,$Tbl2,$acc11
- xor $t3,$t3,$acc03
- lwzx $acc12,$Tbl3,$acc12
- xor $t0,$t0,$acc04
- lwzx $acc13,$Tbl3,$acc13
- xor $t1,$t1,$acc05
- lwzx $acc14,$Tbl3,$acc14
- xor $t2,$t2,$acc06
- lwzx $acc15,$Tbl3,$acc15
- xor $t3,$t3,$acc07
- xor $t0,$t0,$acc08
- xor $t1,$t1,$acc09
- xor $t2,$t2,$acc10
- xor $t3,$t3,$acc11
- xor $s0,$t0,$acc12
- xor $s1,$t1,$acc13
- xor $s2,$t2,$acc14
- xor $s3,$t3,$acc15
- addi $key,$key,16
- bdnz- Lenc_loop
-
- addi $Tbl2,$Tbl0,2048
- nop
- lwz $t0,0($key)
- rlwinm $acc00,$s0,`32-24`,24,31
- lwz $t1,4($key)
- rlwinm $acc01,$s1,`32-24`,24,31
- lwz $t2,8($key)
- rlwinm $acc02,$s2,`32-24`,24,31
- lwz $t3,12($key)
- rlwinm $acc03,$s3,`32-24`,24,31
- lwz $acc08,`2048+0`($Tbl0) ! prefetch Te4
- rlwinm $acc04,$s1,`32-16`,24,31
- lwz $acc09,`2048+32`($Tbl0)
- rlwinm $acc05,$s2,`32-16`,24,31
- lwz $acc10,`2048+64`($Tbl0)
- rlwinm $acc06,$s3,`32-16`,24,31
- lwz $acc11,`2048+96`($Tbl0)
- rlwinm $acc07,$s0,`32-16`,24,31
- lwz $acc12,`2048+128`($Tbl0)
- rlwinm $acc08,$s2,`32-8`,24,31
- lwz $acc13,`2048+160`($Tbl0)
- rlwinm $acc09,$s3,`32-8`,24,31
- lwz $acc14,`2048+192`($Tbl0)
- rlwinm $acc10,$s0,`32-8`,24,31
- lwz $acc15,`2048+224`($Tbl0)
- rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc00,$Tbl2,$acc00
- rlwinm $acc12,$s3,`0`,24,31
- lbzx $acc01,$Tbl2,$acc01
- rlwinm $acc13,$s0,`0`,24,31
- lbzx $acc02,$Tbl2,$acc02
- rlwinm $acc14,$s1,`0`,24,31
- lbzx $acc03,$Tbl2,$acc03
- rlwinm $acc15,$s2,`0`,24,31
- lbzx $acc04,$Tbl2,$acc04
- rlwinm $s0,$acc00,24,0,7
- lbzx $acc05,$Tbl2,$acc05
- rlwinm $s1,$acc01,24,0,7
- lbzx $acc06,$Tbl2,$acc06
- rlwinm $s2,$acc02,24,0,7
- lbzx $acc07,$Tbl2,$acc07
- rlwinm $s3,$acc03,24,0,7
- lbzx $acc08,$Tbl2,$acc08
- rlwimi $s0,$acc04,16,8,15
- lbzx $acc09,$Tbl2,$acc09
- rlwimi $s1,$acc05,16,8,15
- lbzx $acc10,$Tbl2,$acc10
- rlwimi $s2,$acc06,16,8,15
- lbzx $acc11,$Tbl2,$acc11
- rlwimi $s3,$acc07,16,8,15
- lbzx $acc12,$Tbl2,$acc12
- rlwimi $s0,$acc08,8,16,23
- lbzx $acc13,$Tbl2,$acc13
- rlwimi $s1,$acc09,8,16,23
- lbzx $acc14,$Tbl2,$acc14
- rlwimi $s2,$acc10,8,16,23
- lbzx $acc15,$Tbl2,$acc15
- rlwimi $s3,$acc11,8,16,23
- or $s0,$s0,$acc12
- or $s1,$s1,$acc13
- or $s2,$s2,$acc14
- or $s3,$s3,$acc15
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-
-.align 4
-Lppc_AES_encrypt_compact:
- lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,2048
- lwz $t0,0($key)
- lis $mask80,0x8080
- lwz $t1,4($key)
- lis $mask1b,0x1b1b
- lwz $t2,8($key)
- ori $mask80,$mask80,0x8080
- lwz $t3,12($key)
- ori $mask1b,$mask1b,0x1b1b
- addi $key,$key,16
- mtctr $acc00
-.align 4
-Lenc_compact_loop:
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- rlwinm $acc00,$s0,`32-24`,24,31
- xor $s2,$s2,$t2
- rlwinm $acc01,$s1,`32-24`,24,31
- xor $s3,$s3,$t3
- rlwinm $acc02,$s2,`32-24`,24,31
- rlwinm $acc03,$s3,`32-24`,24,31
- rlwinm $acc04,$s1,`32-16`,24,31
- rlwinm $acc05,$s2,`32-16`,24,31
- rlwinm $acc06,$s3,`32-16`,24,31
- rlwinm $acc07,$s0,`32-16`,24,31
- lbzx $acc00,$Tbl1,$acc00
- rlwinm $acc08,$s2,`32-8`,24,31
- lbzx $acc01,$Tbl1,$acc01
- rlwinm $acc09,$s3,`32-8`,24,31
- lbzx $acc02,$Tbl1,$acc02
- rlwinm $acc10,$s0,`32-8`,24,31
- lbzx $acc03,$Tbl1,$acc03
- rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc04,$Tbl1,$acc04
- rlwinm $acc12,$s3,`0`,24,31
- lbzx $acc05,$Tbl1,$acc05
- rlwinm $acc13,$s0,`0`,24,31
- lbzx $acc06,$Tbl1,$acc06
- rlwinm $acc14,$s1,`0`,24,31
- lbzx $acc07,$Tbl1,$acc07
- rlwinm $acc15,$s2,`0`,24,31
- lbzx $acc08,$Tbl1,$acc08
- rlwinm $s0,$acc00,24,0,7
- lbzx $acc09,$Tbl1,$acc09
- rlwinm $s1,$acc01,24,0,7
- lbzx $acc10,$Tbl1,$acc10
- rlwinm $s2,$acc02,24,0,7
- lbzx $acc11,$Tbl1,$acc11
- rlwinm $s3,$acc03,24,0,7
- lbzx $acc12,$Tbl1,$acc12
- rlwimi $s0,$acc04,16,8,15
- lbzx $acc13,$Tbl1,$acc13
- rlwimi $s1,$acc05,16,8,15
- lbzx $acc14,$Tbl1,$acc14
- rlwimi $s2,$acc06,16,8,15
- lbzx $acc15,$Tbl1,$acc15
- rlwimi $s3,$acc07,16,8,15
- rlwimi $s0,$acc08,8,16,23
- rlwimi $s1,$acc09,8,16,23
- rlwimi $s2,$acc10,8,16,23
- rlwimi $s3,$acc11,8,16,23
- lwz $t0,0($key)
- or $s0,$s0,$acc12
- lwz $t1,4($key)
- or $s1,$s1,$acc13
- lwz $t2,8($key)
- or $s2,$s2,$acc14
- lwz $t3,12($key)
- or $s3,$s3,$acc15
-
- addi $key,$key,16
- bdz Lenc_compact_done
-
- and $acc00,$s0,$mask80 # r1=r0&0x80808080
- and $acc01,$s1,$mask80
- and $acc02,$s2,$mask80
- and $acc03,$s3,$mask80
- srwi $acc04,$acc00,7 # r1>>7
- andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
- srwi $acc05,$acc01,7
- andc $acc09,$s1,$mask80
- srwi $acc06,$acc02,7
- andc $acc10,$s2,$mask80
- srwi $acc07,$acc03,7
- andc $acc11,$s3,$mask80
- sub $acc00,$acc00,$acc04 # r1-(r1>>7)
- sub $acc01,$acc01,$acc05
- sub $acc02,$acc02,$acc06
- sub $acc03,$acc03,$acc07
- add $acc08,$acc08,$acc08 # (r0&0x7f7f7f7f)<<1
- add $acc09,$acc09,$acc09
- add $acc10,$acc10,$acc10
- add $acc11,$acc11,$acc11
- and $acc00,$acc00,$mask1b # (r1-(r1>>7))&0x1b1b1b1b
- and $acc01,$acc01,$mask1b
- and $acc02,$acc02,$mask1b
- and $acc03,$acc03,$mask1b
- xor $acc00,$acc00,$acc08 # r2
- xor $acc01,$acc01,$acc09
- rotlwi $acc12,$s0,16 # ROTATE(r0,16)
- xor $acc02,$acc02,$acc10
- rotlwi $acc13,$s1,16
- xor $acc03,$acc03,$acc11
- rotlwi $acc14,$s2,16
-
- xor $s0,$s0,$acc00 # r0^r2
- rotlwi $acc15,$s3,16
- xor $s1,$s1,$acc01
- rotrwi $s0,$s0,24 # ROTATE(r2^r0,24)
- xor $s2,$s2,$acc02
- rotrwi $s1,$s1,24
- xor $s3,$s3,$acc03
- rotrwi $s2,$s2,24
- xor $s0,$s0,$acc00 # ROTATE(r2^r0,24)^r2
- rotrwi $s3,$s3,24
- xor $s1,$s1,$acc01
- xor $s2,$s2,$acc02
- xor $s3,$s3,$acc03
- rotlwi $acc08,$acc12,8 # ROTATE(r0,24)
- xor $s0,$s0,$acc12 #
- rotlwi $acc09,$acc13,8
- xor $s1,$s1,$acc13
- rotlwi $acc10,$acc14,8
- xor $s2,$s2,$acc14
- rotlwi $acc11,$acc15,8
- xor $s3,$s3,$acc15
- xor $s0,$s0,$acc08 #
- xor $s1,$s1,$acc09
- xor $s2,$s2,$acc10
- xor $s3,$s3,$acc11
-
- b Lenc_compact_loop
-.align 4
-Lenc_compact_done:
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-
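The mask80/mask1b arithmetic annotated in Lenc_compact_loop (r1 = r0 & 0x80808080, r1 - (r1 >> 7), & 0x1b1b1b1b) is a branchless GF(2^8) doubling applied to four bytes at once, the xtime step of MixColumns. A minimal Perl sketch of the same computation (xtime4 is an illustrative name):

    # double each of four packed bytes in GF(2^8), folding the AES
    # polynomial 0x1b into every byte whose high bit was set
    sub xtime4 {
        my ($r0) = @_;
        my $r1 = $r0 & 0x80808080;                   # high bit of each byte
        my $r2 = ($r1 - ($r1 >> 7)) & 0x1b1b1b1b;    # 0x80 -> 0x1b, 0x00 -> 0x00
        return ((($r0 & 0x7f7f7f7f) << 1) ^ $r2) & 0xffffffff;
    }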
-.globl .AES_decrypt
-.align 7
-.AES_decrypt:
- $STU $sp,-$FRAME($sp)
- mflr r0
-
- $PUSH $toc,`$FRAME-$SIZE_T*20`($sp)
- $PUSH r13,`$FRAME-$SIZE_T*19`($sp)
- $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
- $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
- $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
- $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
- $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
- $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
- $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
- $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
- $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
- $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
- $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
- $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
- $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
- $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
- $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
- $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
- $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
- $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
- $PUSH r0,`$FRAME+$LRSAVE`($sp)
-
- andi. $t0,$inp,3
- andi. $t1,$out,3
- or. $t0,$t0,$t1
- bne Ldec_unaligned
-
-Ldec_unaligned_ok:
- lwz $s0,0($inp)
- lwz $s1,4($inp)
- lwz $s2,8($inp)
- lwz $s3,12($inp)
- bl LAES_Td
- bl Lppc_AES_decrypt_compact
- stw $s0,0($out)
- stw $s1,4($out)
- stw $s2,8($out)
- stw $s3,12($out)
- b Ldec_done
-
-Ldec_unaligned:
- subfic $t0,$inp,4096
- subfic $t1,$out,4096
- andi. $t0,$t0,4096-16
- beq Ldec_xpage
- andi. $t1,$t1,4096-16
- bne Ldec_unaligned_ok
-
-Ldec_xpage:
- lbz $acc00,0($inp)
- lbz $acc01,1($inp)
- lbz $acc02,2($inp)
- lbz $s0,3($inp)
- lbz $acc04,4($inp)
- lbz $acc05,5($inp)
- lbz $acc06,6($inp)
- lbz $s1,7($inp)
- lbz $acc08,8($inp)
- lbz $acc09,9($inp)
- lbz $acc10,10($inp)
- insrwi $s0,$acc00,8,0
- lbz $s2,11($inp)
- insrwi $s1,$acc04,8,0
- lbz $acc12,12($inp)
- insrwi $s0,$acc01,8,8
- lbz $acc13,13($inp)
- insrwi $s1,$acc05,8,8
- lbz $acc14,14($inp)
- insrwi $s0,$acc02,8,16
- lbz $s3,15($inp)
- insrwi $s1,$acc06,8,16
- insrwi $s2,$acc08,8,0
- insrwi $s3,$acc12,8,0
- insrwi $s2,$acc09,8,8
- insrwi $s3,$acc13,8,8
- insrwi $s2,$acc10,8,16
- insrwi $s3,$acc14,8,16
-
- bl LAES_Td
- bl Lppc_AES_decrypt_compact
-
- extrwi $acc00,$s0,8,0
- extrwi $acc01,$s0,8,8
- stb $acc00,0($out)
- extrwi $acc02,$s0,8,16
- stb $acc01,1($out)
- stb $acc02,2($out)
- extrwi $acc04,$s1,8,0
- stb $s0,3($out)
- extrwi $acc05,$s1,8,8
- stb $acc04,4($out)
- extrwi $acc06,$s1,8,16
- stb $acc05,5($out)
- stb $acc06,6($out)
- extrwi $acc08,$s2,8,0
- stb $s1,7($out)
- extrwi $acc09,$s2,8,8
- stb $acc08,8($out)
- extrwi $acc10,$s2,8,16
- stb $acc09,9($out)
- stb $acc10,10($out)
- extrwi $acc12,$s3,8,0
- stb $s2,11($out)
- extrwi $acc13,$s3,8,8
- stb $acc12,12($out)
- extrwi $acc14,$s3,8,16
- stb $acc13,13($out)
- stb $acc14,14($out)
- stb $s3,15($out)
-
-Ldec_done:
- $POP r0,`$FRAME+$LRSAVE`($sp)
- $POP $toc,`$FRAME-$SIZE_T*20`($sp)
- $POP r13,`$FRAME-$SIZE_T*19`($sp)
- $POP r14,`$FRAME-$SIZE_T*18`($sp)
- $POP r15,`$FRAME-$SIZE_T*17`($sp)
- $POP r16,`$FRAME-$SIZE_T*16`($sp)
- $POP r17,`$FRAME-$SIZE_T*15`($sp)
- $POP r18,`$FRAME-$SIZE_T*14`($sp)
- $POP r19,`$FRAME-$SIZE_T*13`($sp)
- $POP r20,`$FRAME-$SIZE_T*12`($sp)
- $POP r21,`$FRAME-$SIZE_T*11`($sp)
- $POP r22,`$FRAME-$SIZE_T*10`($sp)
- $POP r23,`$FRAME-$SIZE_T*9`($sp)
- $POP r24,`$FRAME-$SIZE_T*8`($sp)
- $POP r25,`$FRAME-$SIZE_T*7`($sp)
- $POP r26,`$FRAME-$SIZE_T*6`($sp)
- $POP r27,`$FRAME-$SIZE_T*5`($sp)
- $POP r28,`$FRAME-$SIZE_T*4`($sp)
- $POP r29,`$FRAME-$SIZE_T*3`($sp)
- $POP r30,`$FRAME-$SIZE_T*2`($sp)
- $POP r31,`$FRAME-$SIZE_T*1`($sp)
- mtlr r0
- addi $sp,$sp,$FRAME
- blr
- .long 0
- .byte 0,12,4,1,0x80,18,3,0
- .long 0
-
-.align 5
-Lppc_AES_decrypt:
- lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,3
- lwz $t0,0($key)
- addi $Tbl2,$Tbl0,2
- lwz $t1,4($key)
- addi $Tbl3,$Tbl0,1
- lwz $t2,8($key)
- addi $acc00,$acc00,-1
- lwz $t3,12($key)
- addi $key,$key,16
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
- mtctr $acc00
-.align 4
-Ldec_loop:
- rlwinm $acc00,$s0,`32-24+3`,21,28
- rlwinm $acc01,$s1,`32-24+3`,21,28
- rlwinm $acc02,$s2,`32-24+3`,21,28
- rlwinm $acc03,$s3,`32-24+3`,21,28
- lwz $t0,0($key)
- rlwinm $acc04,$s3,`32-16+3`,21,28
- lwz $t1,4($key)
- rlwinm $acc05,$s0,`32-16+3`,21,28
- lwz $t2,8($key)
- rlwinm $acc06,$s1,`32-16+3`,21,28
- lwz $t3,12($key)
- rlwinm $acc07,$s2,`32-16+3`,21,28
- lwzx $acc00,$Tbl0,$acc00
- rlwinm $acc08,$s2,`32-8+3`,21,28
- lwzx $acc01,$Tbl0,$acc01
- rlwinm $acc09,$s3,`32-8+3`,21,28
- lwzx $acc02,$Tbl0,$acc02
- rlwinm $acc10,$s0,`32-8+3`,21,28
- lwzx $acc03,$Tbl0,$acc03
- rlwinm $acc11,$s1,`32-8+3`,21,28
- lwzx $acc04,$Tbl1,$acc04
- rlwinm $acc12,$s1,`0+3`,21,28
- lwzx $acc05,$Tbl1,$acc05
- rlwinm $acc13,$s2,`0+3`,21,28
- lwzx $acc06,$Tbl1,$acc06
- rlwinm $acc14,$s3,`0+3`,21,28
- lwzx $acc07,$Tbl1,$acc07
- rlwinm $acc15,$s0,`0+3`,21,28
- lwzx $acc08,$Tbl2,$acc08
- xor $t0,$t0,$acc00
- lwzx $acc09,$Tbl2,$acc09
- xor $t1,$t1,$acc01
- lwzx $acc10,$Tbl2,$acc10
- xor $t2,$t2,$acc02
- lwzx $acc11,$Tbl2,$acc11
- xor $t3,$t3,$acc03
- lwzx $acc12,$Tbl3,$acc12
- xor $t0,$t0,$acc04
- lwzx $acc13,$Tbl3,$acc13
- xor $t1,$t1,$acc05
- lwzx $acc14,$Tbl3,$acc14
- xor $t2,$t2,$acc06
- lwzx $acc15,$Tbl3,$acc15
- xor $t3,$t3,$acc07
- xor $t0,$t0,$acc08
- xor $t1,$t1,$acc09
- xor $t2,$t2,$acc10
- xor $t3,$t3,$acc11
- xor $s0,$t0,$acc12
- xor $s1,$t1,$acc13
- xor $s2,$t2,$acc14
- xor $s3,$t3,$acc15
- addi $key,$key,16
- bdnz- Ldec_loop
-
- addi $Tbl2,$Tbl0,2048
- nop
- lwz $t0,0($key)
- rlwinm $acc00,$s0,`32-24`,24,31
- lwz $t1,4($key)
- rlwinm $acc01,$s1,`32-24`,24,31
- lwz $t2,8($key)
- rlwinm $acc02,$s2,`32-24`,24,31
- lwz $t3,12($key)
- rlwinm $acc03,$s3,`32-24`,24,31
- lwz $acc08,`2048+0`($Tbl0) ! prefetch Td4
- rlwinm $acc04,$s3,`32-16`,24,31
- lwz $acc09,`2048+32`($Tbl0)
- rlwinm $acc05,$s0,`32-16`,24,31
- lwz $acc10,`2048+64`($Tbl0)
- lbzx $acc00,$Tbl2,$acc00
- lwz $acc11,`2048+96`($Tbl0)
- lbzx $acc01,$Tbl2,$acc01
- lwz $acc12,`2048+128`($Tbl0)
- rlwinm $acc06,$s1,`32-16`,24,31
- lwz $acc13,`2048+160`($Tbl0)
- rlwinm $acc07,$s2,`32-16`,24,31
- lwz $acc14,`2048+192`($Tbl0)
- rlwinm $acc08,$s2,`32-8`,24,31
- lwz $acc15,`2048+224`($Tbl0)
- rlwinm $acc09,$s3,`32-8`,24,31
- lbzx $acc02,$Tbl2,$acc02
- rlwinm $acc10,$s0,`32-8`,24,31
- lbzx $acc03,$Tbl2,$acc03
- rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc04,$Tbl2,$acc04
- rlwinm $acc12,$s1,`0`,24,31
- lbzx $acc05,$Tbl2,$acc05
- rlwinm $acc13,$s2,`0`,24,31
- lbzx $acc06,$Tbl2,$acc06
- rlwinm $acc14,$s3,`0`,24,31
- lbzx $acc07,$Tbl2,$acc07
- rlwinm $acc15,$s0,`0`,24,31
- lbzx $acc08,$Tbl2,$acc08
- rlwinm $s0,$acc00,24,0,7
- lbzx $acc09,$Tbl2,$acc09
- rlwinm $s1,$acc01,24,0,7
- lbzx $acc10,$Tbl2,$acc10
- rlwinm $s2,$acc02,24,0,7
- lbzx $acc11,$Tbl2,$acc11
- rlwinm $s3,$acc03,24,0,7
- lbzx $acc12,$Tbl2,$acc12
- rlwimi $s0,$acc04,16,8,15
- lbzx $acc13,$Tbl2,$acc13
- rlwimi $s1,$acc05,16,8,15
- lbzx $acc14,$Tbl2,$acc14
- rlwimi $s2,$acc06,16,8,15
- lbzx $acc15,$Tbl2,$acc15
- rlwimi $s3,$acc07,16,8,15
- rlwimi $s0,$acc08,8,16,23
- rlwimi $s1,$acc09,8,16,23
- rlwimi $s2,$acc10,8,16,23
- rlwimi $s3,$acc11,8,16,23
- or $s0,$s0,$acc12
- or $s1,$s1,$acc13
- or $s2,$s2,$acc14
- or $s3,$s3,$acc15
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-
-.align 4
-Lppc_AES_decrypt_compact:
- lwz $acc00,240($key)
- addi $Tbl1,$Tbl0,2048
- lwz $t0,0($key)
- lis $mask80,0x8080
- lwz $t1,4($key)
- lis $mask1b,0x1b1b
- lwz $t2,8($key)
- ori $mask80,$mask80,0x8080
- lwz $t3,12($key)
- ori $mask1b,$mask1b,0x1b1b
- addi $key,$key,16
-___
-$code.=<<___ if ($SIZE_T==8);
- insrdi $mask80,$mask80,32,0
- insrdi $mask1b,$mask1b,32,0
-___
-$code.=<<___;
- mtctr $acc00
-.align 4
-Ldec_compact_loop:
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- rlwinm $acc00,$s0,`32-24`,24,31
- xor $s2,$s2,$t2
- rlwinm $acc01,$s1,`32-24`,24,31
- xor $s3,$s3,$t3
- rlwinm $acc02,$s2,`32-24`,24,31
- rlwinm $acc03,$s3,`32-24`,24,31
- rlwinm $acc04,$s3,`32-16`,24,31
- rlwinm $acc05,$s0,`32-16`,24,31
- rlwinm $acc06,$s1,`32-16`,24,31
- rlwinm $acc07,$s2,`32-16`,24,31
- lbzx $acc00,$Tbl1,$acc00
- rlwinm $acc08,$s2,`32-8`,24,31
- lbzx $acc01,$Tbl1,$acc01
- rlwinm $acc09,$s3,`32-8`,24,31
- lbzx $acc02,$Tbl1,$acc02
- rlwinm $acc10,$s0,`32-8`,24,31
- lbzx $acc03,$Tbl1,$acc03
- rlwinm $acc11,$s1,`32-8`,24,31
- lbzx $acc04,$Tbl1,$acc04
- rlwinm $acc12,$s1,`0`,24,31
- lbzx $acc05,$Tbl1,$acc05
- rlwinm $acc13,$s2,`0`,24,31
- lbzx $acc06,$Tbl1,$acc06
- rlwinm $acc14,$s3,`0`,24,31
- lbzx $acc07,$Tbl1,$acc07
- rlwinm $acc15,$s0,`0`,24,31
- lbzx $acc08,$Tbl1,$acc08
- rlwinm $s0,$acc00,24,0,7
- lbzx $acc09,$Tbl1,$acc09
- rlwinm $s1,$acc01,24,0,7
- lbzx $acc10,$Tbl1,$acc10
- rlwinm $s2,$acc02,24,0,7
- lbzx $acc11,$Tbl1,$acc11
- rlwinm $s3,$acc03,24,0,7
- lbzx $acc12,$Tbl1,$acc12
- rlwimi $s0,$acc04,16,8,15
- lbzx $acc13,$Tbl1,$acc13
- rlwimi $s1,$acc05,16,8,15
- lbzx $acc14,$Tbl1,$acc14
- rlwimi $s2,$acc06,16,8,15
- lbzx $acc15,$Tbl1,$acc15
- rlwimi $s3,$acc07,16,8,15
- rlwimi $s0,$acc08,8,16,23
- rlwimi $s1,$acc09,8,16,23
- rlwimi $s2,$acc10,8,16,23
- rlwimi $s3,$acc11,8,16,23
- lwz $t0,0($key)
- or $s0,$s0,$acc12
- lwz $t1,4($key)
- or $s1,$s1,$acc13
- lwz $t2,8($key)
- or $s2,$s2,$acc14
- lwz $t3,12($key)
- or $s3,$s3,$acc15
-
- addi $key,$key,16
- bdz Ldec_compact_done
-___
-$code.=<<___ if ($SIZE_T==8);
- # vectorized permutation improves decrypt performance by 10%
- insrdi $s0,$s1,32,0
- insrdi $s2,$s3,32,0
-
- and $acc00,$s0,$mask80 # r1=r0&0x80808080
- and $acc02,$s2,$mask80
- srdi $acc04,$acc00,7 # r1>>7
- srdi $acc06,$acc02,7
- andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
- andc $acc10,$s2,$mask80
- sub $acc00,$acc00,$acc04 # r1-(r1>>7)
- sub $acc02,$acc02,$acc06
- add $acc08,$acc08,$acc08 # (r0&0x7f7f7f7f)<<1
- add $acc10,$acc10,$acc10
- and $acc00,$acc00,$mask1b # (r1-(r1>>7))&0x1b1b1b1b
- and $acc02,$acc02,$mask1b
- xor $acc00,$acc00,$acc08 # r2
- xor $acc02,$acc02,$acc10
-
- and $acc04,$acc00,$mask80 # r1=r2&0x80808080
- and $acc06,$acc02,$mask80
- srdi $acc08,$acc04,7 # r1>>7
- srdi $acc10,$acc06,7
- andc $acc12,$acc00,$mask80 # r2&0x7f7f7f7f
- andc $acc14,$acc02,$mask80
- sub $acc04,$acc04,$acc08 # r1-(r1>>7)
- sub $acc06,$acc06,$acc10
- add $acc12,$acc12,$acc12 # (r2&0x7f7f7f7f)<<1
- add $acc14,$acc14,$acc14
- and $acc04,$acc04,$mask1b # (r1-(r1>>7))&0x1b1b1b1b
- and $acc06,$acc06,$mask1b
- xor $acc04,$acc04,$acc12 # r4
- xor $acc06,$acc06,$acc14
-
- and $acc08,$acc04,$mask80 # r1=r4&0x80808080
- and $acc10,$acc06,$mask80
- srdi $acc12,$acc08,7 # r1>>7
- srdi $acc14,$acc10,7
- sub $acc08,$acc08,$acc12 # r1-(r1>>7)
- sub $acc10,$acc10,$acc14
- andc $acc12,$acc04,$mask80 # r4&0x7f7f7f7f
- andc $acc14,$acc06,$mask80
- add $acc12,$acc12,$acc12 # (r4&0x7f7f7f7f)<<1
- add $acc14,$acc14,$acc14
- and $acc08,$acc08,$mask1b # (r1-(r1>>7))&0x1b1b1b1b
- and $acc10,$acc10,$mask1b
- xor $acc08,$acc08,$acc12 # r8
- xor $acc10,$acc10,$acc14
-
- xor $acc00,$acc00,$s0 # r2^r0
- xor $acc02,$acc02,$s2
- xor $acc04,$acc04,$s0 # r4^r0
- xor $acc06,$acc06,$s2
-
- extrdi $acc01,$acc00,32,0
- extrdi $acc03,$acc02,32,0
- extrdi $acc05,$acc04,32,0
- extrdi $acc07,$acc06,32,0
- extrdi $acc09,$acc08,32,0
- extrdi $acc11,$acc10,32,0
-___
-$code.=<<___ if ($SIZE_T==4);
- and $acc00,$s0,$mask80 # r1=r0&0x80808080
- and $acc01,$s1,$mask80
- and $acc02,$s2,$mask80
- and $acc03,$s3,$mask80
- srwi $acc04,$acc00,7 # r1>>7
- andc $acc08,$s0,$mask80 # r0&0x7f7f7f7f
- srwi $acc05,$acc01,7
- andc $acc09,$s1,$mask80
- srwi $acc06,$acc02,7
- andc $acc10,$s2,$mask80
- srwi $acc07,$acc03,7
- andc $acc11,$s3,$mask80
- sub $acc00,$acc00,$acc04 # r1-(r1>>7)
- sub $acc01,$acc01,$acc05
- sub $acc02,$acc02,$acc06
- sub $acc03,$acc03,$acc07
- add $acc08,$acc08,$acc08 # (r0&0x7f7f7f7f)<<1
- add $acc09,$acc09,$acc09
- add $acc10,$acc10,$acc10
- add $acc11,$acc11,$acc11
- and $acc00,$acc00,$mask1b # (r1-(r1>>7))&0x1b1b1b1b
- and $acc01,$acc01,$mask1b
- and $acc02,$acc02,$mask1b
- and $acc03,$acc03,$mask1b
- xor $acc00,$acc00,$acc08 # r2
- xor $acc01,$acc01,$acc09
- xor $acc02,$acc02,$acc10
- xor $acc03,$acc03,$acc11
-
- and $acc04,$acc00,$mask80 # r1=r2&0x80808080
- and $acc05,$acc01,$mask80
- and $acc06,$acc02,$mask80
- and $acc07,$acc03,$mask80
- srwi $acc08,$acc04,7 # r1>>7
- andc $acc12,$acc00,$mask80 # r2&0x7f7f7f7f
- srwi $acc09,$acc05,7
- andc $acc13,$acc01,$mask80
- srwi $acc10,$acc06,7
- andc $acc14,$acc02,$mask80
- srwi $acc11,$acc07,7
- andc $acc15,$acc03,$mask80
- sub $acc04,$acc04,$acc08 # r1-(r1>>7)
- sub $acc05,$acc05,$acc09
- sub $acc06,$acc06,$acc10
- sub $acc07,$acc07,$acc11
- add $acc12,$acc12,$acc12 # (r2&0x7f7f7f7f)<<1
- add $acc13,$acc13,$acc13
- add $acc14,$acc14,$acc14
- add $acc15,$acc15,$acc15
- and $acc04,$acc04,$mask1b # (r1-(r1>>7))&0x1b1b1b1b
- and $acc05,$acc05,$mask1b
- and $acc06,$acc06,$mask1b
- and $acc07,$acc07,$mask1b
- xor $acc04,$acc04,$acc12 # r4
- xor $acc05,$acc05,$acc13
- xor $acc06,$acc06,$acc14
- xor $acc07,$acc07,$acc15
-
- and $acc08,$acc04,$mask80 # r1=r4&0x80808080
- and $acc09,$acc05,$mask80
- srwi $acc12,$acc08,7 # r1>>7
- and $acc10,$acc06,$mask80
- srwi $acc13,$acc09,7
- and $acc11,$acc07,$mask80
- srwi $acc14,$acc10,7
- sub $acc08,$acc08,$acc12 # r1-(r1>>7)
- srwi $acc15,$acc11,7
- sub $acc09,$acc09,$acc13
- sub $acc10,$acc10,$acc14
- sub $acc11,$acc11,$acc15
- andc $acc12,$acc04,$mask80 # r4&0x7f7f7f7f
- andc $acc13,$acc05,$mask80
- andc $acc14,$acc06,$mask80
- andc $acc15,$acc07,$mask80
- add $acc12,$acc12,$acc12 # (r4&0x7f7f7f7f)<<1
- add $acc13,$acc13,$acc13
- add $acc14,$acc14,$acc14
- add $acc15,$acc15,$acc15
- and $acc08,$acc08,$mask1b # (r1-(r1>>7))&0x1b1b1b1b
- and $acc09,$acc09,$mask1b
- and $acc10,$acc10,$mask1b
- and $acc11,$acc11,$mask1b
- xor $acc08,$acc08,$acc12 # r8
- xor $acc09,$acc09,$acc13
- xor $acc10,$acc10,$acc14
- xor $acc11,$acc11,$acc15
-
- xor $acc00,$acc00,$s0 # r2^r0
- xor $acc01,$acc01,$s1
- xor $acc02,$acc02,$s2
- xor $acc03,$acc03,$s3
- xor $acc04,$acc04,$s0 # r4^r0
- xor $acc05,$acc05,$s1
- xor $acc06,$acc06,$s2
- xor $acc07,$acc07,$s3
-___
-$code.=<<___;
- rotrwi $s0,$s0,8 # = ROTATE(r0,8)
- rotrwi $s1,$s1,8
- xor $s0,$s0,$acc00 # ^= r2^r0
- rotrwi $s2,$s2,8
- xor $s1,$s1,$acc01
- rotrwi $s3,$s3,8
- xor $s2,$s2,$acc02
- xor $s3,$s3,$acc03
- xor $acc00,$acc00,$acc08
- xor $acc01,$acc01,$acc09
- xor $acc02,$acc02,$acc10
- xor $acc03,$acc03,$acc11
- xor $s0,$s0,$acc04 # ^= r4^r0
- rotrwi $acc00,$acc00,24
- xor $s1,$s1,$acc05
- rotrwi $acc01,$acc01,24
- xor $s2,$s2,$acc06
- rotrwi $acc02,$acc02,24
- xor $s3,$s3,$acc07
- rotrwi $acc03,$acc03,24
- xor $acc04,$acc04,$acc08
- xor $acc05,$acc05,$acc09
- xor $acc06,$acc06,$acc10
- xor $acc07,$acc07,$acc11
- xor $s0,$s0,$acc08 # ^= r8 [^((r4^r0)^(r2^r0)=r4^r2)]
- rotrwi $acc04,$acc04,16
- xor $s1,$s1,$acc09
- rotrwi $acc05,$acc05,16
- xor $s2,$s2,$acc10
- rotrwi $acc06,$acc06,16
- xor $s3,$s3,$acc11
- rotrwi $acc07,$acc07,16
- xor $s0,$s0,$acc00 # ^= ROTATE(r8^r2^r0,24)
- rotrwi $acc08,$acc08,8
- xor $s1,$s1,$acc01
- rotrwi $acc09,$acc09,8
- xor $s2,$s2,$acc02
- rotrwi $acc10,$acc10,8
- xor $s3,$s3,$acc03
- rotrwi $acc11,$acc11,8
- xor $s0,$s0,$acc04 # ^= ROTATE(r8^r4^r0,16)
- xor $s1,$s1,$acc05
- xor $s2,$s2,$acc06
- xor $s3,$s3,$acc07
- xor $s0,$s0,$acc08 # ^= ROTATE(r8,8)
- xor $s1,$s1,$acc09
- xor $s2,$s2,$acc10
- xor $s3,$s3,$acc11
-
- b Ldec_compact_loop
-.align 4
-Ldec_compact_done:
- xor $s0,$s0,$t0
- xor $s1,$s1,$t1
- xor $s2,$s2,$t2
- xor $s3,$s3,$t3
- blr
- .long 0
- .byte 0,12,0x14,0,0,0,0,0
-
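Ldec_compact_loop composes InvMixColumns from three such doublings, r2 = xtime(r0), r4 = xtime(r2), r8 = xtime(r4), combined with the rotations its comments trace (coefficients 0x0e, 0x0b, 0x0d, 0x09). A Perl sketch under the same assumptions, reusing the illustrative xtime4 from above:

    sub rotr { my ($x, $n) = @_; (($x >> $n) | ($x << (32 - $n))) & 0xffffffff }

    # InvMixColumns on one packed column, following the comment trail above
    sub inv_mix_column {
        my ($r0) = @_;
        my $r2 = xtime4($r0);                        # 2*x per byte
        my $r4 = xtime4($r2);                        # 4*x per byte
        my $r8 = xtime4($r4);                        # 8*x per byte
        return ($r2 ^ $r4 ^ $r8)                     # 0x0e on the byte itself
             ^ rotr($r0 ^ $r8, 8)                    # 0x09 term
             ^ rotr($r0 ^ $r2 ^ $r8, 24)             # 0x0b term
             ^ rotr($r0 ^ $r4 ^ $r8, 16);            # 0x0d term
    }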
-.asciz "AES for PPC, CRYPTOGAMS by <appro\@openssl.org>"
-.align 7
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
-close STDOUT;
diff --git a/app/openssl/crypto/aes/asm/aes-s390x.pl b/app/openssl/crypto/aes/asm/aes-s390x.pl
deleted file mode 100644
index e75dcd03..00000000
--- a/app/openssl/crypto/aes/asm/aes-s390x.pl
+++ /dev/null
@@ -1,2237 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-
-# AES for s390x.
-
-# April 2007.
-#
-# Software performance improvement over gcc-generated code is ~70% and
-# in absolute terms is ~73 cycles per byte processed with a 128-bit key.
-# You're likely to exclaim "why so slow?" Keep in mind that z-CPUs execute
-# *strictly* in order, and an issued instruction [in this case a load from
-# memory is the critical one] has to complete before execution flow
-# proceeds. S-boxes are compressed to 2KB[+256B].
-#
-# As for hardware acceleration support: it's basically a "teaser," as
-# it can and should be improved in several ways. Most notably, support
-# for CBC is not utilized, nor are multiple blocks ever processed.
-# Then the software key schedule could be postponed until hardware
-# support detection... Performance improvement over assembler is
-# reportedly ~2.5x, but can reach >8x [naturally on larger chunks] if
-# proper support is implemented.
-
-# May 2007.
-#
-# Implement AES_set_[en|de]crypt_key. Key schedule setup is avoided
-# for 128-bit keys, if hardware support is detected.
-
-# January 2009.
-#
-# Add support for hardware AES192/256 and reschedule instructions to
-# minimize/avoid the Address Generation Interlock hazard and to favour
-# the dual-issue z10 pipeline. This gave ~25% improvement on z10 and
-# almost 50% on z9. The gain is smaller on z10 because, being dual-issue,
-# the z10 makes it impossible to eliminate the interlock condition: the
-# critical path is not long enough. Yet it spends ~24 cycles per byte
-# processed with a 128-bit key.
-#
-# Unlike the previous version, hardware support detection takes place
-# only at the moment of key schedule setup, which is denoted in
-# key->rounds. This is done because deferred key setup can't be made
-# MT-safe for keys longer than 128 bits.
-#
-# Add AES_cbc_encrypt, which gives an incredible performance improvement;
-# it was measured to be ~6.6x. It's less than the previously mentioned 8x
-# because the software implementation was optimized.
-
-# May 2010.
-#
-# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to a 4.3x
-# performance improvement over the "generic" counter-mode routine relying
-# on the single-block, also hardware-assisted, AES_encrypt. "Up to" refers
-# to the fact that the exact throughput depends on the current stack-frame
-# alignment within the 4KB page. In the worst case you get ~75% of the
-# maximum, but *on average* it is as much as ~98%, meaning the worst case
-# is unlikely: it's like hitting a ravine on a plateau.
-
-# November 2010.
-#
-# Adapt for the -m31 build. If the kernel supports the so-called
-# "highgprs" feature on Linux [see /proc/cpuinfo], it's possible to use
-# 64-bit instructions and achieve "64-bit" performance even in a 31-bit
-# legacy application context. The feature is not specific to any
-# particular processor, as long as it's a "z-CPU"; the latter implies
-# that the code remains z/Architecture specific. On z990 it was measured
-# to perform 2x better than code generated by gcc 4.3.
-
-# December 2010.
-#
-# Add support for the z196 "cipher message with counter" instruction.
-# Note, however, that it's disengaged, because it was measured to
-# perform ~12% worse than vanilla km-based code...
-
-# February 2011.
-#
-# Add AES_xts_[en|de]crypt. This includes support for the z196 km-xts-aes
-# instructions, which deliver a ~70% improvement over vanilla km-based
-# code at 8KB block size, and ~37% at block sizes around 512 bytes.
-
-$flavour = shift;
-
-if ($flavour =~ /3[12]/) {
- $SIZE_T=4;
- $g="";
-} else {
- $SIZE_T=8;
- $g="g";
-}
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-$softonly=0; # allow hardware support
-
-$t0="%r0"; $mask="%r0";
-$t1="%r1";
-$t2="%r2"; $inp="%r2";
-$t3="%r3"; $out="%r3"; $bits="%r3";
-$key="%r4";
-$i1="%r5";
-$i2="%r6";
-$i3="%r7";
-$s0="%r8";
-$s1="%r9";
-$s2="%r10";
-$s3="%r11";
-$tbl="%r12";
-$rounds="%r13";
-$ra="%r14";
-$sp="%r15";
-
-$stdframe=16*$SIZE_T+4*8;
-
-sub _data_word()
-{ my $i;
- while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
-}
-
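_data_word emits every table word twice, which is deliberate: with 8-byte entries, a 4-byte load at byte offsets 0..3 within an entry yields the big-endian word rotated left by 0, 8, 16, or 24 bits, so one physical table serves as Te0..Te3 (and Td0..Td3). That is what the scaled indices (mask 0xff<<3) and the small displacements in the loads further down exploit. A quick Perl check of the property:

    my $w = 0xc66363a5;                  # first Te entry, as an example
    my $entry = pack("N N", $w, $w);     # what ".long w,w" lays down
    for my $k (0 .. 3) {
        # a 4-byte load at offset $k sees the word rotated left by 8*$k
        printf "offset %d: 0x%08x\n", $k, unpack("N", substr($entry, $k, 4));
    }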
-$code=<<___;
-.text
-
-.type AES_Te,\@object
-.align 256
-AES_Te:
-___
-&_data_word(
- 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
- 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
- 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
- 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
- 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
- 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
- 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
- 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
- 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
- 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
- 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
- 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
- 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
- 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
- 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
- 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
- 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
- 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
- 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
- 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
- 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
- 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
- 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
- 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
- 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
- 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
- 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
- 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
- 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
- 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
- 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
- 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
- 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
- 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
- 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
- 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
- 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
- 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
- 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
- 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
- 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
- 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
- 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
- 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
- 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
- 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
- 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
- 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
- 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
- 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
- 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
- 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
- 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
- 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
- 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
- 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
- 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
- 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
- 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
- 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
- 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
- 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
- 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
- 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
-$code.=<<___;
-# Te4[256]
-.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
-.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
-.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
-.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
-.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
-.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
-.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
-.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
-.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
-.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
-.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
-.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
-.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
-.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
-.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
-.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
-.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
-.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
-.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
-.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
-.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
-.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
-.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
-.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
-.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
-.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
-.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
-.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
-.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
-.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
-.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
-.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-# rcon[]
-.long 0x01000000, 0x02000000, 0x04000000, 0x08000000
-.long 0x10000000, 0x20000000, 0x40000000, 0x80000000
-.long 0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
-.align 256
-.size AES_Te,.-AES_Te
-
-# void AES_encrypt(const unsigned char *inp, unsigned char *out,
-# const AES_KEY *key) {
-.globl AES_encrypt
-.type AES_encrypt,\@function
-AES_encrypt:
-___
-$code.=<<___ if (!$softonly);
- l %r0,240($key)
- lhi %r1,16
- clr %r0,%r1
- jl .Lesoft
-
- la %r1,0($key)
- #la %r2,0($inp)
- la %r4,0($out)
- lghi %r3,16 # single block length
- .long 0xb92e0042 # km %r4,%r2
- brc 1,.-4 # can this happen?
- br %r14
-.align 64
-.Lesoft:
-___
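A note on the hand-assembled opcode above: 0xb92e0042 is the RRE-format km (cipher message) instruction operating on %r4 and %r2, and brc 1,.-4 re-executes it while the CPU reports partial completion. Decoding the word in Perl makes the encoding visible (a sketch, relying on the documented RRE layout):

    my $insn = 0xb92e0042;               # the .long emitted above
    printf "opcode=0x%04x r1=%%r%d r2=%%r%d\n",
           $insn >> 16,                  # b92e = km
           ($insn >> 4) & 0xf,           # R1 field -> %r4
           $insn & 0xf;                  # R2 field -> %r2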
-$code.=<<___;
- stm${g} %r3,$ra,3*$SIZE_T($sp)
-
- llgf $s0,0($inp)
- llgf $s1,4($inp)
- llgf $s2,8($inp)
- llgf $s3,12($inp)
-
- larl $tbl,AES_Te
- bras $ra,_s390x_AES_encrypt
-
- l${g} $out,3*$SIZE_T($sp)
- st $s0,0($out)
- st $s1,4($out)
- st $s2,8($out)
- st $s3,12($out)
-
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_encrypt,.-AES_encrypt
-
-.type _s390x_AES_encrypt,\@function
-.align 16
-_s390x_AES_encrypt:
- st${g} $ra,15*$SIZE_T($sp)
- x $s0,0($key)
- x $s1,4($key)
- x $s2,8($key)
- x $s3,12($key)
- l $rounds,240($key)
- llill $mask,`0xff<<3`
- aghi $rounds,-1
- j .Lenc_loop
-.align 16
-.Lenc_loop:
- sllg $t1,$s0,`0+3`
- srlg $t2,$s0,`8-3`
- srlg $t3,$s0,`16-3`
- srl $s0,`24-3`
- nr $s0,$mask
- ngr $t1,$mask
- nr $t2,$mask
- nr $t3,$mask
-
- srlg $i1,$s1,`16-3` # i0
- sllg $i2,$s1,`0+3`
- srlg $i3,$s1,`8-3`
- srl $s1,`24-3`
- nr $i1,$mask
- nr $s1,$mask
- ngr $i2,$mask
- nr $i3,$mask
-
- l $s0,0($s0,$tbl) # Te0[s0>>24]
- l $t1,1($t1,$tbl) # Te3[s0>>0]
- l $t2,2($t2,$tbl) # Te2[s0>>8]
- l $t3,3($t3,$tbl) # Te1[s0>>16]
-
- x $s0,3($i1,$tbl) # Te1[s1>>16]
- l $s1,0($s1,$tbl) # Te0[s1>>24]
- x $t2,1($i2,$tbl) # Te3[s1>>0]
- x $t3,2($i3,$tbl) # Te2[s1>>8]
-
- srlg $i1,$s2,`8-3` # i0
- srlg $i2,$s2,`16-3` # i1
- nr $i1,$mask
- nr $i2,$mask
- sllg $i3,$s2,`0+3`
- srl $s2,`24-3`
- nr $s2,$mask
- ngr $i3,$mask
-
- xr $s1,$t1
- srlg $ra,$s3,`8-3` # i1
- sllg $t1,$s3,`0+3` # i0
- nr $ra,$mask
- la $key,16($key)
- ngr $t1,$mask
-
- x $s0,2($i1,$tbl) # Te2[s2>>8]
- x $s1,3($i2,$tbl) # Te1[s2>>16]
- l $s2,0($s2,$tbl) # Te0[s2>>24]
- x $t3,1($i3,$tbl) # Te3[s2>>0]
-
- srlg $i3,$s3,`16-3` # i2
- xr $s2,$t2
- srl $s3,`24-3`
- nr $i3,$mask
- nr $s3,$mask
-
- x $s0,0($key)
- x $s1,4($key)
- x $s2,8($key)
- x $t3,12($key)
-
- x $s0,1($t1,$tbl) # Te3[s3>>0]
- x $s1,2($ra,$tbl) # Te2[s3>>8]
- x $s2,3($i3,$tbl) # Te1[s3>>16]
- l $s3,0($s3,$tbl) # Te0[s3>>24]
- xr $s3,$t3
-
- brct $rounds,.Lenc_loop
- .align 16
-
- sllg $t1,$s0,`0+3`
- srlg $t2,$s0,`8-3`
- ngr $t1,$mask
- srlg $t3,$s0,`16-3`
- srl $s0,`24-3`
- nr $s0,$mask
- nr $t2,$mask
- nr $t3,$mask
-
- srlg $i1,$s1,`16-3` # i0
- sllg $i2,$s1,`0+3`
- ngr $i2,$mask
- srlg $i3,$s1,`8-3`
- srl $s1,`24-3`
- nr $i1,$mask
- nr $s1,$mask
- nr $i3,$mask
-
- llgc $s0,2($s0,$tbl) # Te4[s0>>24]
- llgc $t1,2($t1,$tbl) # Te4[s0>>0]
- sll $s0,24
- llgc $t2,2($t2,$tbl) # Te4[s0>>8]
- llgc $t3,2($t3,$tbl) # Te4[s0>>16]
- sll $t2,8
- sll $t3,16
-
- llgc $i1,2($i1,$tbl) # Te4[s1>>16]
- llgc $s1,2($s1,$tbl) # Te4[s1>>24]
- llgc $i2,2($i2,$tbl) # Te4[s1>>0]
- llgc $i3,2($i3,$tbl) # Te4[s1>>8]
- sll $i1,16
- sll $s1,24
- sll $i3,8
- or $s0,$i1
- or $s1,$t1
- or $t2,$i2
- or $t3,$i3
-
- srlg $i1,$s2,`8-3` # i0
- srlg $i2,$s2,`16-3` # i1
- nr $i1,$mask
- nr $i2,$mask
- sllg $i3,$s2,`0+3`
- srl $s2,`24-3`
- ngr $i3,$mask
- nr $s2,$mask
-
- sllg $t1,$s3,`0+3` # i0
- srlg $ra,$s3,`8-3` # i1
- ngr $t1,$mask
-
- llgc $i1,2($i1,$tbl) # Te4[s2>>8]
- llgc $i2,2($i2,$tbl) # Te4[s2>>16]
- sll $i1,8
- llgc $s2,2($s2,$tbl) # Te4[s2>>24]
- llgc $i3,2($i3,$tbl) # Te4[s2>>0]
- sll $i2,16
- nr $ra,$mask
- sll $s2,24
- or $s0,$i1
- or $s1,$i2
- or $s2,$t2
- or $t3,$i3
-
- srlg $i3,$s3,`16-3` # i2
- srl $s3,`24-3`
- nr $i3,$mask
- nr $s3,$mask
-
- l $t0,16($key)
- l $t2,20($key)
-
- llgc $i1,2($t1,$tbl) # Te4[s3>>0]
- llgc $i2,2($ra,$tbl) # Te4[s3>>8]
- llgc $i3,2($i3,$tbl) # Te4[s3>>16]
- llgc $s3,2($s3,$tbl) # Te4[s3>>24]
- sll $i2,8
- sll $i3,16
- sll $s3,24
- or $s0,$i1
- or $s1,$i2
- or $s2,$i3
- or $s3,$t3
-
- l${g} $ra,15*$SIZE_T($sp)
- xr $s0,$t0
- xr $s1,$t2
- x $s2,24($key)
- x $s3,28($key)
-
- br $ra
-.size _s390x_AES_encrypt,.-_s390x_AES_encrypt
-___
-
-$code.=<<___;
-.type AES_Td,\@object
-.align 256
-AES_Td:
-___
-&_data_word(
- 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
- 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
- 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
- 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
- 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
- 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
- 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
- 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
- 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
- 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
- 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
- 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
- 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
- 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
- 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
- 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
- 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
- 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
- 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
- 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
- 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
- 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
- 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
- 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
- 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
- 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
- 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
- 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
- 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
- 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
- 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
- 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
- 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
- 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
- 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
- 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
- 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
- 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
- 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
- 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
- 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
- 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
- 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
- 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
- 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
- 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
- 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
- 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
- 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
- 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
- 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
- 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
- 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
- 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
- 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
- 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
- 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
- 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
- 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
- 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
- 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
- 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
- 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
- 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
-$code.=<<___;
-# Td4[256]
-.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
-.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
-.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
-.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
-.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
-.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
-.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
-.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
-.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
-.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
-.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
-.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
-.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
-.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
-.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
-.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
-.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
-.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
-.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
-.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
-.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
-.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
-.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
-.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
-.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
-.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
-.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
-.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
-.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
-.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
-.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
-.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.size AES_Td,.-AES_Td
-
-# void AES_decrypt(const unsigned char *inp, unsigned char *out,
-# const AES_KEY *key) {
-.globl AES_decrypt
-.type AES_decrypt,\@function
-AES_decrypt:
-___
-$code.=<<___ if (!$softonly);
- l %r0,240($key)
- lhi %r1,16
- clr %r0,%r1
- jl .Ldsoft
-
- la %r1,0($key)
- #la %r2,0($inp)
- la %r4,0($out)
- lghi %r3,16 # single block length
- .long 0xb92e0042 # km %r4,%r2
- brc 1,.-4 # can this happen?
- br %r14
-.align 64
-.Ldsoft:
-___
-$code.=<<___;
- stm${g} %r3,$ra,3*$SIZE_T($sp)
-
- llgf $s0,0($inp)
- llgf $s1,4($inp)
- llgf $s2,8($inp)
- llgf $s3,12($inp)
-
- larl $tbl,AES_Td
- bras $ra,_s390x_AES_decrypt
-
- l${g} $out,3*$SIZE_T($sp)
- st $s0,0($out)
- st $s1,4($out)
- st $s2,8($out)
- st $s3,12($out)
-
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_decrypt,.-AES_decrypt
-
-.type _s390x_AES_decrypt,\@function
-.align 16
-_s390x_AES_decrypt:
- st${g} $ra,15*$SIZE_T($sp)
- x $s0,0($key)
- x $s1,4($key)
- x $s2,8($key)
- x $s3,12($key)
- l $rounds,240($key)
- llill $mask,`0xff<<3`
- aghi $rounds,-1
- j .Ldec_loop
-.align 16
-.Ldec_loop:
- srlg $t1,$s0,`16-3`
- srlg $t2,$s0,`8-3`
- sllg $t3,$s0,`0+3`
- srl $s0,`24-3`
- nr $s0,$mask
- nr $t1,$mask
- nr $t2,$mask
- ngr $t3,$mask
-
- sllg $i1,$s1,`0+3` # i0
- srlg $i2,$s1,`16-3`
- srlg $i3,$s1,`8-3`
- srl $s1,`24-3`
- ngr $i1,$mask
- nr $s1,$mask
- nr $i2,$mask
- nr $i3,$mask
-
- l $s0,0($s0,$tbl) # Td0[s0>>24]
- l $t1,3($t1,$tbl) # Td1[s0>>16]
- l $t2,2($t2,$tbl) # Td2[s0>>8]
- l $t3,1($t3,$tbl) # Td3[s0>>0]
-
- x $s0,1($i1,$tbl) # Td3[s1>>0]
- l $s1,0($s1,$tbl) # Td0[s1>>24]
- x $t2,3($i2,$tbl) # Td1[s1>>16]
- x $t3,2($i3,$tbl) # Td2[s1>>8]
-
- srlg $i1,$s2,`8-3` # i0
- sllg $i2,$s2,`0+3` # i1
- srlg $i3,$s2,`16-3`
- srl $s2,`24-3`
- nr $i1,$mask
- ngr $i2,$mask
- nr $s2,$mask
- nr $i3,$mask
-
- xr $s1,$t1
- srlg $ra,$s3,`8-3` # i1
- srlg $t1,$s3,`16-3` # i0
- nr $ra,$mask
- la $key,16($key)
- nr $t1,$mask
-
- x $s0,2($i1,$tbl) # Td2[s2>>8]
- x $s1,1($i2,$tbl) # Td3[s2>>0]
- l $s2,0($s2,$tbl) # Td0[s2>>24]
- x $t3,3($i3,$tbl) # Td1[s2>>16]
-
- sllg $i3,$s3,`0+3` # i2
- srl $s3,`24-3`
- ngr $i3,$mask
- nr $s3,$mask
-
- xr $s2,$t2
- x $s0,0($key)
- x $s1,4($key)
- x $s2,8($key)
- x $t3,12($key)
-
- x $s0,3($t1,$tbl) # Td1[s3>>16]
- x $s1,2($ra,$tbl) # Td2[s3>>8]
- x $s2,1($i3,$tbl) # Td3[s3>>0]
- l $s3,0($s3,$tbl) # Td0[s3>>24]
- xr $s3,$t3
-
- brct $rounds,.Ldec_loop
- .align 16
-
- l $t1,`2048+0`($tbl) # prefetch Td4
- l $t2,`2048+64`($tbl)
- l $t3,`2048+128`($tbl)
- l $i1,`2048+192`($tbl)
- llill $mask,0xff
-
- srlg $i3,$s0,24 # i0
- srlg $t1,$s0,16
- srlg $t2,$s0,8
- nr $s0,$mask # i3
- nr $t1,$mask
-
- srlg $i1,$s1,24
- nr $t2,$mask
- srlg $i2,$s1,16
- srlg $ra,$s1,8
- nr $s1,$mask # i0
- nr $i2,$mask
- nr $ra,$mask
-
- llgc $i3,2048($i3,$tbl) # Td4[s0>>24]
- llgc $t1,2048($t1,$tbl) # Td4[s0>>16]
- llgc $t2,2048($t2,$tbl) # Td4[s0>>8]
- sll $t1,16
- llgc $t3,2048($s0,$tbl) # Td4[s0>>0]
- sllg $s0,$i3,24
- sll $t2,8
-
- llgc $s1,2048($s1,$tbl) # Td4[s1>>0]
- llgc $i1,2048($i1,$tbl) # Td4[s1>>24]
- llgc $i2,2048($i2,$tbl) # Td4[s1>>16]
- sll $i1,24
- llgc $i3,2048($ra,$tbl) # Td4[s1>>8]
- sll $i2,16
- sll $i3,8
- or $s0,$s1
- or $t1,$i1
- or $t2,$i2
- or $t3,$i3
-
- srlg $i1,$s2,8 # i0
- srlg $i2,$s2,24
- srlg $i3,$s2,16
- nr $s2,$mask # i1
- nr $i1,$mask
- nr $i3,$mask
- llgc $i1,2048($i1,$tbl) # Td4[s2>>8]
- llgc $s1,2048($s2,$tbl) # Td4[s2>>0]
- llgc $i2,2048($i2,$tbl) # Td4[s2>>24]
- llgc $i3,2048($i3,$tbl) # Td4[s2>>16]
- sll $i1,8
- sll $i2,24
- or $s0,$i1
- sll $i3,16
- or $t2,$i2
- or $t3,$i3
-
- srlg $i1,$s3,16 # i0
- srlg $i2,$s3,8 # i1
- srlg $i3,$s3,24
- nr $s3,$mask # i2
- nr $i1,$mask
- nr $i2,$mask
-
- l${g} $ra,15*$SIZE_T($sp)
- or $s1,$t1
- l $t0,16($key)
- l $t1,20($key)
-
- llgc $i1,2048($i1,$tbl) # Td4[s3>>16]
- llgc $i2,2048($i2,$tbl) # Td4[s3>>8]
- sll $i1,16
- llgc $s2,2048($s3,$tbl) # Td4[s3>>0]
- llgc $s3,2048($i3,$tbl) # Td4[s3>>24]
- sll $i2,8
- sll $s3,24
- or $s0,$i1
- or $s1,$i2
- or $s2,$t2
- or $s3,$t3
-
- xr $s0,$t0
- xr $s1,$t1
- x $s2,24($key)
- x $s3,28($key)
-
- br $ra
-.size _s390x_AES_decrypt,.-_s390x_AES_decrypt
-___
-
-$code.=<<___;
-# void AES_set_encrypt_key(const unsigned char *in, int bits,
-# AES_KEY *key) {
-.globl private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,\@function
-.align 16
-private_AES_set_encrypt_key:
-_s390x_AES_set_encrypt_key:
- lghi $t0,0
- cl${g}r $inp,$t0
- je .Lminus1
- cl${g}r $key,$t0
- je .Lminus1
-
- lghi $t0,128
- clr $bits,$t0
- je .Lproceed
- lghi $t0,192
- clr $bits,$t0
- je .Lproceed
- lghi $t0,256
- clr $bits,$t0
- je .Lproceed
- lghi %r2,-2
- br %r14
-
-.align 16
-.Lproceed:
-___
-$code.=<<___ if (!$softonly);
- # convert bits to km code, [128,192,256]->[18,19,20]
- lhi %r5,-128
- lhi %r0,18
- ar %r5,$bits
- srl %r5,6
- ar %r5,%r0
-
- larl %r1,OPENSSL_s390xcap_P
- lg %r0,0(%r1)
- tmhl %r0,0x4000 # check for message-security assist
- jz .Lekey_internal
-
- lghi %r0,0 # query capability vector
- la %r1,16($sp)
- .long 0xb92f0042 # kmc %r4,%r2
-
- llihh %r1,0x8000
- srlg %r1,%r1,0(%r5)
- ng %r1,16($sp)
- jz .Lekey_internal
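
The query leaves a 128-bit function-code vector in the parameter block at 16($sp); the llihh/srlg/ng triple then tests the bit for our code, counted from the most significant end. A minimal C sketch of that test, assuming the MSB-first bit numbering implied by the shift (km_code_supported is an illustrative name):

    #include <stdint.h>

    /* Test bit 'code' (MSB-first numbering) of the 128-bit capability
     * vector returned by the km/kmc query function (code 0). */
    static int km_code_supported(const unsigned char vec[16], unsigned code)
    {
        return (vec[code / 8] >> (7 - code % 8)) & 1;
    }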
-
- lmg %r0,%r1,0($inp) # just copy 128 bits...
- stmg %r0,%r1,0($key)
- lhi %r0,192
- cr $bits,%r0
- jl 1f
- lg %r1,16($inp)
- stg %r1,16($key)
- je 1f
- lg %r1,24($inp)
- stg %r1,24($key)
-1: st $bits,236($key) # save bits [for debugging purposes]
- lgr $t0,%r5
- st %r5,240($key) # save km code
- lghi %r2,0
- br %r14
-___
-$code.=<<___;
-.align 16
-.Lekey_internal:
- stm${g} %r4,%r13,4*$SIZE_T($sp) # all non-volatile regs and $key
-
- larl $tbl,AES_Te+2048
-
- llgf $s0,0($inp)
- llgf $s1,4($inp)
- llgf $s2,8($inp)
- llgf $s3,12($inp)
- st $s0,0($key)
- st $s1,4($key)
- st $s2,8($key)
- st $s3,12($key)
- lghi $t0,128
- cr $bits,$t0
- jne .Lnot128
-
- llill $mask,0xff
- lghi $t3,0 # i=0
- lghi $rounds,10
- st $rounds,240($key)
-
- llgfr $t2,$s3 # temp=rk[3]
- srlg $i1,$s3,8
- srlg $i2,$s3,16
- srlg $i3,$s3,24
- nr $t2,$mask
- nr $i1,$mask
- nr $i2,$mask
-
-.align 16
-.L128_loop:
- la $t2,0($t2,$tbl)
- la $i1,0($i1,$tbl)
- la $i2,0($i2,$tbl)
- la $i3,0($i3,$tbl)
- icm $t2,2,0($t2) # Te4[rk[3]>>0]<<8
- icm $t2,4,0($i1) # Te4[rk[3]>>8]<<16
- icm $t2,8,0($i2) # Te4[rk[3]>>16]<<24
- icm $t2,1,0($i3) # Te4[rk[3]>>24]
- x $t2,256($t3,$tbl) # rcon[i]
- xr $s0,$t2 # rk[4]=rk[0]^...
- xr $s1,$s0 # rk[5]=rk[1]^rk[4]
- xr $s2,$s1 # rk[6]=rk[2]^rk[5]
- xr $s3,$s2 # rk[7]=rk[3]^rk[6]
-
- llgfr $t2,$s3 # temp=rk[3]
- srlg $i1,$s3,8
- srlg $i2,$s3,16
- nr $t2,$mask
- nr $i1,$mask
- srlg $i3,$s3,24
- nr $i2,$mask
-
- st $s0,16($key)
- st $s1,20($key)
- st $s2,24($key)
- st $s3,28($key)
- la $key,16($key) # key+=4
- la $t3,4($t3) # i++
- brct $rounds,.L128_loop
- lghi $t0,10
- lghi %r2,0
- lm${g} %r4,%r13,4*$SIZE_T($sp)
- br $ra
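
The rk[4]=rk[0]^... comments above follow the standard AES-128 key schedule. A minimal C sketch of the same expansion, assuming a plain 256-byte S-box Te4 and word-sized round constants rcon (the assembly instead picks the S-box bytes out of its combined AES_Te table with icm):

    #include <stdint.h>

    extern const uint8_t  Te4[256];     /* AES S-box (assumed available) */
    extern const uint32_t rcon[10];     /* 0x01000000, 0x02000000, ... */

    /* Expand a 128-bit key (4 big-endian words) into 44 round-key
     * words, mirroring .L128_loop. */
    static void aes128_expand(const uint32_t in[4], uint32_t rk[44])
    {
        for (int i = 0; i < 4; i++)
            rk[i] = in[i];
        for (int i = 0; i < 10; i++) {
            uint32_t t = rk[4 * i + 3];                 /* temp = rk[3] */
            t = (uint32_t)Te4[(t >> 16) & 0xff] << 24 | /* SubWord(RotWord(t)) */
                (uint32_t)Te4[(t >>  8) & 0xff] << 16 |
                (uint32_t)Te4[ t        & 0xff] <<  8 |
                (uint32_t)Te4[(t >> 24) & 0xff];
            t ^= rcon[i];
            rk[4 * i + 4] = rk[4 * i + 0] ^ t;          /* rk[4]=rk[0]^... */
            rk[4 * i + 5] = rk[4 * i + 1] ^ rk[4 * i + 4];
            rk[4 * i + 6] = rk[4 * i + 2] ^ rk[4 * i + 5];
            rk[4 * i + 7] = rk[4 * i + 3] ^ rk[4 * i + 6];
        }
    }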
-
-.align 16
-.Lnot128:
- llgf $t0,16($inp)
- llgf $t1,20($inp)
- st $t0,16($key)
- st $t1,20($key)
- lghi $t0,192
- cr $bits,$t0
- jne .Lnot192
-
- llill $mask,0xff
- lghi $t3,0 # i=0
- lghi $rounds,12
- st $rounds,240($key)
- lghi $rounds,8
-
- srlg $i1,$t1,8
- srlg $i2,$t1,16
- srlg $i3,$t1,24
- nr $t1,$mask
- nr $i1,$mask
- nr $i2,$mask
-
-.align 16
-.L192_loop:
- la $t1,0($t1,$tbl)
- la $i1,0($i1,$tbl)
- la $i2,0($i2,$tbl)
- la $i3,0($i3,$tbl)
- icm $t1,2,0($t1) # Te4[rk[5]>>0]<<8
- icm $t1,4,0($i1) # Te4[rk[5]>>8]<<16
- icm $t1,8,0($i2) # Te4[rk[5]>>16]<<24
- icm $t1,1,0($i3) # Te4[rk[5]>>24]
- x $t1,256($t3,$tbl) # rcon[i]
- xr $s0,$t1 # rk[6]=rk[0]^...
- xr $s1,$s0 # rk[7]=rk[1]^rk[6]
- xr $s2,$s1 # rk[8]=rk[2]^rk[7]
- xr $s3,$s2 # rk[9]=rk[3]^rk[8]
-
- st $s0,24($key)
- st $s1,28($key)
- st $s2,32($key)
- st $s3,36($key)
- brct $rounds,.L192_continue
- lghi $t0,12
- lghi %r2,0
- lm${g} %r4,%r13,4*$SIZE_T($sp)
- br $ra
-
-.align 16
-.L192_continue:
- lgr $t1,$s3
- x $t1,16($key) # rk[10]=rk[4]^rk[9]
- st $t1,40($key)
- x $t1,20($key) # rk[11]=rk[5]^rk[10]
- st $t1,44($key)
-
- srlg $i1,$t1,8
- srlg $i2,$t1,16
- srlg $i3,$t1,24
- nr $t1,$mask
- nr $i1,$mask
- nr $i2,$mask
-
- la $key,24($key) # key+=6
- la $t3,4($t3) # i++
- j .L192_loop
-
-.align 16
-.Lnot192:
- llgf $t0,24($inp)
- llgf $t1,28($inp)
- st $t0,24($key)
- st $t1,28($key)
- llill $mask,0xff
- lghi $t3,0 # i=0
- lghi $rounds,14
- st $rounds,240($key)
- lghi $rounds,7
-
- srlg $i1,$t1,8
- srlg $i2,$t1,16
- srlg $i3,$t1,24
- nr $t1,$mask
- nr $i1,$mask
- nr $i2,$mask
-
-.align 16
-.L256_loop:
- la $t1,0($t1,$tbl)
- la $i1,0($i1,$tbl)
- la $i2,0($i2,$tbl)
- la $i3,0($i3,$tbl)
- icm $t1,2,0($t1) # Te4[rk[7]>>0]<<8
- icm $t1,4,0($i1) # Te4[rk[7]>>8]<<16
- icm $t1,8,0($i2) # Te4[rk[7]>>16]<<24
- icm $t1,1,0($i3) # Te4[rk[7]>>24]
- x $t1,256($t3,$tbl) # rcon[i]
- xr $s0,$t1 # rk[8]=rk[0]^...
- xr $s1,$s0 # rk[9]=rk[1]^rk[8]
- xr $s2,$s1 # rk[10]=rk[2]^rk[9]
- xr $s3,$s2 # rk[11]=rk[3]^rk[10]
- st $s0,32($key)
- st $s1,36($key)
- st $s2,40($key)
- st $s3,44($key)
- brct $rounds,.L256_continue
- lghi $t0,14
- lghi %r2,0
- lm${g} %r4,%r13,4*$SIZE_T($sp)
- br $ra
-
-.align 16
-.L256_continue:
- lgr $t1,$s3 # temp=rk[11]
- srlg $i1,$s3,8
- srlg $i2,$s3,16
- srlg $i3,$s3,24
- nr $t1,$mask
- nr $i1,$mask
- nr $i2,$mask
- la $t1,0($t1,$tbl)
- la $i1,0($i1,$tbl)
- la $i2,0($i2,$tbl)
- la $i3,0($i3,$tbl)
- llgc $t1,0($t1) # Te4[rk[11]>>0]
- icm $t1,2,0($i1) # Te4[rk[11]>>8]<<8
- icm $t1,4,0($i2) # Te4[rk[11]>>16]<<16
- icm $t1,8,0($i3) # Te4[rk[11]>>24]<<24
- x $t1,16($key) # rk[12]=rk[4]^...
- st $t1,48($key)
- x $t1,20($key) # rk[13]=rk[5]^rk[12]
- st $t1,52($key)
- x $t1,24($key) # rk[14]=rk[6]^rk[13]
- st $t1,56($key)
- x $t1,28($key) # rk[15]=rk[7]^rk[14]
- st $t1,60($key)
-
- srlg $i1,$t1,8
- srlg $i2,$t1,16
- srlg $i3,$t1,24
- nr $t1,$mask
- nr $i1,$mask
- nr $i2,$mask
-
- la $key,32($key) # key+=8
- la $t3,4($t3) # i++
- j .L256_loop
-
-.Lminus1:
- lghi %r2,-1
- br $ra
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-# void AES_set_decrypt_key(const unsigned char *in, int bits,
-# AES_KEY *key) {
-.globl private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,\@function
-.align 16
-private_AES_set_decrypt_key:
- #st${g} $key,4*$SIZE_T($sp) # I rely on AES_set_encrypt_key to
- st${g} $ra,14*$SIZE_T($sp) # save non-volatile registers and $key!
- bras $ra,_s390x_AES_set_encrypt_key
- #l${g} $key,4*$SIZE_T($sp)
- l${g} $ra,14*$SIZE_T($sp)
- ltgr %r2,%r2
- bnzr $ra
-___
-$code.=<<___ if (!$softonly);
- #l $t0,240($key)
- lhi $t1,16
- cr $t0,$t1
- jl .Lgo
- oill $t0,0x80 # set "decrypt" bit
- st $t0,240($key)
- br $ra
-___
-$code.=<<___;
-.align 16
-.Lgo: lgr $rounds,$t0 #llgf $rounds,240($key)
- la $i1,0($key)
- sllg $i2,$rounds,4
- la $i2,0($i2,$key)
- srl $rounds,1
- lghi $t1,-16
-
-.align 16
-.Linv: lmg $s0,$s1,0($i1)
- lmg $s2,$s3,0($i2)
- stmg $s0,$s1,0($i2)
- stmg $s2,$s3,0($i1)
- la $i1,16($i1)
- la $i2,0($t1,$i2)
- brct $rounds,.Linv
-___
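
The .Linv loop above swaps whole 16-byte round keys pairwise from the two ends of the schedule, so the decryption code can walk it in the same direction as encryption. A minimal C restatement (reverse_round_keys is an illustrative name):

    #include <stdint.h>

    /* Reverse the order of the rounds+1 round keys in place, four
     * 32-bit words at a time, as .Linv does with lmg/stmg pairs. */
    static void reverse_round_keys(uint32_t *rk, int rounds)
    {
        for (int i = 0, j = 4 * rounds; i < j; i += 4, j -= 4)
            for (int k = 0; k < 4; k++) {
                uint32_t t = rk[i + k];
                rk[i + k] = rk[j + k];
                rk[j + k] = t;
            }
    }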
-$mask80=$i1;
-$mask1b=$i2;
-$maskfe=$i3;
-$code.=<<___;
- llgf $rounds,240($key)
- aghi $rounds,-1
- sll $rounds,2 # (rounds-1)*4
- llilh $mask80,0x8080
- llilh $mask1b,0x1b1b
- llilh $maskfe,0xfefe
- oill $mask80,0x8080
- oill $mask1b,0x1b1b
- oill $maskfe,0xfefe
-
-.align 16
-.Lmix: l $s0,16($key) # tp1
- lr $s1,$s0
- ngr $s1,$mask80
- srlg $t1,$s1,7
- slr $s1,$t1
- nr $s1,$mask1b
- sllg $t1,$s0,1
- nr $t1,$maskfe
- xr $s1,$t1 # tp2
-
- lr $s2,$s1
- ngr $s2,$mask80
- srlg $t1,$s2,7
- slr $s2,$t1
- nr $s2,$mask1b
- sllg $t1,$s1,1
- nr $t1,$maskfe
- xr $s2,$t1 # tp4
-
- lr $s3,$s2
- ngr $s3,$mask80
- srlg $t1,$s3,7
- slr $s3,$t1
- nr $s3,$mask1b
- sllg $t1,$s2,1
- nr $t1,$maskfe
- xr $s3,$t1 # tp8
-
- xr $s1,$s0 # tp2^tp1
- xr $s2,$s0 # tp4^tp1
- rll $s0,$s0,24 # = ROTATE(tp1,8)
- xr $s2,$s3 # ^=tp8
- xr $s0,$s1 # ^=tp2^tp1
- xr $s1,$s3 # tp2^tp1^tp8
- xr $s0,$s2 # ^=tp4^tp1^tp8
- rll $s1,$s1,8
- rll $s2,$s2,16
- xr $s0,$s1 # ^= ROTATE(tp8^tp2^tp1,24)
- rll $s3,$s3,24
- xr $s0,$s2 # ^= ROTATE(tp8^tp4^tp1,16)
- xr $s0,$s3 # ^= ROTATE(tp8,8)
-
- st $s0,16($key)
- la $key,4($key)
- brct $rounds,.Lmix
-
- lm${g} %r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key!
- lghi %r2,0
- br $ra
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-___
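
The .Lmix loop above is InvMixColumns applied to every inner round-key word, built on a branch-free xtime: the 0x80808080 mask collects the per-byte carries, (m - (m>>7)) & 0x1b1b1b1b forms the GF(2^8) reduction, and (x<<1) & 0xfefefefe is the shift with carries discarded. A minimal C sketch of the same computation (function names are illustrative):

    #include <stdint.h>

    /* Multiply all four packed bytes by 2 in GF(2^8), branch-free. */
    static uint32_t xtime4(uint32_t x)
    {
        uint32_t m = x & 0x80808080u;
        return ((x << 1) & 0xfefefefeu) ^ ((m - (m >> 7)) & 0x1b1b1b1bu);
    }

    static uint32_t rotl(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* InvMixColumns on one round-key word, matching the
     * tp1/tp2/tp4/tp8 bookkeeping in .Lmix. */
    static uint32_t inv_mix_column(uint32_t tp1)
    {
        uint32_t tp2 = xtime4(tp1);
        uint32_t tp4 = xtime4(tp2);
        uint32_t tp8 = xtime4(tp4);
        uint32_t tp9 = tp8 ^ tp1;
        uint32_t tpb = tp9 ^ tp2;
        uint32_t tpd = tp9 ^ tp4;
        uint32_t tpe = tp8 ^ tp4 ^ tp2;
        return tpe ^ rotl(tpb, 8) ^ rotl(tpd, 16) ^ rotl(tp9, 24);
    }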
-
-########################################################################
-# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
-# size_t length, const AES_KEY *key,
-# unsigned char *ivec, const int enc)
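
For orientation, both the kmc path and the software path below implement the classic CBC chaining loop. A minimal C sketch, assuming hypothetical one-block primitives in place of _s390x_AES_encrypt/_s390x_AES_decrypt and a length that is a multiple of 16:

    #include <stddef.h>
    #include <string.h>

    void block_encrypt(const unsigned char in[16], unsigned char out[16], const void *key);
    void block_decrypt(const unsigned char in[16], unsigned char out[16], const void *key);

    static void cbc_sketch(const unsigned char *in, unsigned char *out,
                           size_t len, const void *key,
                           unsigned char ivec[16], int enc)
    {
        unsigned char tmp[16];

        for (size_t i = 0; i < len; i += 16) {
            if (enc) {
                for (int j = 0; j < 16; j++)
                    tmp[j] = in[i + j] ^ ivec[j];   /* chain previous block */
                block_encrypt(tmp, out + i, key);
                memcpy(ivec, out + i, 16);
            } else {
                memcpy(tmp, in + i, 16);            /* keep ct for chaining */
                block_decrypt(in + i, out + i, key);
                for (int j = 0; j < 16; j++)
                    out[i + j] ^= ivec[j];
                memcpy(ivec, tmp, 16);
            }
        }
    }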
-{
-my $inp="%r2";
-my $out="%r4"; # length and out are swapped
-my $len="%r3";
-my $key="%r5";
-my $ivp="%r6";
-
-$code.=<<___;
-.globl AES_cbc_encrypt
-.type AES_cbc_encrypt,\@function
-.align 16
-AES_cbc_encrypt:
- xgr %r3,%r4 # flip %r3 and %r4, out and len
- xgr %r4,%r3
- xgr %r3,%r4
-___
-$code.=<<___ if (!$softonly);
- lhi %r0,16
- cl %r0,240($key)
- jh .Lcbc_software
-
- lg %r0,0($ivp) # copy ivec
- lg %r1,8($ivp)
- stmg %r0,%r1,16($sp)
- lmg %r0,%r1,0($key) # copy key, cover 256 bit
- stmg %r0,%r1,32($sp)
- lmg %r0,%r1,16($key)
- stmg %r0,%r1,48($sp)
- l %r0,240($key) # load kmc code
- lghi $key,15 # res=len%16, len-=res;
- ngr $key,$len
- sl${g}r $len,$key
- la %r1,16($sp) # parameter block - ivec || key
- jz .Lkmc_truncated
- .long 0xb92f0042 # kmc %r4,%r2
- brc 1,.-4 # pay attention to "partial completion"
- ltr $key,$key
- jnz .Lkmc_truncated
-.Lkmc_done:
- lmg %r0,%r1,16($sp) # copy ivec to caller
- stg %r0,0($ivp)
- stg %r1,8($ivp)
- br $ra
-.align 16
-.Lkmc_truncated:
- ahi $key,-1 # it's the way it's encoded in mvc
- tmll %r0,0x80
- jnz .Lkmc_truncated_dec
- lghi %r1,0
- stg %r1,16*$SIZE_T($sp)
- stg %r1,16*$SIZE_T+8($sp)
- bras %r1,1f
- mvc 16*$SIZE_T(1,$sp),0($inp)
-1: ex $key,0(%r1)
- la %r1,16($sp) # restore parameter block
- la $inp,16*$SIZE_T($sp)
- lghi $len,16
- .long 0xb92f0042 # kmc %r4,%r2
- j .Lkmc_done
-.align 16
-.Lkmc_truncated_dec:
- st${g} $out,4*$SIZE_T($sp)
- la $out,16*$SIZE_T($sp)
- lghi $len,16
- .long 0xb92f0042 # kmc %r4,%r2
- l${g} $out,4*$SIZE_T($sp)
- bras %r1,2f
- mvc 0(1,$out),16*$SIZE_T($sp)
-2: ex $key,0(%r1)
- j .Lkmc_done
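
The bras/ex pairs above are the usual s390x idiom for a variable-length mvc: ex patches the mvc's length byte at run time (encoded as length-1, hence the ahi $key,-1). Functionally, the truncated-block handling on the encrypt side amounts to the sketch below (names are illustrative):

    #include <string.h>

    /* Copy the res = len % 16 trailing input bytes into a zeroed
     * 16-byte block, which is then run through one more kmc call. */
    static void copy_tail(unsigned char block[16],
                          const unsigned char *in, size_t res)
    {
        memset(block, 0, 16);
        memcpy(block, in, res);
    }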
-.align 16
-.Lcbc_software:
-___
-$code.=<<___;
- stm${g} $key,$ra,5*$SIZE_T($sp)
- lhi %r0,0
- cl %r0,`$stdframe+$SIZE_T-4`($sp)
- je .Lcbc_decrypt
-
- larl $tbl,AES_Te
-
- llgf $s0,0($ivp)
- llgf $s1,4($ivp)
- llgf $s2,8($ivp)
- llgf $s3,12($ivp)
-
- lghi $t0,16
- sl${g}r $len,$t0
- brc 4,.Lcbc_enc_tail # if borrow
-.Lcbc_enc_loop:
- stm${g} $inp,$out,2*$SIZE_T($sp)
- x $s0,0($inp)
- x $s1,4($inp)
- x $s2,8($inp)
- x $s3,12($inp)
- lgr %r4,$key
-
- bras $ra,_s390x_AES_encrypt
-
- lm${g} $inp,$key,2*$SIZE_T($sp)
- st $s0,0($out)
- st $s1,4($out)
- st $s2,8($out)
- st $s3,12($out)
-
- la $inp,16($inp)
- la $out,16($out)
- lghi $t0,16
- lt${g}r $len,$len
- jz .Lcbc_enc_done
- sl${g}r $len,$t0
- brc 4,.Lcbc_enc_tail # if borrow
- j .Lcbc_enc_loop
-.align 16
-.Lcbc_enc_done:
- l${g} $ivp,6*$SIZE_T($sp)
- st $s0,0($ivp)
- st $s1,4($ivp)
- st $s2,8($ivp)
- st $s3,12($ivp)
-
- lm${g} %r7,$ra,7*$SIZE_T($sp)
- br $ra
-
-.align 16
-.Lcbc_enc_tail:
- aghi $len,15
- lghi $t0,0
- stg $t0,16*$SIZE_T($sp)
- stg $t0,16*$SIZE_T+8($sp)
- bras $t1,3f
- mvc 16*$SIZE_T(1,$sp),0($inp)
-3: ex $len,0($t1)
- lghi $len,0
- la $inp,16*$SIZE_T($sp)
- j .Lcbc_enc_loop
-
-.align 16
-.Lcbc_decrypt:
- larl $tbl,AES_Td
-
- lg $t0,0($ivp)
- lg $t1,8($ivp)
- stmg $t0,$t1,16*$SIZE_T($sp)
-
-.Lcbc_dec_loop:
- stm${g} $inp,$out,2*$SIZE_T($sp)
- llgf $s0,0($inp)
- llgf $s1,4($inp)
- llgf $s2,8($inp)
- llgf $s3,12($inp)
- lgr %r4,$key
-
- bras $ra,_s390x_AES_decrypt
-
- lm${g} $inp,$key,2*$SIZE_T($sp)
- sllg $s0,$s0,32
- sllg $s2,$s2,32
- lr $s0,$s1
- lr $s2,$s3
-
- lg $t0,0($inp)
- lg $t1,8($inp)
- xg $s0,16*$SIZE_T($sp)
- xg $s2,16*$SIZE_T+8($sp)
- lghi $s1,16
- sl${g}r $len,$s1
- brc 4,.Lcbc_dec_tail # if borrow
- brc 2,.Lcbc_dec_done # if zero
- stg $s0,0($out)
- stg $s2,8($out)
- stmg $t0,$t1,16*$SIZE_T($sp)
-
- la $inp,16($inp)
- la $out,16($out)
- j .Lcbc_dec_loop
-
-.Lcbc_dec_done:
- stg $s0,0($out)
- stg $s2,8($out)
-.Lcbc_dec_exit:
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- stmg $t0,$t1,0($ivp)
-
- br $ra
-
-.align 16
-.Lcbc_dec_tail:
- aghi $len,15
- stg $s0,16*$SIZE_T($sp)
- stg $s2,16*$SIZE_T+8($sp)
- bras $s1,4f
- mvc 0(1,$out),16*$SIZE_T($sp)
-4: ex $len,0($s1)
- j .Lcbc_dec_exit
-.size AES_cbc_encrypt,.-AES_cbc_encrypt
-___
-}
-########################################################################
-# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out,
-# size_t blocks, const AES_KEY *key,
-# const unsigned char *ivec)
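
The distinguishing detail of this routine is the counter update: only the low 32 bits of the IV are incremented (see the "32-bit increment, preserves upper half" comments below). A minimal C sketch, assuming a hypothetical one-block encrypt primitive:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    void block_encrypt(const unsigned char in[16], unsigned char out[16], const void *key);

    static void ctr32_sketch(const unsigned char *in, unsigned char *out,
                             size_t blocks, const void *key,
                             const unsigned char ivec[16])
    {
        unsigned char ctr[16], pad[16];
        uint32_t c;

        memcpy(ctr, ivec, 16);
        /* big-endian 32-bit counter in the last four bytes of the IV */
        c = (uint32_t)ctr[12] << 24 | (uint32_t)ctr[13] << 16 |
            (uint32_t)ctr[14] <<  8 | (uint32_t)ctr[15];
        while (blocks--) {
            block_encrypt(ctr, pad, key);
            for (int j = 0; j < 16; j++)
                *out++ = *in++ ^ pad[j];
            c++;                    /* 32-bit wrap; upper 96 bits kept */
            ctr[12] = (unsigned char)(c >> 24);
            ctr[13] = (unsigned char)(c >> 16);
            ctr[14] = (unsigned char)(c >>  8);
            ctr[15] = (unsigned char)c;
        }
    }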
-{
-my $inp="%r2";
-my $out="%r4"; # blocks and out are swapped
-my $len="%r3";
-my $key="%r5"; my $iv0="%r5";
-my $ivp="%r6";
-my $fp ="%r7";
-
-$code.=<<___;
-.globl AES_ctr32_encrypt
-.type AES_ctr32_encrypt,\@function
-.align 16
-AES_ctr32_encrypt:
- xgr %r3,%r4 # flip %r3 and %r4, $out and $len
- xgr %r4,%r3
- xgr %r3,%r4
- llgfr $len,$len # safe in ctr32 subroutine even in 64-bit case
-___
-$code.=<<___ if (!$softonly);
- l %r0,240($key)
- lhi %r1,16
- clr %r0,%r1
- jl .Lctr32_software
-
- stm${g} %r6,$s3,6*$SIZE_T($sp)
-
- slgr $out,$inp
- la %r1,0($key) # %r1 is permanent copy of $key
- lg $iv0,0($ivp) # load ivec
- lg $ivp,8($ivp)
-
- # prepare and allocate stack frame at the top of 4K page
- # with 1K reserved for eventual signal handling
-	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
- lghi $s1,-4096
- algr $s0,$sp
- lgr $fp,$sp
- ngr $s0,$s1 # align at page boundary
- slgr $fp,$s0 # total buffer size
- lgr $s2,$sp
-	lghi	$s1,1024+16		# sl[g]fi needs extended-immediate facility
- slgr $fp,$s1 # deduct reservation to get usable buffer size
-					# buffer size is at least 256 and at most 3072+256-16
-
- la $sp,1024($s0) # alloca
- srlg $fp,$fp,4 # convert bytes to blocks, minimum 16
- st${g} $s2,0($sp) # back-chain
- st${g} $fp,$SIZE_T($sp)
-
- slgr $len,$fp
- brc 1,.Lctr32_hw_switch # not zero, no borrow
- algr $fp,$len # input is shorter than allocated buffer
- lghi $len,0
- st${g} $fp,$SIZE_T($sp)
-
-.Lctr32_hw_switch:
-___
-$code.=<<___ if (0); ######### kmctr code was measured to be ~12% slower
- larl $s0,OPENSSL_s390xcap_P
- lg $s0,8($s0)
-	tmhh	$s0,0x0004			# check for message-security-assist-4
- jz .Lctr32_km_loop
-
- llgfr $s0,%r0
- lgr $s1,%r1
- lghi %r0,0
- la %r1,16($sp)
- .long 0xb92d2042 # kmctr %r4,%r2,%r2
-
- llihh %r0,0x8000 # check if kmctr supports the function code
- srlg %r0,%r0,0($s0)
- ng %r0,16($sp)
- lgr %r0,$s0
- lgr %r1,$s1
- jz .Lctr32_km_loop
-
-####### kmctr code
- algr $out,$inp # restore $out
-	lgr	$s1,$len			# $s1 takes over $len
- j .Lctr32_kmctr_loop
-.align 16
-.Lctr32_kmctr_loop:
- la $s2,16($sp)
- lgr $s3,$fp
-.Lctr32_kmctr_prepare:
- stg $iv0,0($s2)
- stg $ivp,8($s2)
- la $s2,16($s2)
- ahi $ivp,1 # 32-bit increment, preserves upper half
- brct $s3,.Lctr32_kmctr_prepare
-
- #la $inp,0($inp) # inp
- sllg $len,$fp,4 # len
- #la $out,0($out) # out
- la $s2,16($sp) # iv
- .long 0xb92da042 # kmctr $out,$s2,$inp
- brc 1,.-4 # pay attention to "partial completion"
-
- slgr $s1,$fp
- brc 1,.Lctr32_kmctr_loop # not zero, no borrow
- algr $fp,$s1
- lghi $s1,0
- brc 4+1,.Lctr32_kmctr_loop # not zero
-
- l${g} $sp,0($sp)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-___
-$code.=<<___;
-.Lctr32_km_loop:
- la $s2,16($sp)
- lgr $s3,$fp
-.Lctr32_km_prepare:
- stg $iv0,0($s2)
- stg $ivp,8($s2)
- la $s2,16($s2)
- ahi $ivp,1 # 32-bit increment, preserves upper half
- brct $s3,.Lctr32_km_prepare
-
- la $s0,16($sp) # inp
- sllg $s1,$fp,4 # len
- la $s2,16($sp) # out
- .long 0xb92e00a8 # km %r10,%r8
- brc 1,.-4 # pay attention to "partial completion"
-
- la $s2,16($sp)
- lgr $s3,$fp
- slgr $s2,$inp
-.Lctr32_km_xor:
- lg $s0,0($inp)
- lg $s1,8($inp)
- xg $s0,0($s2,$inp)
- xg $s1,8($s2,$inp)
- stg $s0,0($out,$inp)
- stg $s1,8($out,$inp)
- la $inp,16($inp)
- brct $s3,.Lctr32_km_xor
-
- slgr $len,$fp
- brc 1,.Lctr32_km_loop # not zero, no borrow
- algr $fp,$len
- lghi $len,0
- brc 4+1,.Lctr32_km_loop # not zero
-
- l${g} $s0,0($sp)
- l${g} $s1,$SIZE_T($sp)
- la $s2,16($sp)
-.Lctr32_km_zap:
- stg $s0,0($s2)
- stg $s0,8($s2)
- la $s2,16($s2)
- brct $s1,.Lctr32_km_zap
-
- la $sp,0($s0)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-.Lctr32_software:
-___
-$code.=<<___;
- stm${g} $key,$ra,5*$SIZE_T($sp)
- sl${g}r $inp,$out
- larl $tbl,AES_Te
- llgf $t1,12($ivp)
-
-.Lctr32_loop:
- stm${g} $inp,$out,2*$SIZE_T($sp)
- llgf $s0,0($ivp)
- llgf $s1,4($ivp)
- llgf $s2,8($ivp)
- lgr $s3,$t1
- st $t1,16*$SIZE_T($sp)
- lgr %r4,$key
-
- bras $ra,_s390x_AES_encrypt
-
- lm${g} $inp,$ivp,2*$SIZE_T($sp)
- llgf $t1,16*$SIZE_T($sp)
- x $s0,0($inp,$out)
- x $s1,4($inp,$out)
- x $s2,8($inp,$out)
- x $s3,12($inp,$out)
- stm $s0,$s3,0($out)
-
- la $out,16($out)
- ahi $t1,1 # 32-bit increment
- brct $len,.Lctr32_loop
-
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_ctr32_encrypt,.-AES_ctr32_encrypt
-___
-}
-
-########################################################################
-# void AES_xts_encrypt(const char *inp,char *out,size_t len,
-# const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
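
Before the implementation, it may help to see the tweak arithmetic in isolation: XTS multiplies the tweak by x in GF(2^128) for each successive block, reducing with x^128 + x^7 + x^2 + x + 1, hence the 0x87 constant that appears throughout. A minimal C sketch on the same little-endian two-limb representation the code uses after lrvg (xts_double is an illustrative name):

    #include <stdint.h>

    /* Double a 128-bit tweak (lo/hi limbs) in GF(2^128), branch-free:
     * the srag-style arithmetic shift broadcasts the pre-shift top
     * bit, which masks the 0x87 reduction constant. */
    static void xts_double(uint64_t *lo, uint64_t *hi)
    {
        uint64_t rem   = (uint64_t)((int64_t)*hi >> 63) & 0x87;
        uint64_t carry = *lo >> 63;
        *hi = (*hi << 1) | carry;   /* algr/alcgr pair: 128-bit shift */
        *lo = (*lo << 1) ^ rem;     /* fold dropped bit via x^7+x^2+x+1 */
    }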
-{
-my $inp="%r2";
-my $out="%r4"; # len and out are swapped
-my $len="%r3";
-my $key1="%r5"; # $i1
-my $key2="%r6"; # $i2
-my $fp="%r7"; # $i3
-my $tweak=16*$SIZE_T+16; # or $stdframe-16, bottom of the frame...
-
-$code.=<<___;
-.type _s390x_xts_km,\@function
-.align 16
-_s390x_xts_km:
-___
-$code.=<<___ if(1);
- llgfr $s0,%r0 # put aside the function code
- lghi $s1,0x7f
- nr $s1,%r0
- lghi %r0,0 # query capability vector
- la %r1,$tweak-16($sp)
- .long 0xb92e0042 # km %r4,%r2
- llihh %r1,0x8000
- srlg %r1,%r1,32($s1) # check for 32+function code
- ng %r1,$tweak-16($sp)
- lgr %r0,$s0 # restore the function code
- la %r1,0($key1) # restore $key1
- jz .Lxts_km_vanilla
-
- lmg $i2,$i3,$tweak($sp) # put aside the tweak value
- algr $out,$inp
-
- oill %r0,32 # switch to xts function code
- aghi $s1,-18 #
- sllg $s1,$s1,3 # (function code - 18)*8, 0 or 16
- la %r1,$tweak-16($sp)
- slgr %r1,$s1 # parameter block position
- lmg $s0,$s3,0($key1) # load 256 bits of key material,
- stmg $s0,$s3,0(%r1) # and copy it to parameter block.
-					# yes, it contains junk and overlaps
-					# with the tweak in the 128-bit case;
-					# this is done to avoid a conditional
-					# branch.
- stmg $i2,$i3,$tweak($sp) # "re-seat" the tweak value
-
- .long 0xb92e0042 # km %r4,%r2
- brc 1,.-4 # pay attention to "partial completion"
-
- lrvg $s0,$tweak+0($sp) # load the last tweak
- lrvg $s1,$tweak+8($sp)
- stmg %r0,%r3,$tweak-32($sp) # wipe copy of the key
-
- nill %r0,0xffdf # switch back to original function code
- la %r1,0($key1) # restore pointer to $key1
- slgr $out,$inp
-
- llgc $len,2*$SIZE_T-1($sp)
- nill $len,0x0f # $len%=16
- br $ra
-
-.align 16
-.Lxts_km_vanilla:
-___
-$code.=<<___;
- # prepare and allocate stack frame at the top of 4K page
- # with 1K reserved for eventual signal handling
-	lghi	$s0,-1024-256-16	# guarantee at least a 256-byte buffer
- lghi $s1,-4096
- algr $s0,$sp
- lgr $fp,$sp
- ngr $s0,$s1 # align at page boundary
- slgr $fp,$s0 # total buffer size
- lgr $s2,$sp
-	lghi	$s1,1024+16		# sl[g]fi needs extended-immediate facility
- slgr $fp,$s1 # deduct reservation to get usable buffer size
-					# buffer size is at least 256 and at most 3072+256-16
-
- la $sp,1024($s0) # alloca
- nill $fp,0xfff0 # round to 16*n
- st${g} $s2,0($sp) # back-chain
- nill $len,0xfff0 # redundant
- st${g} $fp,$SIZE_T($sp)
-
- slgr $len,$fp
- brc 1,.Lxts_km_go # not zero, no borrow
- algr $fp,$len # input is shorter than allocated buffer
- lghi $len,0
- st${g} $fp,$SIZE_T($sp)
-
-.Lxts_km_go:
- lrvg $s0,$tweak+0($s2) # load the tweak value in little-endian
- lrvg $s1,$tweak+8($s2)
-
- la $s2,16($sp) # vector of ascending tweak values
- slgr $s2,$inp
- srlg $s3,$fp,4
- j .Lxts_km_start
-
-.Lxts_km_loop:
- la $s2,16($sp)
- slgr $s2,$inp
- srlg $s3,$fp,4
-.Lxts_km_prepare:
- lghi $i1,0x87
- srag $i2,$s1,63 # broadcast upper bit
- ngr $i1,$i2 # rem
- algr $s0,$s0
- alcgr $s1,$s1
- xgr $s0,$i1
-.Lxts_km_start:
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
- stg $i1,0($s2,$inp)
- stg $i2,8($s2,$inp)
- xg $i1,0($inp)
- xg $i2,8($inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
- la $inp,16($inp)
- brct $s3,.Lxts_km_prepare
-
- slgr $inp,$fp # rewind $inp
- la $s2,0($out,$inp)
- lgr $s3,$fp
- .long 0xb92e00aa # km $s2,$s2
- brc 1,.-4 # pay attention to "partial completion"
-
- la $s2,16($sp)
- slgr $s2,$inp
- srlg $s3,$fp,4
-.Lxts_km_xor:
- lg $i1,0($out,$inp)
- lg $i2,8($out,$inp)
- xg $i1,0($s2,$inp)
- xg $i2,8($s2,$inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
- la $inp,16($inp)
- brct $s3,.Lxts_km_xor
-
- slgr $len,$fp
- brc 1,.Lxts_km_loop # not zero, no borrow
- algr $fp,$len
- lghi $len,0
- brc 4+1,.Lxts_km_loop # not zero
-
- l${g} $i1,0($sp) # back-chain
- llgf $fp,`2*$SIZE_T-4`($sp) # bytes used
- la $i2,16($sp)
- srlg $fp,$fp,4
-.Lxts_km_zap:
- stg $i1,0($i2)
- stg $i1,8($i2)
- la $i2,16($i2)
- brct $fp,.Lxts_km_zap
-
- la $sp,0($i1)
- llgc $len,2*$SIZE_T-1($i1)
- nill $len,0x0f # $len%=16
- bzr $ra
-
- # generate one more tweak...
- lghi $i1,0x87
- srag $i2,$s1,63 # broadcast upper bit
- ngr $i1,$i2 # rem
- algr $s0,$s0
- alcgr $s1,$s1
- xgr $s0,$i1
-
- ltr $len,$len # clear zero flag
- br $ra
-.size _s390x_xts_km,.-_s390x_xts_km
-
-.globl AES_xts_encrypt
-.type AES_xts_encrypt,\@function
-.align 16
-AES_xts_encrypt:
- xgr %r3,%r4 # flip %r3 and %r4, $out and $len
- xgr %r4,%r3
- xgr %r3,%r4
-___
-$code.=<<___ if ($SIZE_T==4);
- llgfr $len,$len
-___
-$code.=<<___;
- st${g} $len,1*$SIZE_T($sp) # save copy of $len
-	srag	$len,$len,4		# formally wrong, because it extends
-					# the sign bit, but who can afford to
-					# process more than 2^63-1 bytes?
-					# srag is used because it sets the
-					# condition code...
- bcr 8,$ra # abort if zero (i.e. less than 16)
-___
-$code.=<<___ if (!$softonly);
- llgf %r0,240($key2)
- lhi %r1,16
- clr %r0,%r1
- jl .Lxts_enc_software
-
- st${g} $ra,5*$SIZE_T($sp)
- stm${g} %r6,$s3,6*$SIZE_T($sp)
-
- sllg $len,$len,4 # $len&=~15
- slgr $out,$inp
-
- # generate the tweak value
- l${g} $s3,$stdframe($sp) # pointer to iv
- la $s2,$tweak($sp)
- lmg $s0,$s1,0($s3)
- lghi $s3,16
- stmg $s0,$s1,0($s2)
- la %r1,0($key2) # $key2 is not needed anymore
- .long 0xb92e00aa # km $s2,$s2, generate the tweak
- brc 1,.-4 # can this happen?
-
- l %r0,240($key1)
- la %r1,0($key1) # $key1 is not needed anymore
- bras $ra,_s390x_xts_km
- jz .Lxts_enc_km_done
-
- aghi $inp,-16 # take one step back
- la $i3,0($out,$inp) # put aside real $out
-.Lxts_enc_km_steal:
- llgc $i1,16($inp)
- llgc $i2,0($out,$inp)
- stc $i1,0($out,$inp)
- stc $i2,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_enc_km_steal
-
- la $s2,0($i3)
- lghi $s3,16
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
- xg $i1,0($s2)
- xg $i2,8($s2)
- stg $i1,0($s2)
- stg $i2,8($s2)
- .long 0xb92e00aa # km $s2,$s2
- brc 1,.-4 # can this happen?
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
- xg $i1,0($i3)
- xg $i2,8($i3)
- stg $i1,0($i3)
- stg $i2,8($i3)
-
-.Lxts_enc_km_done:
- stg $sp,$tweak+0($sp) # wipe tweak
- stg $sp,$tweak+8($sp)
- l${g} $ra,5*$SIZE_T($sp)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-.Lxts_enc_software:
-___
-$code.=<<___;
- stm${g} %r6,$ra,6*$SIZE_T($sp)
-
- slgr $out,$inp
-
- l${g} $s3,$stdframe($sp) # ivp
- llgf $s0,0($s3) # load iv
- llgf $s1,4($s3)
- llgf $s2,8($s3)
- llgf $s3,12($s3)
- stm${g} %r2,%r5,2*$SIZE_T($sp)
- la $key,0($key2)
- larl $tbl,AES_Te
- bras $ra,_s390x_AES_encrypt # generate the tweak
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- stm $s0,$s3,$tweak($sp) # save the tweak
- j .Lxts_enc_enter
-
-.align 16
-.Lxts_enc_loop:
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak+0($sp) # save the tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak+8($sp)
- llgfr $s3,$s3
- la $inp,16($inp) # $inp+=16
-.Lxts_enc_enter:
- x $s0,0($inp) # ^=*($inp)
- x $s1,4($inp)
- x $s2,8($inp)
- x $s3,12($inp)
- stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing
- la $key,0($key1)
- bras $ra,_s390x_AES_encrypt
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- x $s0,$tweak+0($sp) # ^=tweak
- x $s1,$tweak+4($sp)
- x $s2,$tweak+8($sp)
- x $s3,$tweak+12($sp)
- st $s0,0($out,$inp)
- st $s1,4($out,$inp)
- st $s2,8($out,$inp)
- st $s3,12($out,$inp)
- brct${g} $len,.Lxts_enc_loop
-
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%16
- jz .Lxts_enc_done
-
- la $i3,0($inp,$out) # put aside real $out
-.Lxts_enc_steal:
- llgc %r0,16($inp)
- llgc %r1,0($out,$inp)
- stc %r0,0($out,$inp)
- stc %r1,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_enc_steal
- la $out,0($i3) # restore real $out
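
The steal loop just performed the standard XTS ciphertext-stealing shuffle. As a plain C restatement (tail is the number of trailing bytes; last points at the last full ciphertext block already written; names are illustrative):

    #include <stddef.h>

    /* XTS ciphertext stealing, encrypt side: the short final output
     * borrows the head of the last full ciphertext block, whose tail
     * is refilled with the remaining plaintext bytes. */
    static void xts_steal_enc(unsigned char *last /* last full ct block */,
                              const unsigned char *in /* partial pt */,
                              unsigned char *out /* partial ct slot */,
                              size_t tail)
    {
        for (size_t j = 0; j < tail; j++) {
            out[j]  = last[j];   /* move stolen ciphertext down */
            last[j] = in[j];     /* splice plaintext tail into last block */
        }
        /* caller then XORs 'last' with the final tweak and
         * re-encrypts it in place, as the code below does */
    }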
-
- # generate last tweak...
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak+0($sp) # save the tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak+8($sp)
- llgfr $s3,$s3
-
-	x	$s0,0($out)		# ^=*(inp)|stolen cipher-text
- x $s1,4($out)
- x $s2,8($out)
- x $s3,12($out)
- st${g} $out,4*$SIZE_T($sp)
- la $key,0($key1)
- bras $ra,_s390x_AES_encrypt
- l${g} $out,4*$SIZE_T($sp)
- x $s0,`$tweak+0`($sp) # ^=tweak
- x $s1,`$tweak+4`($sp)
- x $s2,`$tweak+8`($sp)
- x $s3,`$tweak+12`($sp)
- st $s0,0($out)
- st $s1,4($out)
- st $s2,8($out)
- st $s3,12($out)
-
-.Lxts_enc_done:
- stg $sp,$tweak+0($sp) # wipe tweak
-	stg	$sp,$tweak+8($sp)
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_xts_encrypt,.-AES_xts_encrypt
-___
-# void AES_xts_decrypt(const char *inp,char *out,size_t len,
-# const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
-$code.=<<___;
-.globl AES_xts_decrypt
-.type AES_xts_decrypt,\@function
-.align 16
-AES_xts_decrypt:
- xgr %r3,%r4 # flip %r3 and %r4, $out and $len
- xgr %r4,%r3
- xgr %r3,%r4
-___
-$code.=<<___ if ($SIZE_T==4);
- llgfr $len,$len
-___
-$code.=<<___;
- st${g} $len,1*$SIZE_T($sp) # save copy of $len
- aghi $len,-16
-	bcr	4,$ra			# abort if less than zero; formally
-					# wrong, because $len is unsigned,
-					# but who can afford to process
-					# more than 2^63-1 bytes?
- tmll $len,0x0f
- jnz .Lxts_dec_proceed
- aghi $len,16
-.Lxts_dec_proceed:
-___
-$code.=<<___ if (!$softonly);
- llgf %r0,240($key2)
- lhi %r1,16
- clr %r0,%r1
- jl .Lxts_dec_software
-
- st${g} $ra,5*$SIZE_T($sp)
- stm${g} %r6,$s3,6*$SIZE_T($sp)
-
- nill $len,0xfff0 # $len&=~15
- slgr $out,$inp
-
- # generate the tweak value
- l${g} $s3,$stdframe($sp) # pointer to iv
- la $s2,$tweak($sp)
- lmg $s0,$s1,0($s3)
- lghi $s3,16
- stmg $s0,$s1,0($s2)
- la %r1,0($key2) # $key2 is not needed past this point
- .long 0xb92e00aa # km $s2,$s2, generate the tweak
- brc 1,.-4 # can this happen?
-
- l %r0,240($key1)
- la %r1,0($key1) # $key1 is not needed anymore
-
- ltgr $len,$len
- jz .Lxts_dec_km_short
- bras $ra,_s390x_xts_km
- jz .Lxts_dec_km_done
-
- lrvgr $s2,$s0 # make copy in reverse byte order
- lrvgr $s3,$s1
- j .Lxts_dec_km_2ndtweak
-
-.Lxts_dec_km_short:
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%=16
- lrvg $s0,$tweak+0($sp) # load the tweak
- lrvg $s1,$tweak+8($sp)
- lrvgr $s2,$s0 # make copy in reverse byte order
- lrvgr $s3,$s1
-
-.Lxts_dec_km_2ndtweak:
- lghi $i1,0x87
- srag $i2,$s1,63 # broadcast upper bit
- ngr $i1,$i2 # rem
- algr $s0,$s0
- alcgr $s1,$s1
- xgr $s0,$i1
- lrvgr $i1,$s0 # flip byte order
- lrvgr $i2,$s1
-
- xg $i1,0($inp)
- xg $i2,8($inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
- la $i2,0($out,$inp)
- lghi $i3,16
- .long 0xb92e0066 # km $i2,$i2
- brc 1,.-4 # can this happen?
- lrvgr $i1,$s0
- lrvgr $i2,$s1
- xg $i1,0($out,$inp)
- xg $i2,8($out,$inp)
- stg $i1,0($out,$inp)
- stg $i2,8($out,$inp)
-
- la $i3,0($out,$inp) # put aside real $out
-.Lxts_dec_km_steal:
- llgc $i1,16($inp)
- llgc $i2,0($out,$inp)
- stc $i1,0($out,$inp)
- stc $i2,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_dec_km_steal
-
- lgr $s0,$s2
- lgr $s1,$s3
- xg $s0,0($i3)
- xg $s1,8($i3)
- stg $s0,0($i3)
- stg $s1,8($i3)
- la $s0,0($i3)
- lghi $s1,16
- .long 0xb92e0088 # km $s0,$s0
- brc 1,.-4 # can this happen?
- xg $s2,0($i3)
- xg $s3,8($i3)
- stg $s2,0($i3)
- stg $s3,8($i3)
-.Lxts_dec_km_done:
- stg $sp,$tweak+0($sp) # wipe tweak
- stg $sp,$tweak+8($sp)
- l${g} $ra,5*$SIZE_T($sp)
- lm${g} %r6,$s3,6*$SIZE_T($sp)
- br $ra
-.align 16
-.Lxts_dec_software:
-___
-$code.=<<___;
- stm${g} %r6,$ra,6*$SIZE_T($sp)
-
- srlg $len,$len,4
- slgr $out,$inp
-
- l${g} $s3,$stdframe($sp) # ivp
- llgf $s0,0($s3) # load iv
- llgf $s1,4($s3)
- llgf $s2,8($s3)
- llgf $s3,12($s3)
- stm${g} %r2,%r5,2*$SIZE_T($sp)
- la $key,0($key2)
- larl $tbl,AES_Te
- bras $ra,_s390x_AES_encrypt # generate the tweak
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- larl $tbl,AES_Td
- lt${g}r $len,$len
- stm $s0,$s3,$tweak($sp) # save the tweak
- jz .Lxts_dec_short
- j .Lxts_dec_enter
-
-.align 16
-.Lxts_dec_loop:
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak+0($sp) # save the tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak+8($sp)
- llgfr $s3,$s3
-.Lxts_dec_enter:
- x $s0,0($inp) # tweak^=*(inp)
- x $s1,4($inp)
- x $s2,8($inp)
- x $s3,12($inp)
- stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing
- la $key,0($key1)
- bras $ra,_s390x_AES_decrypt
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- x $s0,$tweak+0($sp) # ^=tweak
- x $s1,$tweak+4($sp)
- x $s2,$tweak+8($sp)
- x $s3,$tweak+12($sp)
- st $s0,0($out,$inp)
- st $s1,4($out,$inp)
- st $s2,8($out,$inp)
- st $s3,12($out,$inp)
- la $inp,16($inp)
- brct${g} $len,.Lxts_dec_loop
-
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%16
- jz .Lxts_dec_done
-
- # generate pair of tweaks...
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $i2,$s1 # flip byte order
- lrvgr $i3,$s3
- stmg $i2,$i3,$tweak($sp) # save the 1st tweak
- j .Lxts_dec_2ndtweak
-
-.align 16
-.Lxts_dec_short:
- llgc $len,`2*$SIZE_T-1`($sp)
- nill $len,0x0f # $len%16
- lrvg $s1,$tweak+0($sp) # load the tweak in little-endian
- lrvg $s3,$tweak+8($sp)
-.Lxts_dec_2ndtweak:
- lghi %r1,0x87
- srag %r0,$s3,63 # broadcast upper bit
- ngr %r1,%r0 # rem
- algr $s1,$s1
- alcgr $s3,$s3
- xgr $s1,%r1
- lrvgr $s1,$s1 # flip byte order
- lrvgr $s3,$s3
- srlg $s0,$s1,32 # smash the tweak to 4x32-bits
- stg $s1,$tweak-16+0($sp) # save the 2nd tweak
- llgfr $s1,$s1
- srlg $s2,$s3,32
- stg $s3,$tweak-16+8($sp)
- llgfr $s3,$s3
-
- x $s0,0($inp) # tweak_the_2nd^=*(inp)
- x $s1,4($inp)
- x $s2,8($inp)
- x $s3,12($inp)
- stm${g} %r2,%r3,2*$SIZE_T($sp)
- la $key,0($key1)
- bras $ra,_s390x_AES_decrypt
- lm${g} %r2,%r5,2*$SIZE_T($sp)
- x $s0,$tweak-16+0($sp) # ^=tweak_the_2nd
- x $s1,$tweak-16+4($sp)
- x $s2,$tweak-16+8($sp)
- x $s3,$tweak-16+12($sp)
- st $s0,0($out,$inp)
- st $s1,4($out,$inp)
- st $s2,8($out,$inp)
- st $s3,12($out,$inp)
-
- la $i3,0($out,$inp) # put aside real $out
-.Lxts_dec_steal:
- llgc %r0,16($inp)
- llgc %r1,0($out,$inp)
- stc %r0,0($out,$inp)
- stc %r1,16($out,$inp)
- la $inp,1($inp)
- brct $len,.Lxts_dec_steal
- la $out,0($i3) # restore real $out
-
- lm $s0,$s3,$tweak($sp) # load the 1st tweak
- x $s0,0($out) # tweak^=*(inp)|stolen cipher-text
- x $s1,4($out)
- x $s2,8($out)
- x $s3,12($out)
- st${g} $out,4*$SIZE_T($sp)
- la $key,0($key1)
- bras $ra,_s390x_AES_decrypt
- l${g} $out,4*$SIZE_T($sp)
- x $s0,$tweak+0($sp) # ^=tweak
- x $s1,$tweak+4($sp)
- x $s2,$tweak+8($sp)
- x $s3,$tweak+12($sp)
- st $s0,0($out)
- st $s1,4($out)
- st $s2,8($out)
- st $s3,12($out)
- stg $sp,$tweak-16+0($sp) # wipe 2nd tweak
- stg $sp,$tweak-16+8($sp)
-.Lxts_dec_done:
- stg $sp,$tweak+0($sp) # wipe tweak
-	stg	$sp,$tweak+8($sp)
- lm${g} %r6,$ra,6*$SIZE_T($sp)
- br $ra
-.size AES_xts_decrypt,.-AES_xts_decrypt
-___
-}
-$code.=<<___;
-.string "AES for s390x, CRYPTOGAMS by <appro\@openssl.org>"
-.comm OPENSSL_s390xcap_P,16,8
-___
-
-$code =~ s/\`([^\`]*)\`/eval $1/gem;
-print $code;
-close STDOUT; # force flush
diff --git a/app/openssl/crypto/aes/asm/aes-sparcv9.pl b/app/openssl/crypto/aes/asm/aes-sparcv9.pl
deleted file mode 100755
index 403c4d12..00000000
--- a/app/openssl/crypto/aes/asm/aes-sparcv9.pl
+++ /dev/null
@@ -1,1182 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. Rights for redistribution and usage in source and binary
-# forms are granted according to the OpenSSL license.
-# ====================================================================
-#
-# Version 1.1
-#
-# The major reason for undertaking this effort was to mitigate the
-# hazard of cache-timing attacks. This is [currently and initially!]
-# addressed in two ways. 1. S-boxes are compressed from 5KB to
-# 2KB+256B each. 2. References to them are scheduled for L2 cache
-# latency, so the tables don't have to reside in L1 cache. Once
-# again, this is an initial draft and one should expect more
-# countermeasures to be implemented...
-#
-# Version 1.1 prefetches T[ed]4 in order to mitigate an attack on the
-# last round.
-#
-# Even though performance was not the primary goal [on the contrary,
-# the extra shifts "induced" by the compressed S-box and the longer
-# loop epilogue "induced" by scheduling for L2 have a negative effect
-# on performance], the code turned out to run at ~23 cycles per byte
-# en-/decrypted with a 128-bit key. This is a pretty good result for
-# code with the mentioned qualities on an UltraSPARC core. Compared
-# to Sun C generated code, the encrypt procedure runs just a few
-# percent faster, while the decrypt one runs a whole 50% faster [yes,
-# Sun C failed to generate an optimal decrypt procedure]. Compared to
-# GNU C generated code, both procedures are more than 60% faster:-)
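
Concretely, the compression works because _data_word below emits every T-table word twice, so each 64-bit entry is the word replicated; srlx by 8, 16 or 24 then yields the byte rotations that a conventional implementation keeps in four separate 1KB tables. A byte index b becomes the byte offset b*8, which the code forms in one step as (s >> 21) & 2040. A minimal C sketch of one lookup (te_lookup_top_byte is an illustrative name):

    #include <stdint.h>

    /* One compressed-table lookup, as in 'srl $s0,21,$acc0;
     * and $acc0,2040,$acc0; ldx [$tbl+$acc0]'. */
    static uint64_t te_lookup_top_byte(const uint64_t *tbl, uint32_t s)
    {
        uint32_t off = (s >> 21) & 2040;    /* == ((s >> 24) & 0xff) * 8 */
        return tbl[off / 8];
    }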
-
-$bits=32;
-for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
-if ($bits==64) { $bias=2047; $frame=192; }
-else { $bias=0; $frame=112; }
-$locals=16;
-
-$acc0="%l0";
-$acc1="%o0";
-$acc2="%o1";
-$acc3="%o2";
-
-$acc4="%l1";
-$acc5="%o3";
-$acc6="%o4";
-$acc7="%o5";
-
-$acc8="%l2";
-$acc9="%o7";
-$acc10="%g1";
-$acc11="%g2";
-
-$acc12="%l3";
-$acc13="%g3";
-$acc14="%g4";
-$acc15="%g5";
-
-$t0="%l4";
-$t1="%l5";
-$t2="%l6";
-$t3="%l7";
-
-$s0="%i0";
-$s1="%i1";
-$s2="%i2";
-$s3="%i3";
-$tbl="%i4";
-$key="%i5";
-$rounds="%i7"; # aliases with return address, which is off-loaded to stack
-
-sub _data_word()
-{ my $i;
- while(defined($i=shift)) { $code.=sprintf"\t.long\t0x%08x,0x%08x\n",$i,$i; }
-}
-
-$code.=<<___ if ($bits==64);
-.register %g2,#scratch
-.register %g3,#scratch
-___
-$code.=<<___;
-.section ".text",#alloc,#execinstr
-
-.align 256
-AES_Te:
-___
-&_data_word(
- 0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d,
- 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
- 0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d,
- 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
- 0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87,
- 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
- 0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea,
- 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
- 0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a,
- 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
- 0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108,
- 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
- 0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e,
- 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
- 0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d,
- 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
- 0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e,
- 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
- 0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce,
- 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
- 0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c,
- 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
- 0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b,
- 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
- 0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16,
- 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
- 0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81,
- 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
- 0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a,
- 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
- 0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163,
- 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
- 0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f,
- 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
- 0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47,
- 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
- 0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f,
- 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
- 0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c,
- 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
- 0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e,
- 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
- 0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6,
- 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
- 0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7,
- 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
- 0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25,
- 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
- 0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72,
- 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
- 0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21,
- 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
- 0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa,
- 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
- 0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0,
- 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
- 0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133,
- 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
- 0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920,
- 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
- 0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17,
- 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
- 0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11,
- 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a);
-$code.=<<___;
- .byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
- .byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
- .byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
- .byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
- .byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
- .byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
- .byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
- .byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
- .byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
- .byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
- .byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
- .byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
- .byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
- .byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
- .byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
- .byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
- .byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
- .byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
- .byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
- .byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
- .byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
- .byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
- .byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
- .byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
- .byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
- .byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
- .byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
- .byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
- .byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
- .byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
- .byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
- .byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
-.type AES_Te,#object
-.size AES_Te,(.-AES_Te)
-
-.align 64
-.skip 16
-_sparcv9_AES_encrypt:
- save %sp,-$frame-$locals,%sp
- stx %i7,[%sp+$bias+$frame+0] ! off-load return address
- ld [$key+240],$rounds
- ld [$key+0],$t0
- ld [$key+4],$t1 !
- ld [$key+8],$t2
- srl $rounds,1,$rounds
- xor $t0,$s0,$s0
- ld [$key+12],$t3
- srl $s0,21,$acc0
- xor $t1,$s1,$s1
- ld [$key+16],$t0
- srl $s1,13,$acc1 !
- xor $t2,$s2,$s2
- ld [$key+20],$t1
- xor $t3,$s3,$s3
- ld [$key+24],$t2
- and $acc0,2040,$acc0
- ld [$key+28],$t3
- nop
-.Lenc_loop:
- srl $s2,5,$acc2 !
- and $acc1,2040,$acc1
- ldx [$tbl+$acc0],$acc0
- sll $s3,3,$acc3
- and $acc2,2040,$acc2
- ldx [$tbl+$acc1],$acc1
- srl $s1,21,$acc4
- and $acc3,2040,$acc3
- ldx [$tbl+$acc2],$acc2 !
- srl $s2,13,$acc5
- and $acc4,2040,$acc4
- ldx [$tbl+$acc3],$acc3
- srl $s3,5,$acc6
- and $acc5,2040,$acc5
- ldx [$tbl+$acc4],$acc4
- fmovs %f0,%f0
- sll $s0,3,$acc7 !
- and $acc6,2040,$acc6
- ldx [$tbl+$acc5],$acc5
- srl $s2,21,$acc8
- and $acc7,2040,$acc7
- ldx [$tbl+$acc6],$acc6
- srl $s3,13,$acc9
- and $acc8,2040,$acc8
- ldx [$tbl+$acc7],$acc7 !
- srl $s0,5,$acc10
- and $acc9,2040,$acc9
- ldx [$tbl+$acc8],$acc8
- sll $s1,3,$acc11
- and $acc10,2040,$acc10
- ldx [$tbl+$acc9],$acc9
- fmovs %f0,%f0
- srl $s3,21,$acc12 !
- and $acc11,2040,$acc11
- ldx [$tbl+$acc10],$acc10
- srl $s0,13,$acc13
- and $acc12,2040,$acc12
- ldx [$tbl+$acc11],$acc11
- srl $s1,5,$acc14
- and $acc13,2040,$acc13
- ldx [$tbl+$acc12],$acc12 !
- sll $s2,3,$acc15
- and $acc14,2040,$acc14
- ldx [$tbl+$acc13],$acc13
- and $acc15,2040,$acc15
- add $key,32,$key
- ldx [$tbl+$acc14],$acc14
- fmovs %f0,%f0
- subcc $rounds,1,$rounds !
- ldx [$tbl+$acc15],$acc15
- bz,a,pn %icc,.Lenc_last
- add $tbl,2048,$rounds
-
- srlx $acc1,8,$acc1
- xor $acc0,$t0,$t0
- ld [$key+0],$s0
- fmovs %f0,%f0
- srlx $acc2,16,$acc2 !
- xor $acc1,$t0,$t0
- ld [$key+4],$s1
- srlx $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ld [$key+8],$s2
- srlx $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ld [$key+12],$s3 !
- srlx $acc6,16,$acc6
- xor $acc4,$t1,$t1
- fmovs %f0,%f0
- srlx $acc7,24,$acc7
- xor $acc5,$t1,$t1
- srlx $acc9,8,$acc9
- xor $acc6,$t1,$t1
- srlx $acc10,16,$acc10 !
- xor $acc7,$t1,$t1
- srlx $acc11,24,$acc11
- xor $acc8,$t2,$t2
- srlx $acc13,8,$acc13
- xor $acc9,$t2,$t2
- srlx $acc14,16,$acc14
- xor $acc10,$t2,$t2
- srlx $acc15,24,$acc15 !
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- srl $t0,21,$acc0
- xor $acc14,$t3,$t3
- srl $t1,13,$acc1
- xor $acc15,$t3,$t3
-
- and $acc0,2040,$acc0 !
- srl $t2,5,$acc2
- and $acc1,2040,$acc1
- ldx [$tbl+$acc0],$acc0
- sll $t3,3,$acc3
- and $acc2,2040,$acc2
- ldx [$tbl+$acc1],$acc1
- fmovs %f0,%f0
- srl $t1,21,$acc4 !
- and $acc3,2040,$acc3
- ldx [$tbl+$acc2],$acc2
- srl $t2,13,$acc5
- and $acc4,2040,$acc4
- ldx [$tbl+$acc3],$acc3
- srl $t3,5,$acc6
- and $acc5,2040,$acc5
- ldx [$tbl+$acc4],$acc4 !
- sll $t0,3,$acc7
- and $acc6,2040,$acc6
- ldx [$tbl+$acc5],$acc5
- srl $t2,21,$acc8
- and $acc7,2040,$acc7
- ldx [$tbl+$acc6],$acc6
- fmovs %f0,%f0
- srl $t3,13,$acc9 !
- and $acc8,2040,$acc8
- ldx [$tbl+$acc7],$acc7
- srl $t0,5,$acc10
- and $acc9,2040,$acc9
- ldx [$tbl+$acc8],$acc8
- sll $t1,3,$acc11
- and $acc10,2040,$acc10
- ldx [$tbl+$acc9],$acc9 !
- srl $t3,21,$acc12
- and $acc11,2040,$acc11
- ldx [$tbl+$acc10],$acc10
- srl $t0,13,$acc13
- and $acc12,2040,$acc12
- ldx [$tbl+$acc11],$acc11
- fmovs %f0,%f0
- srl $t1,5,$acc14 !
- and $acc13,2040,$acc13
- ldx [$tbl+$acc12],$acc12
- sll $t2,3,$acc15
- and $acc14,2040,$acc14
- ldx [$tbl+$acc13],$acc13
- srlx $acc1,8,$acc1
- and $acc15,2040,$acc15
- ldx [$tbl+$acc14],$acc14 !
-
- srlx $acc2,16,$acc2
- xor $acc0,$s0,$s0
- ldx [$tbl+$acc15],$acc15
- srlx $acc3,24,$acc3
- xor $acc1,$s0,$s0
- ld [$key+16],$t0
- fmovs %f0,%f0
- srlx $acc5,8,$acc5 !
- xor $acc2,$s0,$s0
- ld [$key+20],$t1
- srlx $acc6,16,$acc6
- xor $acc3,$s0,$s0
- ld [$key+24],$t2
- srlx $acc7,24,$acc7
- xor $acc4,$s1,$s1
- ld [$key+28],$t3 !
- srlx $acc9,8,$acc9
- xor $acc5,$s1,$s1
- ldx [$tbl+2048+0],%g0 ! prefetch te4
- srlx $acc10,16,$acc10
- xor $acc6,$s1,$s1
- ldx [$tbl+2048+32],%g0 ! prefetch te4
- srlx $acc11,24,$acc11
- xor $acc7,$s1,$s1
- ldx [$tbl+2048+64],%g0 ! prefetch te4
- srlx $acc13,8,$acc13
- xor $acc8,$s2,$s2
- ldx [$tbl+2048+96],%g0 ! prefetch te4
- srlx $acc14,16,$acc14 !
- xor $acc9,$s2,$s2
- ldx [$tbl+2048+128],%g0 ! prefetch te4
- srlx $acc15,24,$acc15
- xor $acc10,$s2,$s2
- ldx [$tbl+2048+160],%g0 ! prefetch te4
- srl $s0,21,$acc0
- xor $acc11,$s2,$s2
- ldx [$tbl+2048+192],%g0 ! prefetch te4
- xor $acc12,$acc14,$acc14
- xor $acc13,$s3,$s3
- ldx [$tbl+2048+224],%g0 ! prefetch te4
- srl $s1,13,$acc1 !
- xor $acc14,$s3,$s3
- xor $acc15,$s3,$s3
- ba .Lenc_loop
- and $acc0,2040,$acc0
-
-.align 32
-.Lenc_last:
- srlx $acc1,8,$acc1 !
- xor $acc0,$t0,$t0
- ld [$key+0],$s0
- srlx $acc2,16,$acc2
- xor $acc1,$t0,$t0
- ld [$key+4],$s1
- srlx $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ld [$key+8],$s2 !
- srlx $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ld [$key+12],$s3
- srlx $acc6,16,$acc6
- xor $acc4,$t1,$t1
- srlx $acc7,24,$acc7
- xor $acc5,$t1,$t1
- srlx $acc9,8,$acc9 !
- xor $acc6,$t1,$t1
- srlx $acc10,16,$acc10
- xor $acc7,$t1,$t1
- srlx $acc11,24,$acc11
- xor $acc8,$t2,$t2
- srlx $acc13,8,$acc13
- xor $acc9,$t2,$t2
- srlx $acc14,16,$acc14 !
- xor $acc10,$t2,$t2
- srlx $acc15,24,$acc15
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- srl $t0,24,$acc0
- xor $acc14,$t3,$t3
- srl $t1,16,$acc1 !
- xor $acc15,$t3,$t3
-
- srl $t2,8,$acc2
- and $acc1,255,$acc1
- ldub [$rounds+$acc0],$acc0
- srl $t1,24,$acc4
- and $acc2,255,$acc2
- ldub [$rounds+$acc1],$acc1
- srl $t2,16,$acc5 !
- and $t3,255,$acc3
- ldub [$rounds+$acc2],$acc2
- ldub [$rounds+$acc3],$acc3
- srl $t3,8,$acc6
- and $acc5,255,$acc5
- ldub [$rounds+$acc4],$acc4
- fmovs %f0,%f0
- srl $t2,24,$acc8 !
- and $acc6,255,$acc6
- ldub [$rounds+$acc5],$acc5
- srl $t3,16,$acc9
- and $t0,255,$acc7
- ldub [$rounds+$acc6],$acc6
- ldub [$rounds+$acc7],$acc7
- fmovs %f0,%f0
- srl $t0,8,$acc10 !
- and $acc9,255,$acc9
- ldub [$rounds+$acc8],$acc8
- srl $t3,24,$acc12
- and $acc10,255,$acc10
- ldub [$rounds+$acc9],$acc9
- srl $t0,16,$acc13
- and $t1,255,$acc11
- ldub [$rounds+$acc10],$acc10 !
- srl $t1,8,$acc14
- and $acc13,255,$acc13
- ldub [$rounds+$acc11],$acc11
- ldub [$rounds+$acc12],$acc12
- and $acc14,255,$acc14
- ldub [$rounds+$acc13],$acc13
- and $t2,255,$acc15
- ldub [$rounds+$acc14],$acc14 !
-
- sll $acc0,24,$acc0
- xor $acc3,$s0,$s0
- ldub [$rounds+$acc15],$acc15
- sll $acc1,16,$acc1
- xor $acc0,$s0,$s0
- ldx [%sp+$bias+$frame+0],%i7 ! restore return address
- fmovs %f0,%f0
- sll $acc2,8,$acc2 !
- xor $acc1,$s0,$s0
- sll $acc4,24,$acc4
- xor $acc2,$s0,$s0
- sll $acc5,16,$acc5
- xor $acc7,$s1,$s1
- sll $acc6,8,$acc6
- xor $acc4,$s1,$s1
- sll $acc8,24,$acc8 !
- xor $acc5,$s1,$s1
- sll $acc9,16,$acc9
- xor $acc11,$s2,$s2
- sll $acc10,8,$acc10
- xor $acc6,$s1,$s1
- sll $acc12,24,$acc12
- xor $acc8,$s2,$s2
- sll $acc13,16,$acc13 !
- xor $acc9,$s2,$s2
- sll $acc14,8,$acc14
- xor $acc10,$s2,$s2
- xor $acc12,$acc14,$acc14
- xor $acc13,$s3,$s3
- xor $acc14,$s3,$s3
- xor $acc15,$s3,$s3
-
- ret
- restore
-.type _sparcv9_AES_encrypt,#function
-.size _sparcv9_AES_encrypt,(.-_sparcv9_AES_encrypt)
-
-.align 32
-.globl AES_encrypt
-AES_encrypt:
- or %o0,%o1,%g1
- andcc %g1,3,%g0
- bnz,pn %xcc,.Lunaligned_enc
- save %sp,-$frame,%sp
-
- ld [%i0+0],%o0
- ld [%i0+4],%o1
- ld [%i0+8],%o2
- ld [%i0+12],%o3
-
-1: call .+8
- add %o7,AES_Te-1b,%o4
- call _sparcv9_AES_encrypt
- mov %i2,%o5
-
- st %o0,[%i1+0]
- st %o1,[%i1+4]
- st %o2,[%i1+8]
- st %o3,[%i1+12]
-
- ret
- restore
-
-.align 32
-.Lunaligned_enc:
- ldub [%i0+0],%l0
- ldub [%i0+1],%l1
- ldub [%i0+2],%l2
-
- sll %l0,24,%l0
- ldub [%i0+3],%l3
- sll %l1,16,%l1
- ldub [%i0+4],%l4
- sll %l2,8,%l2
- or %l1,%l0,%l0
- ldub [%i0+5],%l5
- sll %l4,24,%l4
- or %l3,%l2,%l2
- ldub [%i0+6],%l6
- sll %l5,16,%l5
- or %l0,%l2,%o0
- ldub [%i0+7],%l7
-
- sll %l6,8,%l6
- or %l5,%l4,%l4
- ldub [%i0+8],%l0
- or %l7,%l6,%l6
- ldub [%i0+9],%l1
- or %l4,%l6,%o1
- ldub [%i0+10],%l2
-
- sll %l0,24,%l0
- ldub [%i0+11],%l3
- sll %l1,16,%l1
- ldub [%i0+12],%l4
- sll %l2,8,%l2
- or %l1,%l0,%l0
- ldub [%i0+13],%l5
- sll %l4,24,%l4
- or %l3,%l2,%l2
- ldub [%i0+14],%l6
- sll %l5,16,%l5
- or %l0,%l2,%o2
- ldub [%i0+15],%l7
-
- sll %l6,8,%l6
- or %l5,%l4,%l4
- or %l7,%l6,%l6
- or %l4,%l6,%o3
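
The byte-at-a-time sequence above simply reassembles big-endian words from an unaligned buffer, since SPARC traps on a misaligned ld. Its C equivalent, per word:

    #include <stdint.h>

    /* Big-endian 32-bit load from a possibly unaligned pointer, as
     * done byte-by-byte in .Lunaligned_enc. */
    static uint32_t load_be32(const unsigned char *p)
    {
        return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
               (uint32_t)p[2] <<  8 | (uint32_t)p[3];
    }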
-
-1: call .+8
- add %o7,AES_Te-1b,%o4
- call _sparcv9_AES_encrypt
- mov %i2,%o5
-
- srl %o0,24,%l0
- srl %o0,16,%l1
- stb %l0,[%i1+0]
- srl %o0,8,%l2
- stb %l1,[%i1+1]
- stb %l2,[%i1+2]
- srl %o1,24,%l4
- stb %o0,[%i1+3]
-
- srl %o1,16,%l5
- stb %l4,[%i1+4]
- srl %o1,8,%l6
- stb %l5,[%i1+5]
- stb %l6,[%i1+6]
- srl %o2,24,%l0
- stb %o1,[%i1+7]
-
- srl %o2,16,%l1
- stb %l0,[%i1+8]
- srl %o2,8,%l2
- stb %l1,[%i1+9]
- stb %l2,[%i1+10]
- srl %o3,24,%l4
- stb %o2,[%i1+11]
-
- srl %o3,16,%l5
- stb %l4,[%i1+12]
- srl %o3,8,%l6
- stb %l5,[%i1+13]
- stb %l6,[%i1+14]
- stb %o3,[%i1+15]
-
- ret
- restore
-.type AES_encrypt,#function
-.size AES_encrypt,(.-AES_encrypt)
-
-___
-
-$code.=<<___;
-.align 256
-AES_Td:
-___
-&_data_word(
- 0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96,
- 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
- 0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25,
- 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
- 0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1,
- 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
- 0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da,
- 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
- 0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd,
- 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
- 0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45,
- 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
- 0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7,
- 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
- 0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5,
- 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
- 0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1,
- 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
- 0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75,
- 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
- 0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46,
- 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
- 0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77,
- 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
- 0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000,
- 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
- 0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927,
- 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
- 0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e,
- 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
- 0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d,
- 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
- 0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd,
- 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
- 0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163,
- 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
- 0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d,
- 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
- 0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422,
- 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
- 0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36,
- 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
- 0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662,
- 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
- 0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3,
- 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
- 0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8,
- 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
- 0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6,
- 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
- 0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815,
- 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
- 0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df,
- 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
- 0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e,
- 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
- 0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89,
- 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
- 0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf,
- 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
- 0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f,
- 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
- 0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190,
- 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742);
-$code.=<<___;
- .byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
- .byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
- .byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
- .byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
- .byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
- .byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
- .byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
- .byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
- .byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
- .byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
- .byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
- .byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
- .byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
- .byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
- .byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
- .byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
- .byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
- .byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
- .byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
- .byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
- .byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
- .byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
- .byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
- .byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
- .byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
- .byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
- .byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
- .byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
- .byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
- .byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
- .byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
- .byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
-.type AES_Td,#object
-.size AES_Td,(.-AES_Td)
-
-.align 64
-.skip 16
-_sparcv9_AES_decrypt:
- save %sp,-$frame-$locals,%sp
- stx %i7,[%sp+$bias+$frame+0] ! off-load return address
- ld [$key+240],$rounds
- ld [$key+0],$t0
- ld [$key+4],$t1 !
- ld [$key+8],$t2
- ld [$key+12],$t3
- srl $rounds,1,$rounds
- xor $t0,$s0,$s0
- ld [$key+16],$t0
- xor $t1,$s1,$s1
- ld [$key+20],$t1
- srl $s0,21,$acc0 !
- xor $t2,$s2,$s2
- ld [$key+24],$t2
- xor $t3,$s3,$s3
- and $acc0,2040,$acc0
- ld [$key+28],$t3
- srl $s3,13,$acc1
- nop
-.Ldec_loop:
- srl $s2,5,$acc2 !
- and $acc1,2040,$acc1
- ldx [$tbl+$acc0],$acc0
- sll $s1,3,$acc3
- and $acc2,2040,$acc2
- ldx [$tbl+$acc1],$acc1
- srl $s1,21,$acc4
- and $acc3,2040,$acc3
- ldx [$tbl+$acc2],$acc2 !
- srl $s0,13,$acc5
- and $acc4,2040,$acc4
- ldx [$tbl+$acc3],$acc3
- srl $s3,5,$acc6
- and $acc5,2040,$acc5
- ldx [$tbl+$acc4],$acc4
- fmovs %f0,%f0
- sll $s2,3,$acc7 !
- and $acc6,2040,$acc6
- ldx [$tbl+$acc5],$acc5
- srl $s2,21,$acc8
- and $acc7,2040,$acc7
- ldx [$tbl+$acc6],$acc6
- srl $s1,13,$acc9
- and $acc8,2040,$acc8
- ldx [$tbl+$acc7],$acc7 !
- srl $s0,5,$acc10
- and $acc9,2040,$acc9
- ldx [$tbl+$acc8],$acc8
- sll $s3,3,$acc11
- and $acc10,2040,$acc10
- ldx [$tbl+$acc9],$acc9
- fmovs %f0,%f0
- srl $s3,21,$acc12 !
- and $acc11,2040,$acc11
- ldx [$tbl+$acc10],$acc10
- srl $s2,13,$acc13
- and $acc12,2040,$acc12
- ldx [$tbl+$acc11],$acc11
- srl $s1,5,$acc14
- and $acc13,2040,$acc13
- ldx [$tbl+$acc12],$acc12 !
- sll $s0,3,$acc15
- and $acc14,2040,$acc14
- ldx [$tbl+$acc13],$acc13
- and $acc15,2040,$acc15
- add $key,32,$key
- ldx [$tbl+$acc14],$acc14
- fmovs %f0,%f0
- subcc $rounds,1,$rounds !
- ldx [$tbl+$acc15],$acc15
- bz,a,pn %icc,.Ldec_last
- add $tbl,2048,$rounds
-
- srlx $acc1,8,$acc1
- xor $acc0,$t0,$t0
- ld [$key+0],$s0
- fmovs %f0,%f0
- srlx $acc2,16,$acc2 !
- xor $acc1,$t0,$t0
- ld [$key+4],$s1
- srlx $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ld [$key+8],$s2
- srlx $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ld [$key+12],$s3 !
- srlx $acc6,16,$acc6
- xor $acc4,$t1,$t1
- fmovs %f0,%f0
- srlx $acc7,24,$acc7
- xor $acc5,$t1,$t1
- srlx $acc9,8,$acc9
- xor $acc6,$t1,$t1
- srlx $acc10,16,$acc10 !
- xor $acc7,$t1,$t1
- srlx $acc11,24,$acc11
- xor $acc8,$t2,$t2
- srlx $acc13,8,$acc13
- xor $acc9,$t2,$t2
- srlx $acc14,16,$acc14
- xor $acc10,$t2,$t2
- srlx $acc15,24,$acc15 !
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- srl $t0,21,$acc0
- xor $acc14,$t3,$t3
- xor $acc15,$t3,$t3
- srl $t3,13,$acc1
-
- and $acc0,2040,$acc0 !
- srl $t2,5,$acc2
- and $acc1,2040,$acc1
- ldx [$tbl+$acc0],$acc0
- sll $t1,3,$acc3
- and $acc2,2040,$acc2
- ldx [$tbl+$acc1],$acc1
- fmovs %f0,%f0
- srl $t1,21,$acc4 !
- and $acc3,2040,$acc3
- ldx [$tbl+$acc2],$acc2
- srl $t0,13,$acc5
- and $acc4,2040,$acc4
- ldx [$tbl+$acc3],$acc3
- srl $t3,5,$acc6
- and $acc5,2040,$acc5
- ldx [$tbl+$acc4],$acc4 !
- sll $t2,3,$acc7
- and $acc6,2040,$acc6
- ldx [$tbl+$acc5],$acc5
- srl $t2,21,$acc8
- and $acc7,2040,$acc7
- ldx [$tbl+$acc6],$acc6
- fmovs %f0,%f0
- srl $t1,13,$acc9 !
- and $acc8,2040,$acc8
- ldx [$tbl+$acc7],$acc7
- srl $t0,5,$acc10
- and $acc9,2040,$acc9
- ldx [$tbl+$acc8],$acc8
- sll $t3,3,$acc11
- and $acc10,2040,$acc10
- ldx [$tbl+$acc9],$acc9 !
- srl $t3,21,$acc12
- and $acc11,2040,$acc11
- ldx [$tbl+$acc10],$acc10
- srl $t2,13,$acc13
- and $acc12,2040,$acc12
- ldx [$tbl+$acc11],$acc11
- fmovs %f0,%f0
- srl $t1,5,$acc14 !
- and $acc13,2040,$acc13
- ldx [$tbl+$acc12],$acc12
- sll $t0,3,$acc15
- and $acc14,2040,$acc14
- ldx [$tbl+$acc13],$acc13
- srlx $acc1,8,$acc1
- and $acc15,2040,$acc15
- ldx [$tbl+$acc14],$acc14 !
-
- srlx $acc2,16,$acc2
- xor $acc0,$s0,$s0
- ldx [$tbl+$acc15],$acc15
- srlx $acc3,24,$acc3
- xor $acc1,$s0,$s0
- ld [$key+16],$t0
- fmovs %f0,%f0
- srlx $acc5,8,$acc5 !
- xor $acc2,$s0,$s0
- ld [$key+20],$t1
- srlx $acc6,16,$acc6
- xor $acc3,$s0,$s0
- ld [$key+24],$t2
- srlx $acc7,24,$acc7
- xor $acc4,$s1,$s1
- ld [$key+28],$t3 !
- srlx $acc9,8,$acc9
- xor $acc5,$s1,$s1
- ldx [$tbl+2048+0],%g0 ! prefetch td4
- srlx $acc10,16,$acc10
- xor $acc6,$s1,$s1
- ldx [$tbl+2048+32],%g0 ! prefetch td4
- srlx $acc11,24,$acc11
- xor $acc7,$s1,$s1
- ldx [$tbl+2048+64],%g0 ! prefetch td4
- srlx $acc13,8,$acc13
- xor $acc8,$s2,$s2
- ldx [$tbl+2048+96],%g0 ! prefetch td4
- srlx $acc14,16,$acc14 !
- xor $acc9,$s2,$s2
- ldx [$tbl+2048+128],%g0 ! prefetch td4
- srlx $acc15,24,$acc15
- xor $acc10,$s2,$s2
- ldx [$tbl+2048+160],%g0 ! prefetch td4
- srl $s0,21,$acc0
- xor $acc11,$s2,$s2
- ldx [$tbl+2048+192],%g0 ! prefetch td4
- xor $acc12,$acc14,$acc14
- xor $acc13,$s3,$s3
- ldx [$tbl+2048+224],%g0 ! prefetch td4
- and $acc0,2040,$acc0 !
- xor $acc14,$s3,$s3
- xor $acc15,$s3,$s3
- ba .Ldec_loop
- srl $s3,13,$acc1
-
-.align 32
-.Ldec_last:
- srlx $acc1,8,$acc1 !
- xor $acc0,$t0,$t0
- ld [$key+0],$s0
- srlx $acc2,16,$acc2
- xor $acc1,$t0,$t0
- ld [$key+4],$s1
- srlx $acc3,24,$acc3
- xor $acc2,$t0,$t0
- ld [$key+8],$s2 !
- srlx $acc5,8,$acc5
- xor $acc3,$t0,$t0
- ld [$key+12],$s3
- srlx $acc6,16,$acc6
- xor $acc4,$t1,$t1
- srlx $acc7,24,$acc7
- xor $acc5,$t1,$t1
- srlx $acc9,8,$acc9 !
- xor $acc6,$t1,$t1
- srlx $acc10,16,$acc10
- xor $acc7,$t1,$t1
- srlx $acc11,24,$acc11
- xor $acc8,$t2,$t2
- srlx $acc13,8,$acc13
- xor $acc9,$t2,$t2
- srlx $acc14,16,$acc14 !
- xor $acc10,$t2,$t2
- srlx $acc15,24,$acc15
- xor $acc11,$t2,$t2
- xor $acc12,$acc14,$acc14
- xor $acc13,$t3,$t3
- srl $t0,24,$acc0
- xor $acc14,$t3,$t3
- xor $acc15,$t3,$t3 !
- srl $t3,16,$acc1
-
- srl $t2,8,$acc2
- and $acc1,255,$acc1
- ldub [$rounds+$acc0],$acc0
- srl $t1,24,$acc4
- and $acc2,255,$acc2
- ldub [$rounds+$acc1],$acc1
- srl $t0,16,$acc5 !
- and $t1,255,$acc3
- ldub [$rounds+$acc2],$acc2
- ldub [$rounds+$acc3],$acc3
- srl $t3,8,$acc6
- and $acc5,255,$acc5
- ldub [$rounds+$acc4],$acc4
- fmovs %f0,%f0
- srl $t2,24,$acc8 !
- and $acc6,255,$acc6
- ldub [$rounds+$acc5],$acc5
- srl $t1,16,$acc9
- and $t2,255,$acc7
- ldub [$rounds+$acc6],$acc6
- ldub [$rounds+$acc7],$acc7
- fmovs %f0,%f0
- srl $t0,8,$acc10 !
- and $acc9,255,$acc9
- ldub [$rounds+$acc8],$acc8
- srl $t3,24,$acc12
- and $acc10,255,$acc10
- ldub [$rounds+$acc9],$acc9
- srl $t2,16,$acc13
- and $t3,255,$acc11
- ldub [$rounds+$acc10],$acc10 !
- srl $t1,8,$acc14
- and $acc13,255,$acc13
- ldub [$rounds+$acc11],$acc11
- ldub [$rounds+$acc12],$acc12
- and $acc14,255,$acc14
- ldub [$rounds+$acc13],$acc13
- and $t0,255,$acc15
- ldub [$rounds+$acc14],$acc14 !
-
- sll $acc0,24,$acc0
- xor $acc3,$s0,$s0
- ldub [$rounds+$acc15],$acc15
- sll $acc1,16,$acc1
- xor $acc0,$s0,$s0
- ldx [%sp+$bias+$frame+0],%i7 ! restore return address
- fmovs %f0,%f0
- sll $acc2,8,$acc2 !
- xor $acc1,$s0,$s0
- sll $acc4,24,$acc4
- xor $acc2,$s0,$s0
- sll $acc5,16,$acc5
- xor $acc7,$s1,$s1
- sll $acc6,8,$acc6
- xor $acc4,$s1,$s1
- sll $acc8,24,$acc8 !
- xor $acc5,$s1,$s1
- sll $acc9,16,$acc9
- xor $acc11,$s2,$s2
- sll $acc10,8,$acc10
- xor $acc6,$s1,$s1
- sll $acc12,24,$acc12
- xor $acc8,$s2,$s2
- sll $acc13,16,$acc13 !
- xor $acc9,$s2,$s2
- sll $acc14,8,$acc14
- xor $acc10,$s2,$s2
- xor $acc12,$acc14,$acc14
- xor $acc13,$s3,$s3
- xor $acc14,$s3,$s3
- xor $acc15,$s3,$s3
-
- ret
- restore
-.type _sparcv9_AES_decrypt,#function
-.size _sparcv9_AES_decrypt,(.-_sparcv9_AES_decrypt)
-
-.align 32
-.globl AES_decrypt
-AES_decrypt:
- or %o0,%o1,%g1
- andcc %g1,3,%g0
- bnz,pn %xcc,.Lunaligned_dec
- save %sp,-$frame,%sp
-
- ld [%i0+0],%o0
- ld [%i0+4],%o1
- ld [%i0+8],%o2
- ld [%i0+12],%o3
-
-1: call .+8
- add %o7,AES_Td-1b,%o4
- call _sparcv9_AES_decrypt
- mov %i2,%o5
-
- st %o0,[%i1+0]
- st %o1,[%i1+4]
- st %o2,[%i1+8]
- st %o3,[%i1+12]
-
- ret
- restore
-
-.align 32
-.Lunaligned_dec:
- ldub [%i0+0],%l0
- ldub [%i0+1],%l1
- ldub [%i0+2],%l2
-
- sll %l0,24,%l0
- ldub [%i0+3],%l3
- sll %l1,16,%l1
- ldub [%i0+4],%l4
- sll %l2,8,%l2
- or %l1,%l0,%l0
- ldub [%i0+5],%l5
- sll %l4,24,%l4
- or %l3,%l2,%l2
- ldub [%i0+6],%l6
- sll %l5,16,%l5
- or %l0,%l2,%o0
- ldub [%i0+7],%l7
-
- sll %l6,8,%l6
- or %l5,%l4,%l4
- ldub [%i0+8],%l0
- or %l7,%l6,%l6
- ldub [%i0+9],%l1
- or %l4,%l6,%o1
- ldub [%i0+10],%l2
-
- sll %l0,24,%l0
- ldub [%i0+11],%l3
- sll %l1,16,%l1
- ldub [%i0+12],%l4
- sll %l2,8,%l2
- or %l1,%l0,%l0
- ldub [%i0+13],%l5
- sll %l4,24,%l4
- or %l3,%l2,%l2
- ldub [%i0+14],%l6
- sll %l5,16,%l5
- or %l0,%l2,%o2
- ldub [%i0+15],%l7
-
- sll %l6,8,%l6
- or %l5,%l4,%l4
- or %l7,%l6,%l6
- or %l4,%l6,%o3
-
-1: call .+8
- add %o7,AES_Td-1b,%o4
- call _sparcv9_AES_decrypt
- mov %i2,%o5
-
- srl %o0,24,%l0
- srl %o0,16,%l1
- stb %l0,[%i1+0]
- srl %o0,8,%l2
- stb %l1,[%i1+1]
- stb %l2,[%i1+2]
- srl %o1,24,%l4
- stb %o0,[%i1+3]
-
- srl %o1,16,%l5
- stb %l4,[%i1+4]
- srl %o1,8,%l6
- stb %l5,[%i1+5]
- stb %l6,[%i1+6]
- srl %o2,24,%l0
- stb %o1,[%i1+7]
-
- srl %o2,16,%l1
- stb %l0,[%i1+8]
- srl %o2,8,%l2
- stb %l1,[%i1+9]
- stb %l2,[%i1+10]
- srl %o3,24,%l4
- stb %o2,[%i1+11]
-
- srl %o3,16,%l5
- stb %l4,[%i1+12]
- srl %o3,8,%l6
- stb %l5,[%i1+13]
- stb %l6,[%i1+14]
- stb %o3,[%i1+15]
-
- ret
- restore
-.type AES_decrypt,#function
-.size AES_decrypt,(.-AES_decrypt)
-___
-
-# The fmovs instructions substituting for FP nops were originally added
-# to meet specific instruction alignment requirements and maximize ILP.
-# As UltraSPARC T1, a.k.a. Niagara, has a shared FPU, FP nops can have
-# an undesired effect there, so we simply omit them and sacrifice a
-# small percentage of performance...
-$code =~ s/fmovs.*$//gm;
-
-print $code;
-close STDOUT; # ensure flush
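For readers unfamiliar with the Perl-generates-assembly setup these files use, here is a minimal standalone sketch of the fmovs-stripping pass performed just above. It is illustrative only: the input snippet is hypothetical and not taken from the generated code, but the substitution is the same one applied to $code in the deleted script.

    #!/usr/bin/env perl
    # Hypothetical three-instruction fragment; the middle line is an
    # fmovs-based FP nop of the kind inserted for instruction alignment.
    my $code = <<'___';
    	srl	%o0,21,%l0
    	fmovs	%f0,%f0
    	and	%l0,2040,%l0
    ___
    # Blank out every fmovs line: with /m, $ matches before each newline,
    # so "fmovs.*$" erases from the mnemonic to end of line, leaving only
    # the leading whitespace behind.
    $code =~ s/fmovs.*$//gm;
    print $code;

Running this prints the srl and and instructions unchanged, with a whitespace-only line where the FP nop used to be, which the assembler ignores.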
diff --git a/app/openssl/crypto/aes/asm/aes-x86_64.S b/app/openssl/crypto/aes/asm/aes-x86_64.S
deleted file mode 100644
index e385566f..00000000
--- a/app/openssl/crypto/aes/asm/aes-x86_64.S
+++ /dev/null
@@ -1,2541 +0,0 @@
-.text
-.type _x86_64_AES_encrypt,@function
-.align 16
-_x86_64_AES_encrypt:
- xorl 0(%r15),%eax
- xorl 4(%r15),%ebx
- xorl 8(%r15),%ecx
- xorl 12(%r15),%edx
-
- movl 240(%r15),%r13d
- subl $1,%r13d
- jmp .Lenc_loop
-.align 16
-.Lenc_loop:
-
- movzbl %al,%esi
- movzbl %bl,%edi
- movzbl %cl,%ebp
- movl 0(%r14,%rsi,8),%r10d
- movl 0(%r14,%rdi,8),%r11d
- movl 0(%r14,%rbp,8),%r12d
-
- movzbl %bh,%esi
- movzbl %ch,%edi
- movzbl %dl,%ebp
- xorl 3(%r14,%rsi,8),%r10d
- xorl 3(%r14,%rdi,8),%r11d
- movl 0(%r14,%rbp,8),%r8d
-
- movzbl %dh,%esi
- shrl $16,%ecx
- movzbl %ah,%ebp
- xorl 3(%r14,%rsi,8),%r12d
- shrl $16,%edx
- xorl 3(%r14,%rbp,8),%r8d
-
- shrl $16,%ebx
- leaq 16(%r15),%r15
- shrl $16,%eax
-
- movzbl %cl,%esi
- movzbl %dl,%edi
- movzbl %al,%ebp
- xorl 2(%r14,%rsi,8),%r10d
- xorl 2(%r14,%rdi,8),%r11d
- xorl 2(%r14,%rbp,8),%r12d
-
- movzbl %dh,%esi
- movzbl %ah,%edi
- movzbl %bl,%ebp
- xorl 1(%r14,%rsi,8),%r10d
- xorl 1(%r14,%rdi,8),%r11d
- xorl 2(%r14,%rbp,8),%r8d
-
- movl 12(%r15),%edx
- movzbl %bh,%edi
- movzbl %ch,%ebp
- movl 0(%r15),%eax
- xorl 1(%r14,%rdi,8),%r12d
- xorl 1(%r14,%rbp,8),%r8d
-
- movl 4(%r15),%ebx
- movl 8(%r15),%ecx
- xorl %r10d,%eax
- xorl %r11d,%ebx
- xorl %r12d,%ecx
- xorl %r8d,%edx
- subl $1,%r13d
- jnz .Lenc_loop
- movzbl %al,%esi
- movzbl %bl,%edi
- movzbl %cl,%ebp
- movzbl 2(%r14,%rsi,8),%r10d
- movzbl 2(%r14,%rdi,8),%r11d
- movzbl 2(%r14,%rbp,8),%r12d
-
- movzbl %dl,%esi
- movzbl %bh,%edi
- movzbl %ch,%ebp
- movzbl 2(%r14,%rsi,8),%r8d
- movl 0(%r14,%rdi,8),%edi
- movl 0(%r14,%rbp,8),%ebp
-
- andl $65280,%edi
- andl $65280,%ebp
-
- xorl %edi,%r10d
- xorl %ebp,%r11d
- shrl $16,%ecx
-
- movzbl %dh,%esi
- movzbl %ah,%edi
- shrl $16,%edx
- movl 0(%r14,%rsi,8),%esi
- movl 0(%r14,%rdi,8),%edi
-
- andl $65280,%esi
- andl $65280,%edi
- shrl $16,%ebx
- xorl %esi,%r12d
- xorl %edi,%r8d
- shrl $16,%eax
-
- movzbl %cl,%esi
- movzbl %dl,%edi
- movzbl %al,%ebp
- movl 0(%r14,%rsi,8),%esi
- movl 0(%r14,%rdi,8),%edi
- movl 0(%r14,%rbp,8),%ebp
-
- andl $16711680,%esi
- andl $16711680,%edi
- andl $16711680,%ebp
-
- xorl %esi,%r10d
- xorl %edi,%r11d
- xorl %ebp,%r12d
-
- movzbl %bl,%esi
- movzbl %dh,%edi
- movzbl %ah,%ebp
- movl 0(%r14,%rsi,8),%esi
- movl 2(%r14,%rdi,8),%edi
- movl 2(%r14,%rbp,8),%ebp
-
- andl $16711680,%esi
- andl $4278190080,%edi
- andl $4278190080,%ebp
-
- xorl %esi,%r8d
- xorl %edi,%r10d
- xorl %ebp,%r11d
-
- movzbl %bh,%esi
- movzbl %ch,%edi
- movl 16+12(%r15),%edx
- movl 2(%r14,%rsi,8),%esi
- movl 2(%r14,%rdi,8),%edi
- movl 16+0(%r15),%eax
-
- andl $4278190080,%esi
- andl $4278190080,%edi
-
- xorl %esi,%r12d
- xorl %edi,%r8d
-
- movl 16+4(%r15),%ebx
- movl 16+8(%r15),%ecx
- xorl %r10d,%eax
- xorl %r11d,%ebx
- xorl %r12d,%ecx
- xorl %r8d,%edx
-.byte 0xf3,0xc3
-.size _x86_64_AES_encrypt,.-_x86_64_AES_encrypt
-.type _x86_64_AES_encrypt_compact,@function
-.align 16
-_x86_64_AES_encrypt_compact:
- leaq 128(%r14),%r8
- movl 0-128(%r8),%edi
- movl 32-128(%r8),%ebp
- movl 64-128(%r8),%r10d
- movl 96-128(%r8),%r11d
- movl 128-128(%r8),%edi
- movl 160-128(%r8),%ebp
- movl 192-128(%r8),%r10d
- movl 224-128(%r8),%r11d
- jmp .Lenc_loop_compact
-.align 16
-.Lenc_loop_compact:
- xorl 0(%r15),%eax
- xorl 4(%r15),%ebx
- xorl 8(%r15),%ecx
- xorl 12(%r15),%edx
- leaq 16(%r15),%r15
- movzbl %al,%r10d
- movzbl %bl,%r11d
- movzbl %cl,%r12d
- movzbl (%r14,%r10,1),%r10d
- movzbl (%r14,%r11,1),%r11d
- movzbl (%r14,%r12,1),%r12d
-
- movzbl %dl,%r8d
- movzbl %bh,%esi
- movzbl %ch,%edi
- movzbl (%r14,%r8,1),%r8d
- movzbl (%r14,%rsi,1),%r9d
- movzbl (%r14,%rdi,1),%r13d
-
- movzbl %dh,%ebp
- movzbl %ah,%esi
- shrl $16,%ecx
- movzbl (%r14,%rbp,1),%ebp
- movzbl (%r14,%rsi,1),%esi
- shrl $16,%edx
-
- movzbl %cl,%edi
- shll $8,%r9d
- shll $8,%r13d
- movzbl (%r14,%rdi,1),%edi
- xorl %r9d,%r10d
- xorl %r13d,%r11d
-
- movzbl %dl,%r9d
- shrl $16,%eax
- shrl $16,%ebx
- movzbl %al,%r13d
- shll $8,%ebp
- shll $8,%esi
- movzbl (%r14,%r9,1),%r9d
- movzbl (%r14,%r13,1),%r13d
- xorl %ebp,%r12d
- xorl %esi,%r8d
-
- movzbl %bl,%ebp
- movzbl %dh,%esi
- shll $16,%edi
- movzbl (%r14,%rbp,1),%ebp
- movzbl (%r14,%rsi,1),%esi
- xorl %edi,%r10d
-
- movzbl %ah,%edi
- shrl $8,%ecx
- shrl $8,%ebx
- movzbl (%r14,%rdi,1),%edi
- movzbl (%r14,%rcx,1),%edx
- movzbl (%r14,%rbx,1),%ecx
- shll $16,%r9d
- shll $16,%r13d
- shll $16,%ebp
- xorl %r9d,%r11d
- xorl %r13d,%r12d
- xorl %ebp,%r8d
-
- shll $24,%esi
- shll $24,%edi
- shll $24,%edx
- xorl %esi,%r10d
- shll $24,%ecx
- xorl %edi,%r11d
- movl %r10d,%eax
- movl %r11d,%ebx
- xorl %r12d,%ecx
- xorl %r8d,%edx
- cmpq 16(%rsp),%r15
- je .Lenc_compact_done
- movl %eax,%esi
- movl %ebx,%edi
- andl $2155905152,%esi
- andl $2155905152,%edi
- movl %esi,%r10d
- movl %edi,%r11d
- shrl $7,%r10d
- leal (%rax,%rax,1),%r8d
- shrl $7,%r11d
- leal (%rbx,%rbx,1),%r9d
- subl %r10d,%esi
- subl %r11d,%edi
- andl $4278124286,%r8d
- andl $4278124286,%r9d
- andl $454761243,%esi
- andl $454761243,%edi
- movl %eax,%r10d
- movl %ebx,%r11d
- xorl %esi,%r8d
- xorl %edi,%r9d
-
- xorl %r8d,%eax
- xorl %r9d,%ebx
- movl %ecx,%esi
- movl %edx,%edi
- roll $24,%eax
- roll $24,%ebx
- andl $2155905152,%esi
- andl $2155905152,%edi
- xorl %r8d,%eax
- xorl %r9d,%ebx
- movl %esi,%r12d
- movl %edi,%ebp
- rorl $16,%r10d
- rorl $16,%r11d
- shrl $7,%r12d
- leal (%rcx,%rcx,1),%r8d
- xorl %r10d,%eax
- xorl %r11d,%ebx
- shrl $7,%ebp
- leal (%rdx,%rdx,1),%r9d
- rorl $8,%r10d
- rorl $8,%r11d
- subl %r12d,%esi
- subl %ebp,%edi
- xorl %r10d,%eax
- xorl %r11d,%ebx
-
- andl $4278124286,%r8d
- andl $4278124286,%r9d
- andl $454761243,%esi
- andl $454761243,%edi
- movl %ecx,%r12d
- movl %edx,%ebp
- xorl %esi,%r8d
- xorl %edi,%r9d
-
- xorl %r8d,%ecx
- xorl %r9d,%edx
- roll $24,%ecx
- roll $24,%edx
- xorl %r8d,%ecx
- xorl %r9d,%edx
- movl 0(%r14),%esi
- rorl $16,%r12d
- rorl $16,%ebp
- movl 64(%r14),%edi
- xorl %r12d,%ecx
- xorl %ebp,%edx
- movl 128(%r14),%r8d
- rorl $8,%r12d
- rorl $8,%ebp
- movl 192(%r14),%r9d
- xorl %r12d,%ecx
- xorl %ebp,%edx
- jmp .Lenc_loop_compact
-.align 16
-.Lenc_compact_done:
- xorl 0(%r15),%eax
- xorl 4(%r15),%ebx
- xorl 8(%r15),%ecx
- xorl 12(%r15),%edx
-.byte 0xf3,0xc3
-.size _x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
-.globl AES_encrypt
-.type AES_encrypt,@function
-.align 16
-.globl asm_AES_encrypt
-.hidden asm_AES_encrypt
-asm_AES_encrypt:
-AES_encrypt:
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
-
-
- movq %rsp,%r10
- leaq -63(%rdx),%rcx
- andq $-64,%rsp
- subq %rsp,%rcx
- negq %rcx
- andq $960,%rcx
- subq %rcx,%rsp
- subq $32,%rsp
-
- movq %rsi,16(%rsp)
- movq %r10,24(%rsp)
-.Lenc_prologue:
-
- movq %rdx,%r15
- movl 240(%r15),%r13d
-
- movl 0(%rdi),%eax
- movl 4(%rdi),%ebx
- movl 8(%rdi),%ecx
- movl 12(%rdi),%edx
-
- shll $4,%r13d
- leaq (%r15,%r13,1),%rbp
- movq %r15,(%rsp)
- movq %rbp,8(%rsp)
-
-
- leaq .LAES_Te+2048(%rip),%r14
- leaq 768(%rsp),%rbp
- subq %r14,%rbp
- andq $768,%rbp
- leaq (%r14,%rbp,1),%r14
-
- call _x86_64_AES_encrypt_compact
-
- movq 16(%rsp),%r9
- movq 24(%rsp),%rsi
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
-.Lenc_epilogue:
- .byte 0xf3,0xc3
-.size AES_encrypt,.-AES_encrypt
-.type _x86_64_AES_decrypt,@function
-.align 16
-_x86_64_AES_decrypt:
- xorl 0(%r15),%eax
- xorl 4(%r15),%ebx
- xorl 8(%r15),%ecx
- xorl 12(%r15),%edx
-
- movl 240(%r15),%r13d
- subl $1,%r13d
- jmp .Ldec_loop
-.align 16
-.Ldec_loop:
-
- movzbl %al,%esi
- movzbl %bl,%edi
- movzbl %cl,%ebp
- movl 0(%r14,%rsi,8),%r10d
- movl 0(%r14,%rdi,8),%r11d
- movl 0(%r14,%rbp,8),%r12d
-
- movzbl %dh,%esi
- movzbl %ah,%edi
- movzbl %dl,%ebp
- xorl 3(%r14,%rsi,8),%r10d
- xorl 3(%r14,%rdi,8),%r11d
- movl 0(%r14,%rbp,8),%r8d
-
- movzbl %bh,%esi
- shrl $16,%eax
- movzbl %ch,%ebp
- xorl 3(%r14,%rsi,8),%r12d
- shrl $16,%edx
- xorl 3(%r14,%rbp,8),%r8d
-
- shrl $16,%ebx
- leaq 16(%r15),%r15
- shrl $16,%ecx
-
- movzbl %cl,%esi
- movzbl %dl,%edi
- movzbl %al,%ebp
- xorl 2(%r14,%rsi,8),%r10d
- xorl 2(%r14,%rdi,8),%r11d
- xorl 2(%r14,%rbp,8),%r12d
-
- movzbl %bh,%esi
- movzbl %ch,%edi
- movzbl %bl,%ebp
- xorl 1(%r14,%rsi,8),%r10d
- xorl 1(%r14,%rdi,8),%r11d
- xorl 2(%r14,%rbp,8),%r8d
-
- movzbl %dh,%esi
- movl 12(%r15),%edx
- movzbl %ah,%ebp
- xorl 1(%r14,%rsi,8),%r12d
- movl 0(%r15),%eax
- xorl 1(%r14,%rbp,8),%r8d
-
- xorl %r10d,%eax
- movl 4(%r15),%ebx
- movl 8(%r15),%ecx
- xorl %r12d,%ecx
- xorl %r11d,%ebx
- xorl %r8d,%edx
- subl $1,%r13d
- jnz .Ldec_loop
- leaq 2048(%r14),%r14
- movzbl %al,%esi
- movzbl %bl,%edi
- movzbl %cl,%ebp
- movzbl (%r14,%rsi,1),%r10d
- movzbl (%r14,%rdi,1),%r11d
- movzbl (%r14,%rbp,1),%r12d
-
- movzbl %dl,%esi
- movzbl %dh,%edi
- movzbl %ah,%ebp
- movzbl (%r14,%rsi,1),%r8d
- movzbl (%r14,%rdi,1),%edi
- movzbl (%r14,%rbp,1),%ebp
-
- shll $8,%edi
- shll $8,%ebp
-
- xorl %edi,%r10d
- xorl %ebp,%r11d
- shrl $16,%edx
-
- movzbl %bh,%esi
- movzbl %ch,%edi
- shrl $16,%eax
- movzbl (%r14,%rsi,1),%esi
- movzbl (%r14,%rdi,1),%edi
-
- shll $8,%esi
- shll $8,%edi
- shrl $16,%ebx
- xorl %esi,%r12d
- xorl %edi,%r8d
- shrl $16,%ecx
-
- movzbl %cl,%esi
- movzbl %dl,%edi
- movzbl %al,%ebp
- movzbl (%r14,%rsi,1),%esi
- movzbl (%r14,%rdi,1),%edi
- movzbl (%r14,%rbp,1),%ebp
-
- shll $16,%esi
- shll $16,%edi
- shll $16,%ebp
-
- xorl %esi,%r10d
- xorl %edi,%r11d
- xorl %ebp,%r12d
-
- movzbl %bl,%esi
- movzbl %bh,%edi
- movzbl %ch,%ebp
- movzbl (%r14,%rsi,1),%esi
- movzbl (%r14,%rdi,1),%edi
- movzbl (%r14,%rbp,1),%ebp
-
- shll $16,%esi
- shll $24,%edi
- shll $24,%ebp
-
- xorl %esi,%r8d
- xorl %edi,%r10d
- xorl %ebp,%r11d
-
- movzbl %dh,%esi
- movzbl %ah,%edi
- movl 16+12(%r15),%edx
- movzbl (%r14,%rsi,1),%esi
- movzbl (%r14,%rdi,1),%edi
- movl 16+0(%r15),%eax
-
- shll $24,%esi
- shll $24,%edi
-
- xorl %esi,%r12d
- xorl %edi,%r8d
-
- movl 16+4(%r15),%ebx
- movl 16+8(%r15),%ecx
- leaq -2048(%r14),%r14
- xorl %r10d,%eax
- xorl %r11d,%ebx
- xorl %r12d,%ecx
- xorl %r8d,%edx
-.byte 0xf3,0xc3
-.size _x86_64_AES_decrypt,.-_x86_64_AES_decrypt
-.type _x86_64_AES_decrypt_compact,@function
-.align 16
-_x86_64_AES_decrypt_compact:
- leaq 128(%r14),%r8
- movl 0-128(%r8),%edi
- movl 32-128(%r8),%ebp
- movl 64-128(%r8),%r10d
- movl 96-128(%r8),%r11d
- movl 128-128(%r8),%edi
- movl 160-128(%r8),%ebp
- movl 192-128(%r8),%r10d
- movl 224-128(%r8),%r11d
- jmp .Ldec_loop_compact
-
-.align 16
-.Ldec_loop_compact:
- xorl 0(%r15),%eax
- xorl 4(%r15),%ebx
- xorl 8(%r15),%ecx
- xorl 12(%r15),%edx
- leaq 16(%r15),%r15
- movzbl %al,%r10d
- movzbl %bl,%r11d
- movzbl %cl,%r12d
- movzbl (%r14,%r10,1),%r10d
- movzbl (%r14,%r11,1),%r11d
- movzbl (%r14,%r12,1),%r12d
-
- movzbl %dl,%r8d
- movzbl %dh,%esi
- movzbl %ah,%edi
- movzbl (%r14,%r8,1),%r8d
- movzbl (%r14,%rsi,1),%r9d
- movzbl (%r14,%rdi,1),%r13d
-
- movzbl %bh,%ebp
- movzbl %ch,%esi
- shrl $16,%ecx
- movzbl (%r14,%rbp,1),%ebp
- movzbl (%r14,%rsi,1),%esi
- shrl $16,%edx
-
- movzbl %cl,%edi
- shll $8,%r9d
- shll $8,%r13d
- movzbl (%r14,%rdi,1),%edi
- xorl %r9d,%r10d
- xorl %r13d,%r11d
-
- movzbl %dl,%r9d
- shrl $16,%eax
- shrl $16,%ebx
- movzbl %al,%r13d
- shll $8,%ebp
- shll $8,%esi
- movzbl (%r14,%r9,1),%r9d
- movzbl (%r14,%r13,1),%r13d
- xorl %ebp,%r12d
- xorl %esi,%r8d
-
- movzbl %bl,%ebp
- movzbl %bh,%esi
- shll $16,%edi
- movzbl (%r14,%rbp,1),%ebp
- movzbl (%r14,%rsi,1),%esi
- xorl %edi,%r10d
-
- movzbl %ch,%edi
- shll $16,%r9d
- shll $16,%r13d
- movzbl (%r14,%rdi,1),%ebx
- xorl %r9d,%r11d
- xorl %r13d,%r12d
-
- movzbl %dh,%edi
- shrl $8,%eax
- shll $16,%ebp
- movzbl (%r14,%rdi,1),%ecx
- movzbl (%r14,%rax,1),%edx
- xorl %ebp,%r8d
-
- shll $24,%esi
- shll $24,%ebx
- shll $24,%ecx
- xorl %esi,%r10d
- shll $24,%edx
- xorl %r11d,%ebx
- movl %r10d,%eax
- xorl %r12d,%ecx
- xorl %r8d,%edx
- cmpq 16(%rsp),%r15
- je .Ldec_compact_done
-
- movq 256+0(%r14),%rsi
- shlq $32,%rbx
- shlq $32,%rdx
- movq 256+8(%r14),%rdi
- orq %rbx,%rax
- orq %rdx,%rcx
- movq 256+16(%r14),%rbp
- movq %rax,%rbx
- movq %rcx,%rdx
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
- shrq $7,%r9
- leaq (%rax,%rax,1),%r8
- shrq $7,%r12
- leaq (%rcx,%rcx,1),%r11
- subq %r9,%rbx
- subq %r12,%rdx
- andq %rdi,%r8
- andq %rdi,%r11
- andq %rbp,%rbx
- andq %rbp,%rdx
- xorq %r8,%rbx
- xorq %r11,%rdx
- movq %rbx,%r8
- movq %rdx,%r11
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
- shrq $7,%r10
- leaq (%r8,%r8,1),%r9
- shrq $7,%r13
- leaq (%r11,%r11,1),%r12
- subq %r10,%rbx
- subq %r13,%rdx
- andq %rdi,%r9
- andq %rdi,%r12
- andq %rbp,%rbx
- andq %rbp,%rdx
- xorq %r9,%rbx
- xorq %r12,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
- shrq $7,%r10
- xorq %rax,%r8
- shrq $7,%r13
- xorq %rcx,%r11
- subq %r10,%rbx
- subq %r13,%rdx
- leaq (%r9,%r9,1),%r10
- leaq (%r12,%r12,1),%r13
- xorq %rax,%r9
- xorq %rcx,%r12
- andq %rdi,%r10
- andq %rdi,%r13
- andq %rbp,%rbx
- andq %rbp,%rdx
- xorq %rbx,%r10
- xorq %rdx,%r13
-
- xorq %r10,%rax
- xorq %r13,%rcx
- xorq %r10,%r8
- xorq %r13,%r11
- movq %rax,%rbx
- movq %rcx,%rdx
- xorq %r10,%r9
- xorq %r13,%r12
- shrq $32,%rbx
- shrq $32,%rdx
- xorq %r8,%r10
- xorq %r11,%r13
- roll $8,%eax
- roll $8,%ecx
- xorq %r9,%r10
- xorq %r12,%r13
-
- roll $8,%ebx
- roll $8,%edx
- xorl %r10d,%eax
- xorl %r13d,%ecx
- shrq $32,%r10
- shrq $32,%r13
- xorl %r10d,%ebx
- xorl %r13d,%edx
-
- movq %r8,%r10
- movq %r11,%r13
- shrq $32,%r10
- shrq $32,%r13
- roll $24,%r8d
- roll $24,%r11d
- roll $24,%r10d
- roll $24,%r13d
- xorl %r8d,%eax
- xorl %r11d,%ecx
- movq %r9,%r8
- movq %r12,%r11
- xorl %r10d,%ebx
- xorl %r13d,%edx
-
- movq 0(%r14),%rsi
- shrq $32,%r8
- shrq $32,%r11
- movq 64(%r14),%rdi
- roll $16,%r9d
- roll $16,%r12d
- movq 128(%r14),%rbp
- roll $16,%r8d
- roll $16,%r11d
- movq 192(%r14),%r10
- xorl %r9d,%eax
- xorl %r12d,%ecx
- movq 256(%r14),%r13
- xorl %r8d,%ebx
- xorl %r11d,%edx
- jmp .Ldec_loop_compact
-.align 16
-.Ldec_compact_done:
- xorl 0(%r15),%eax
- xorl 4(%r15),%ebx
- xorl 8(%r15),%ecx
- xorl 12(%r15),%edx
-.byte 0xf3,0xc3
-.size _x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
-.globl AES_decrypt
-.type AES_decrypt,@function
-.align 16
-.globl asm_AES_decrypt
-.hidden asm_AES_decrypt
-asm_AES_decrypt:
-AES_decrypt:
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
-
-
- movq %rsp,%r10
- leaq -63(%rdx),%rcx
- andq $-64,%rsp
- subq %rsp,%rcx
- negq %rcx
- andq $960,%rcx
- subq %rcx,%rsp
- subq $32,%rsp
-
- movq %rsi,16(%rsp)
- movq %r10,24(%rsp)
-.Ldec_prologue:
-
- movq %rdx,%r15
- movl 240(%r15),%r13d
-
- movl 0(%rdi),%eax
- movl 4(%rdi),%ebx
- movl 8(%rdi),%ecx
- movl 12(%rdi),%edx
-
- shll $4,%r13d
- leaq (%r15,%r13,1),%rbp
- movq %r15,(%rsp)
- movq %rbp,8(%rsp)
-
-
- leaq .LAES_Td+2048(%rip),%r14
- leaq 768(%rsp),%rbp
- subq %r14,%rbp
- andq $768,%rbp
- leaq (%r14,%rbp,1),%r14
- shrq $3,%rbp
- addq %rbp,%r14
-
- call _x86_64_AES_decrypt_compact
-
- movq 16(%rsp),%r9
- movq 24(%rsp),%rsi
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
-.Ldec_epilogue:
- .byte 0xf3,0xc3
-.size AES_decrypt,.-AES_decrypt
-.globl private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,@function
-.align 16
-private_AES_set_encrypt_key:
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
- subq $8,%rsp
-.Lenc_key_prologue:
-
- call _x86_64_AES_set_encrypt_key
-
- movq 8(%rsp),%r15
- movq 16(%rsp),%r14
- movq 24(%rsp),%r13
- movq 32(%rsp),%r12
- movq 40(%rsp),%rbp
- movq 48(%rsp),%rbx
- addq $56,%rsp
-.Lenc_key_epilogue:
- .byte 0xf3,0xc3
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-.type _x86_64_AES_set_encrypt_key,@function
-.align 16
-_x86_64_AES_set_encrypt_key:
- movl %esi,%ecx
- movq %rdi,%rsi
- movq %rdx,%rdi
-
- testq $-1,%rsi
- jz .Lbadpointer
- testq $-1,%rdi
- jz .Lbadpointer
-
- leaq .LAES_Te(%rip),%rbp
- leaq 2048+128(%rbp),%rbp
-
-
- movl 0-128(%rbp),%eax
- movl 32-128(%rbp),%ebx
- movl 64-128(%rbp),%r8d
- movl 96-128(%rbp),%edx
- movl 128-128(%rbp),%eax
- movl 160-128(%rbp),%ebx
- movl 192-128(%rbp),%r8d
- movl 224-128(%rbp),%edx
-
- cmpl $128,%ecx
- je .L10rounds
- cmpl $192,%ecx
- je .L12rounds
- cmpl $256,%ecx
- je .L14rounds
- movq $-2,%rax
- jmp .Lexit
-
-.L10rounds:
- movq 0(%rsi),%rax
- movq 8(%rsi),%rdx
- movq %rax,0(%rdi)
- movq %rdx,8(%rdi)
-
- shrq $32,%rdx
- xorl %ecx,%ecx
- jmp .L10shortcut
-.align 4
-.L10loop:
- movl 0(%rdi),%eax
- movl 12(%rdi),%edx
-.L10shortcut:
- movzbl %dl,%esi
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- shll $24,%ebx
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shrl $16,%edx
- movzbl %dl,%esi
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- shll $8,%ebx
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shll $16,%ebx
- xorl %ebx,%eax
-
- xorl 1024-128(%rbp,%rcx,4),%eax
- movl %eax,16(%rdi)
- xorl 4(%rdi),%eax
- movl %eax,20(%rdi)
- xorl 8(%rdi),%eax
- movl %eax,24(%rdi)
- xorl 12(%rdi),%eax
- movl %eax,28(%rdi)
- addl $1,%ecx
- leaq 16(%rdi),%rdi
- cmpl $10,%ecx
- jl .L10loop
-
- movl $10,80(%rdi)
- xorq %rax,%rax
- jmp .Lexit
-
-.L12rounds:
- movq 0(%rsi),%rax
- movq 8(%rsi),%rbx
- movq 16(%rsi),%rdx
- movq %rax,0(%rdi)
- movq %rbx,8(%rdi)
- movq %rdx,16(%rdi)
-
- shrq $32,%rdx
- xorl %ecx,%ecx
- jmp .L12shortcut
-.align 4
-.L12loop:
- movl 0(%rdi),%eax
- movl 20(%rdi),%edx
-.L12shortcut:
- movzbl %dl,%esi
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- shll $24,%ebx
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shrl $16,%edx
- movzbl %dl,%esi
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- shll $8,%ebx
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shll $16,%ebx
- xorl %ebx,%eax
-
- xorl 1024-128(%rbp,%rcx,4),%eax
- movl %eax,24(%rdi)
- xorl 4(%rdi),%eax
- movl %eax,28(%rdi)
- xorl 8(%rdi),%eax
- movl %eax,32(%rdi)
- xorl 12(%rdi),%eax
- movl %eax,36(%rdi)
-
- cmpl $7,%ecx
- je .L12break
- addl $1,%ecx
-
- xorl 16(%rdi),%eax
- movl %eax,40(%rdi)
- xorl 20(%rdi),%eax
- movl %eax,44(%rdi)
-
- leaq 24(%rdi),%rdi
- jmp .L12loop
-.L12break:
- movl $12,72(%rdi)
- xorq %rax,%rax
- jmp .Lexit
-
-.L14rounds:
- movq 0(%rsi),%rax
- movq 8(%rsi),%rbx
- movq 16(%rsi),%rcx
- movq 24(%rsi),%rdx
- movq %rax,0(%rdi)
- movq %rbx,8(%rdi)
- movq %rcx,16(%rdi)
- movq %rdx,24(%rdi)
-
- shrq $32,%rdx
- xorl %ecx,%ecx
- jmp .L14shortcut
-.align 4
-.L14loop:
- movl 0(%rdi),%eax
- movl 28(%rdi),%edx
-.L14shortcut:
- movzbl %dl,%esi
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- shll $24,%ebx
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shrl $16,%edx
- movzbl %dl,%esi
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- shll $8,%ebx
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shll $16,%ebx
- xorl %ebx,%eax
-
- xorl 1024-128(%rbp,%rcx,4),%eax
- movl %eax,32(%rdi)
- xorl 4(%rdi),%eax
- movl %eax,36(%rdi)
- xorl 8(%rdi),%eax
- movl %eax,40(%rdi)
- xorl 12(%rdi),%eax
- movl %eax,44(%rdi)
-
- cmpl $6,%ecx
- je .L14break
- addl $1,%ecx
-
- movl %eax,%edx
- movl 16(%rdi),%eax
- movzbl %dl,%esi
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shrl $16,%edx
- shll $8,%ebx
- movzbl %dl,%esi
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- movzbl %dh,%esi
- shll $16,%ebx
- xorl %ebx,%eax
-
- movzbl -128(%rbp,%rsi,1),%ebx
- shll $24,%ebx
- xorl %ebx,%eax
-
- movl %eax,48(%rdi)
- xorl 20(%rdi),%eax
- movl %eax,52(%rdi)
- xorl 24(%rdi),%eax
- movl %eax,56(%rdi)
- xorl 28(%rdi),%eax
- movl %eax,60(%rdi)
-
- leaq 32(%rdi),%rdi
- jmp .L14loop
-.L14break:
- movl $14,48(%rdi)
- xorq %rax,%rax
- jmp .Lexit
-
-.Lbadpointer:
- movq $-1,%rax
-.Lexit:
-.byte 0xf3,0xc3
-.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
-.globl private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,@function
-.align 16
-private_AES_set_decrypt_key:
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
- pushq %rdx
-.Ldec_key_prologue:
-
- call _x86_64_AES_set_encrypt_key
- movq (%rsp),%r8
- cmpl $0,%eax
- jne .Labort
-
- movl 240(%r8),%r14d
- xorq %rdi,%rdi
- leaq (%rdi,%r14,4),%rcx
- movq %r8,%rsi
- leaq (%r8,%rcx,4),%rdi
-.align 4
-.Linvert:
- movq 0(%rsi),%rax
- movq 8(%rsi),%rbx
- movq 0(%rdi),%rcx
- movq 8(%rdi),%rdx
- movq %rax,0(%rdi)
- movq %rbx,8(%rdi)
- movq %rcx,0(%rsi)
- movq %rdx,8(%rsi)
- leaq 16(%rsi),%rsi
- leaq -16(%rdi),%rdi
- cmpq %rsi,%rdi
- jne .Linvert
-
- leaq .LAES_Te+2048+1024(%rip),%rax
-
- movq 40(%rax),%rsi
- movq 48(%rax),%rdi
- movq 56(%rax),%rbp
-
- movq %r8,%r15
- subl $1,%r14d
-.align 4
-.Lpermute:
- leaq 16(%r15),%r15
- movq 0(%r15),%rax
- movq 8(%r15),%rcx
- movq %rax,%rbx
- movq %rcx,%rdx
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
- shrq $7,%r9
- leaq (%rax,%rax,1),%r8
- shrq $7,%r12
- leaq (%rcx,%rcx,1),%r11
- subq %r9,%rbx
- subq %r12,%rdx
- andq %rdi,%r8
- andq %rdi,%r11
- andq %rbp,%rbx
- andq %rbp,%rdx
- xorq %r8,%rbx
- xorq %r11,%rdx
- movq %rbx,%r8
- movq %rdx,%r11
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
- shrq $7,%r10
- leaq (%r8,%r8,1),%r9
- shrq $7,%r13
- leaq (%r11,%r11,1),%r12
- subq %r10,%rbx
- subq %r13,%rdx
- andq %rdi,%r9
- andq %rdi,%r12
- andq %rbp,%rbx
- andq %rbp,%rdx
- xorq %r9,%rbx
- xorq %r12,%rdx
- movq %rbx,%r9
- movq %rdx,%r12
-
- andq %rsi,%rbx
- andq %rsi,%rdx
- movq %rbx,%r10
- movq %rdx,%r13
- shrq $7,%r10
- xorq %rax,%r8
- shrq $7,%r13
- xorq %rcx,%r11
- subq %r10,%rbx
- subq %r13,%rdx
- leaq (%r9,%r9,1),%r10
- leaq (%r12,%r12,1),%r13
- xorq %rax,%r9
- xorq %rcx,%r12
- andq %rdi,%r10
- andq %rdi,%r13
- andq %rbp,%rbx
- andq %rbp,%rdx
- xorq %rbx,%r10
- xorq %rdx,%r13
-
- xorq %r10,%rax
- xorq %r13,%rcx
- xorq %r10,%r8
- xorq %r13,%r11
- movq %rax,%rbx
- movq %rcx,%rdx
- xorq %r10,%r9
- xorq %r13,%r12
- shrq $32,%rbx
- shrq $32,%rdx
- xorq %r8,%r10
- xorq %r11,%r13
- roll $8,%eax
- roll $8,%ecx
- xorq %r9,%r10
- xorq %r12,%r13
-
- roll $8,%ebx
- roll $8,%edx
- xorl %r10d,%eax
- xorl %r13d,%ecx
- shrq $32,%r10
- shrq $32,%r13
- xorl %r10d,%ebx
- xorl %r13d,%edx
-
- movq %r8,%r10
- movq %r11,%r13
- shrq $32,%r10
- shrq $32,%r13
- roll $24,%r8d
- roll $24,%r11d
- roll $24,%r10d
- roll $24,%r13d
- xorl %r8d,%eax
- xorl %r11d,%ecx
- movq %r9,%r8
- movq %r12,%r11
- xorl %r10d,%ebx
- xorl %r13d,%edx
-
-
- shrq $32,%r8
- shrq $32,%r11
-
- roll $16,%r9d
- roll $16,%r12d
-
- roll $16,%r8d
- roll $16,%r11d
-
- xorl %r9d,%eax
- xorl %r12d,%ecx
-
- xorl %r8d,%ebx
- xorl %r11d,%edx
- movl %eax,0(%r15)
- movl %ebx,4(%r15)
- movl %ecx,8(%r15)
- movl %edx,12(%r15)
- subl $1,%r14d
- jnz .Lpermute
-
- xorq %rax,%rax
-.Labort:
- movq 8(%rsp),%r15
- movq 16(%rsp),%r14
- movq 24(%rsp),%r13
- movq 32(%rsp),%r12
- movq 40(%rsp),%rbp
- movq 48(%rsp),%rbx
- addq $56,%rsp
-.Ldec_key_epilogue:
- .byte 0xf3,0xc3
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-.globl AES_cbc_encrypt
-.type AES_cbc_encrypt,@function
-.align 16
-
-.globl asm_AES_cbc_encrypt
-.hidden asm_AES_cbc_encrypt
-asm_AES_cbc_encrypt:
-AES_cbc_encrypt:
- cmpq $0,%rdx
- je .Lcbc_epilogue
- pushfq
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
-.Lcbc_prologue:
-
- cld
- movl %r9d,%r9d
-
- leaq .LAES_Te(%rip),%r14
- cmpq $0,%r9
- jne .Lcbc_picked_te
- leaq .LAES_Td(%rip),%r14
-.Lcbc_picked_te:
-
- movl OPENSSL_ia32cap_P(%rip),%r10d
- cmpq $512,%rdx
- jb .Lcbc_slow_prologue
- testq $15,%rdx
- jnz .Lcbc_slow_prologue
- btl $28,%r10d
- jc .Lcbc_slow_prologue
-
-
- leaq -88-248(%rsp),%r15
- andq $-64,%r15
-
-
- movq %r14,%r10
- leaq 2304(%r14),%r11
- movq %r15,%r12
- andq $4095,%r10
- andq $4095,%r11
- andq $4095,%r12
-
- cmpq %r11,%r12
- jb .Lcbc_te_break_out
- subq %r11,%r12
- subq %r12,%r15
- jmp .Lcbc_te_ok
-.Lcbc_te_break_out:
- subq %r10,%r12
- andq $4095,%r12
- addq $320,%r12
- subq %r12,%r15
-.align 4
-.Lcbc_te_ok:
-
- xchgq %rsp,%r15
-
- movq %r15,16(%rsp)
-.Lcbc_fast_body:
- movq %rdi,24(%rsp)
- movq %rsi,32(%rsp)
- movq %rdx,40(%rsp)
- movq %rcx,48(%rsp)
- movq %r8,56(%rsp)
- movl $0,80+240(%rsp)
- movq %r8,%rbp
- movq %r9,%rbx
- movq %rsi,%r9
- movq %rdi,%r8
- movq %rcx,%r15
-
- movl 240(%r15),%eax
-
- movq %r15,%r10
- subq %r14,%r10
- andq $4095,%r10
- cmpq $2304,%r10
- jb .Lcbc_do_ecopy
- cmpq $4096-248,%r10
- jb .Lcbc_skip_ecopy
-.align 4
-.Lcbc_do_ecopy:
- movq %r15,%rsi
- leaq 80(%rsp),%rdi
- leaq 80(%rsp),%r15
- movl $30,%ecx
-.long 0x90A548F3
- movl %eax,(%rdi)
-.Lcbc_skip_ecopy:
- movq %r15,0(%rsp)
-
- movl $18,%ecx
-.align 4
-.Lcbc_prefetch_te:
- movq 0(%r14),%r10
- movq 32(%r14),%r11
- movq 64(%r14),%r12
- movq 96(%r14),%r13
- leaq 128(%r14),%r14
- subl $1,%ecx
- jnz .Lcbc_prefetch_te
- leaq -2304(%r14),%r14
-
- cmpq $0,%rbx
- je .LFAST_DECRYPT
-
-
- movl 0(%rbp),%eax
- movl 4(%rbp),%ebx
- movl 8(%rbp),%ecx
- movl 12(%rbp),%edx
-
-.align 4
-.Lcbc_fast_enc_loop:
- xorl 0(%r8),%eax
- xorl 4(%r8),%ebx
- xorl 8(%r8),%ecx
- xorl 12(%r8),%edx
- movq 0(%rsp),%r15
- movq %r8,24(%rsp)
-
- call _x86_64_AES_encrypt
-
- movq 24(%rsp),%r8
- movq 40(%rsp),%r10
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- leaq 16(%r8),%r8
- leaq 16(%r9),%r9
- subq $16,%r10
- testq $-16,%r10
- movq %r10,40(%rsp)
- jnz .Lcbc_fast_enc_loop
- movq 56(%rsp),%rbp
- movl %eax,0(%rbp)
- movl %ebx,4(%rbp)
- movl %ecx,8(%rbp)
- movl %edx,12(%rbp)
-
- jmp .Lcbc_fast_cleanup
-
-
-.align 16
-.LFAST_DECRYPT:
- cmpq %r8,%r9
- je .Lcbc_fast_dec_in_place
-
- movq %rbp,64(%rsp)
-.align 4
-.Lcbc_fast_dec_loop:
- movl 0(%r8),%eax
- movl 4(%r8),%ebx
- movl 8(%r8),%ecx
- movl 12(%r8),%edx
- movq 0(%rsp),%r15
- movq %r8,24(%rsp)
-
- call _x86_64_AES_decrypt
-
- movq 64(%rsp),%rbp
- movq 24(%rsp),%r8
- movq 40(%rsp),%r10
- xorl 0(%rbp),%eax
- xorl 4(%rbp),%ebx
- xorl 8(%rbp),%ecx
- xorl 12(%rbp),%edx
- movq %r8,%rbp
-
- subq $16,%r10
- movq %r10,40(%rsp)
- movq %rbp,64(%rsp)
-
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- leaq 16(%r8),%r8
- leaq 16(%r9),%r9
- jnz .Lcbc_fast_dec_loop
- movq 56(%rsp),%r12
- movq 0(%rbp),%r10
- movq 8(%rbp),%r11
- movq %r10,0(%r12)
- movq %r11,8(%r12)
- jmp .Lcbc_fast_cleanup
-
-.align 16
-.Lcbc_fast_dec_in_place:
- movq 0(%rbp),%r10
- movq 8(%rbp),%r11
- movq %r10,0+64(%rsp)
- movq %r11,8+64(%rsp)
-.align 4
-.Lcbc_fast_dec_in_place_loop:
- movl 0(%r8),%eax
- movl 4(%r8),%ebx
- movl 8(%r8),%ecx
- movl 12(%r8),%edx
- movq 0(%rsp),%r15
- movq %r8,24(%rsp)
-
- call _x86_64_AES_decrypt
-
- movq 24(%rsp),%r8
- movq 40(%rsp),%r10
- xorl 0+64(%rsp),%eax
- xorl 4+64(%rsp),%ebx
- xorl 8+64(%rsp),%ecx
- xorl 12+64(%rsp),%edx
-
- movq 0(%r8),%r11
- movq 8(%r8),%r12
- subq $16,%r10
- jz .Lcbc_fast_dec_in_place_done
-
- movq %r11,0+64(%rsp)
- movq %r12,8+64(%rsp)
-
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- leaq 16(%r8),%r8
- leaq 16(%r9),%r9
- movq %r10,40(%rsp)
- jmp .Lcbc_fast_dec_in_place_loop
-.Lcbc_fast_dec_in_place_done:
- movq 56(%rsp),%rdi
- movq %r11,0(%rdi)
- movq %r12,8(%rdi)
-
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
-.align 4
-.Lcbc_fast_cleanup:
- cmpl $0,80+240(%rsp)
- leaq 80(%rsp),%rdi
- je .Lcbc_exit
- movl $30,%ecx
- xorq %rax,%rax
-.long 0x90AB48F3
-
- jmp .Lcbc_exit
-
-
-.align 16
-.Lcbc_slow_prologue:
-
- leaq -88(%rsp),%rbp
- andq $-64,%rbp
-
- leaq -88-63(%rcx),%r10
- subq %rbp,%r10
- negq %r10
- andq $960,%r10
- subq %r10,%rbp
-
- xchgq %rsp,%rbp
-
- movq %rbp,16(%rsp)
-.Lcbc_slow_body:
-
-
-
-
- movq %r8,56(%rsp)
- movq %r8,%rbp
- movq %r9,%rbx
- movq %rsi,%r9
- movq %rdi,%r8
- movq %rcx,%r15
- movq %rdx,%r10
-
- movl 240(%r15),%eax
- movq %r15,0(%rsp)
- shll $4,%eax
- leaq (%r15,%rax,1),%rax
- movq %rax,8(%rsp)
-
-
- leaq 2048(%r14),%r14
- leaq 768-8(%rsp),%rax
- subq %r14,%rax
- andq $768,%rax
- leaq (%r14,%rax,1),%r14
-
- cmpq $0,%rbx
- je .LSLOW_DECRYPT
-
-
- testq $-16,%r10
- movl 0(%rbp),%eax
- movl 4(%rbp),%ebx
- movl 8(%rbp),%ecx
- movl 12(%rbp),%edx
- jz .Lcbc_slow_enc_tail
-
-.align 4
-.Lcbc_slow_enc_loop:
- xorl 0(%r8),%eax
- xorl 4(%r8),%ebx
- xorl 8(%r8),%ecx
- xorl 12(%r8),%edx
- movq 0(%rsp),%r15
- movq %r8,24(%rsp)
- movq %r9,32(%rsp)
- movq %r10,40(%rsp)
-
- call _x86_64_AES_encrypt_compact
-
- movq 24(%rsp),%r8
- movq 32(%rsp),%r9
- movq 40(%rsp),%r10
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- leaq 16(%r8),%r8
- leaq 16(%r9),%r9
- subq $16,%r10
- testq $-16,%r10
- jnz .Lcbc_slow_enc_loop
- testq $15,%r10
- jnz .Lcbc_slow_enc_tail
- movq 56(%rsp),%rbp
- movl %eax,0(%rbp)
- movl %ebx,4(%rbp)
- movl %ecx,8(%rbp)
- movl %edx,12(%rbp)
-
- jmp .Lcbc_exit
-
-.align 4
-.Lcbc_slow_enc_tail:
- movq %rax,%r11
- movq %rcx,%r12
- movq %r10,%rcx
- movq %r8,%rsi
- movq %r9,%rdi
-.long 0x9066A4F3
- movq $16,%rcx
- subq %r10,%rcx
- xorq %rax,%rax
-.long 0x9066AAF3
- movq %r9,%r8
- movq $16,%r10
- movq %r11,%rax
- movq %r12,%rcx
- jmp .Lcbc_slow_enc_loop
-
-.align 16
-.LSLOW_DECRYPT:
- shrq $3,%rax
- addq %rax,%r14
-
- movq 0(%rbp),%r11
- movq 8(%rbp),%r12
- movq %r11,0+64(%rsp)
- movq %r12,8+64(%rsp)
-
-.align 4
-.Lcbc_slow_dec_loop:
- movl 0(%r8),%eax
- movl 4(%r8),%ebx
- movl 8(%r8),%ecx
- movl 12(%r8),%edx
- movq 0(%rsp),%r15
- movq %r8,24(%rsp)
- movq %r9,32(%rsp)
- movq %r10,40(%rsp)
-
- call _x86_64_AES_decrypt_compact
-
- movq 24(%rsp),%r8
- movq 32(%rsp),%r9
- movq 40(%rsp),%r10
- xorl 0+64(%rsp),%eax
- xorl 4+64(%rsp),%ebx
- xorl 8+64(%rsp),%ecx
- xorl 12+64(%rsp),%edx
-
- movq 0(%r8),%r11
- movq 8(%r8),%r12
- subq $16,%r10
- jc .Lcbc_slow_dec_partial
- jz .Lcbc_slow_dec_done
-
- movq %r11,0+64(%rsp)
- movq %r12,8+64(%rsp)
-
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- leaq 16(%r8),%r8
- leaq 16(%r9),%r9
- jmp .Lcbc_slow_dec_loop
-.Lcbc_slow_dec_done:
- movq 56(%rsp),%rdi
- movq %r11,0(%rdi)
- movq %r12,8(%rdi)
-
- movl %eax,0(%r9)
- movl %ebx,4(%r9)
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
-
- jmp .Lcbc_exit
-
-.align 4
-.Lcbc_slow_dec_partial:
- movq 56(%rsp),%rdi
- movq %r11,0(%rdi)
- movq %r12,8(%rdi)
-
- movl %eax,0+64(%rsp)
- movl %ebx,4+64(%rsp)
- movl %ecx,8+64(%rsp)
- movl %edx,12+64(%rsp)
-
- movq %r9,%rdi
- leaq 64(%rsp),%rsi
- leaq 16(%r10),%rcx
-.long 0x9066A4F3
- jmp .Lcbc_exit
-
-.align 16
-.Lcbc_exit:
- movq 16(%rsp),%rsi
- movq (%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
-.Lcbc_popfq:
- popfq
-.Lcbc_epilogue:
- .byte 0xf3,0xc3
-.size AES_cbc_encrypt,.-AES_cbc_encrypt
-.align 64
-.LAES_Te:
-.long 0xa56363c6,0xa56363c6
-.long 0x847c7cf8,0x847c7cf8
-.long 0x997777ee,0x997777ee
-.long 0x8d7b7bf6,0x8d7b7bf6
-.long 0x0df2f2ff,0x0df2f2ff
-.long 0xbd6b6bd6,0xbd6b6bd6
-.long 0xb16f6fde,0xb16f6fde
-.long 0x54c5c591,0x54c5c591
-.long 0x50303060,0x50303060
-.long 0x03010102,0x03010102
-.long 0xa96767ce,0xa96767ce
-.long 0x7d2b2b56,0x7d2b2b56
-.long 0x19fefee7,0x19fefee7
-.long 0x62d7d7b5,0x62d7d7b5
-.long 0xe6abab4d,0xe6abab4d
-.long 0x9a7676ec,0x9a7676ec
-.long 0x45caca8f,0x45caca8f
-.long 0x9d82821f,0x9d82821f
-.long 0x40c9c989,0x40c9c989
-.long 0x877d7dfa,0x877d7dfa
-.long 0x15fafaef,0x15fafaef
-.long 0xeb5959b2,0xeb5959b2
-.long 0xc947478e,0xc947478e
-.long 0x0bf0f0fb,0x0bf0f0fb
-.long 0xecadad41,0xecadad41
-.long 0x67d4d4b3,0x67d4d4b3
-.long 0xfda2a25f,0xfda2a25f
-.long 0xeaafaf45,0xeaafaf45
-.long 0xbf9c9c23,0xbf9c9c23
-.long 0xf7a4a453,0xf7a4a453
-.long 0x967272e4,0x967272e4
-.long 0x5bc0c09b,0x5bc0c09b
-.long 0xc2b7b775,0xc2b7b775
-.long 0x1cfdfde1,0x1cfdfde1
-.long 0xae93933d,0xae93933d
-.long 0x6a26264c,0x6a26264c
-.long 0x5a36366c,0x5a36366c
-.long 0x413f3f7e,0x413f3f7e
-.long 0x02f7f7f5,0x02f7f7f5
-.long 0x4fcccc83,0x4fcccc83
-.long 0x5c343468,0x5c343468
-.long 0xf4a5a551,0xf4a5a551
-.long 0x34e5e5d1,0x34e5e5d1
-.long 0x08f1f1f9,0x08f1f1f9
-.long 0x937171e2,0x937171e2
-.long 0x73d8d8ab,0x73d8d8ab
-.long 0x53313162,0x53313162
-.long 0x3f15152a,0x3f15152a
-.long 0x0c040408,0x0c040408
-.long 0x52c7c795,0x52c7c795
-.long 0x65232346,0x65232346
-.long 0x5ec3c39d,0x5ec3c39d
-.long 0x28181830,0x28181830
-.long 0xa1969637,0xa1969637
-.long 0x0f05050a,0x0f05050a
-.long 0xb59a9a2f,0xb59a9a2f
-.long 0x0907070e,0x0907070e
-.long 0x36121224,0x36121224
-.long 0x9b80801b,0x9b80801b
-.long 0x3de2e2df,0x3de2e2df
-.long 0x26ebebcd,0x26ebebcd
-.long 0x6927274e,0x6927274e
-.long 0xcdb2b27f,0xcdb2b27f
-.long 0x9f7575ea,0x9f7575ea
-.long 0x1b090912,0x1b090912
-.long 0x9e83831d,0x9e83831d
-.long 0x742c2c58,0x742c2c58
-.long 0x2e1a1a34,0x2e1a1a34
-.long 0x2d1b1b36,0x2d1b1b36
-.long 0xb26e6edc,0xb26e6edc
-.long 0xee5a5ab4,0xee5a5ab4
-.long 0xfba0a05b,0xfba0a05b
-.long 0xf65252a4,0xf65252a4
-.long 0x4d3b3b76,0x4d3b3b76
-.long 0x61d6d6b7,0x61d6d6b7
-.long 0xceb3b37d,0xceb3b37d
-.long 0x7b292952,0x7b292952
-.long 0x3ee3e3dd,0x3ee3e3dd
-.long 0x712f2f5e,0x712f2f5e
-.long 0x97848413,0x97848413
-.long 0xf55353a6,0xf55353a6
-.long 0x68d1d1b9,0x68d1d1b9
-.long 0x00000000,0x00000000
-.long 0x2cededc1,0x2cededc1
-.long 0x60202040,0x60202040
-.long 0x1ffcfce3,0x1ffcfce3
-.long 0xc8b1b179,0xc8b1b179
-.long 0xed5b5bb6,0xed5b5bb6
-.long 0xbe6a6ad4,0xbe6a6ad4
-.long 0x46cbcb8d,0x46cbcb8d
-.long 0xd9bebe67,0xd9bebe67
-.long 0x4b393972,0x4b393972
-.long 0xde4a4a94,0xde4a4a94
-.long 0xd44c4c98,0xd44c4c98
-.long 0xe85858b0,0xe85858b0
-.long 0x4acfcf85,0x4acfcf85
-.long 0x6bd0d0bb,0x6bd0d0bb
-.long 0x2aefefc5,0x2aefefc5
-.long 0xe5aaaa4f,0xe5aaaa4f
-.long 0x16fbfbed,0x16fbfbed
-.long 0xc5434386,0xc5434386
-.long 0xd74d4d9a,0xd74d4d9a
-.long 0x55333366,0x55333366
-.long 0x94858511,0x94858511
-.long 0xcf45458a,0xcf45458a
-.long 0x10f9f9e9,0x10f9f9e9
-.long 0x06020204,0x06020204
-.long 0x817f7ffe,0x817f7ffe
-.long 0xf05050a0,0xf05050a0
-.long 0x443c3c78,0x443c3c78
-.long 0xba9f9f25,0xba9f9f25
-.long 0xe3a8a84b,0xe3a8a84b
-.long 0xf35151a2,0xf35151a2
-.long 0xfea3a35d,0xfea3a35d
-.long 0xc0404080,0xc0404080
-.long 0x8a8f8f05,0x8a8f8f05
-.long 0xad92923f,0xad92923f
-.long 0xbc9d9d21,0xbc9d9d21
-.long 0x48383870,0x48383870
-.long 0x04f5f5f1,0x04f5f5f1
-.long 0xdfbcbc63,0xdfbcbc63
-.long 0xc1b6b677,0xc1b6b677
-.long 0x75dadaaf,0x75dadaaf
-.long 0x63212142,0x63212142
-.long 0x30101020,0x30101020
-.long 0x1affffe5,0x1affffe5
-.long 0x0ef3f3fd,0x0ef3f3fd
-.long 0x6dd2d2bf,0x6dd2d2bf
-.long 0x4ccdcd81,0x4ccdcd81
-.long 0x140c0c18,0x140c0c18
-.long 0x35131326,0x35131326
-.long 0x2fececc3,0x2fececc3
-.long 0xe15f5fbe,0xe15f5fbe
-.long 0xa2979735,0xa2979735
-.long 0xcc444488,0xcc444488
-.long 0x3917172e,0x3917172e
-.long 0x57c4c493,0x57c4c493
-.long 0xf2a7a755,0xf2a7a755
-.long 0x827e7efc,0x827e7efc
-.long 0x473d3d7a,0x473d3d7a
-.long 0xac6464c8,0xac6464c8
-.long 0xe75d5dba,0xe75d5dba
-.long 0x2b191932,0x2b191932
-.long 0x957373e6,0x957373e6
-.long 0xa06060c0,0xa06060c0
-.long 0x98818119,0x98818119
-.long 0xd14f4f9e,0xd14f4f9e
-.long 0x7fdcdca3,0x7fdcdca3
-.long 0x66222244,0x66222244
-.long 0x7e2a2a54,0x7e2a2a54
-.long 0xab90903b,0xab90903b
-.long 0x8388880b,0x8388880b
-.long 0xca46468c,0xca46468c
-.long 0x29eeeec7,0x29eeeec7
-.long 0xd3b8b86b,0xd3b8b86b
-.long 0x3c141428,0x3c141428
-.long 0x79dedea7,0x79dedea7
-.long 0xe25e5ebc,0xe25e5ebc
-.long 0x1d0b0b16,0x1d0b0b16
-.long 0x76dbdbad,0x76dbdbad
-.long 0x3be0e0db,0x3be0e0db
-.long 0x56323264,0x56323264
-.long 0x4e3a3a74,0x4e3a3a74
-.long 0x1e0a0a14,0x1e0a0a14
-.long 0xdb494992,0xdb494992
-.long 0x0a06060c,0x0a06060c
-.long 0x6c242448,0x6c242448
-.long 0xe45c5cb8,0xe45c5cb8
-.long 0x5dc2c29f,0x5dc2c29f
-.long 0x6ed3d3bd,0x6ed3d3bd
-.long 0xefacac43,0xefacac43
-.long 0xa66262c4,0xa66262c4
-.long 0xa8919139,0xa8919139
-.long 0xa4959531,0xa4959531
-.long 0x37e4e4d3,0x37e4e4d3
-.long 0x8b7979f2,0x8b7979f2
-.long 0x32e7e7d5,0x32e7e7d5
-.long 0x43c8c88b,0x43c8c88b
-.long 0x5937376e,0x5937376e
-.long 0xb76d6dda,0xb76d6dda
-.long 0x8c8d8d01,0x8c8d8d01
-.long 0x64d5d5b1,0x64d5d5b1
-.long 0xd24e4e9c,0xd24e4e9c
-.long 0xe0a9a949,0xe0a9a949
-.long 0xb46c6cd8,0xb46c6cd8
-.long 0xfa5656ac,0xfa5656ac
-.long 0x07f4f4f3,0x07f4f4f3
-.long 0x25eaeacf,0x25eaeacf
-.long 0xaf6565ca,0xaf6565ca
-.long 0x8e7a7af4,0x8e7a7af4
-.long 0xe9aeae47,0xe9aeae47
-.long 0x18080810,0x18080810
-.long 0xd5baba6f,0xd5baba6f
-.long 0x887878f0,0x887878f0
-.long 0x6f25254a,0x6f25254a
-.long 0x722e2e5c,0x722e2e5c
-.long 0x241c1c38,0x241c1c38
-.long 0xf1a6a657,0xf1a6a657
-.long 0xc7b4b473,0xc7b4b473
-.long 0x51c6c697,0x51c6c697
-.long 0x23e8e8cb,0x23e8e8cb
-.long 0x7cdddda1,0x7cdddda1
-.long 0x9c7474e8,0x9c7474e8
-.long 0x211f1f3e,0x211f1f3e
-.long 0xdd4b4b96,0xdd4b4b96
-.long 0xdcbdbd61,0xdcbdbd61
-.long 0x868b8b0d,0x868b8b0d
-.long 0x858a8a0f,0x858a8a0f
-.long 0x907070e0,0x907070e0
-.long 0x423e3e7c,0x423e3e7c
-.long 0xc4b5b571,0xc4b5b571
-.long 0xaa6666cc,0xaa6666cc
-.long 0xd8484890,0xd8484890
-.long 0x05030306,0x05030306
-.long 0x01f6f6f7,0x01f6f6f7
-.long 0x120e0e1c,0x120e0e1c
-.long 0xa36161c2,0xa36161c2
-.long 0x5f35356a,0x5f35356a
-.long 0xf95757ae,0xf95757ae
-.long 0xd0b9b969,0xd0b9b969
-.long 0x91868617,0x91868617
-.long 0x58c1c199,0x58c1c199
-.long 0x271d1d3a,0x271d1d3a
-.long 0xb99e9e27,0xb99e9e27
-.long 0x38e1e1d9,0x38e1e1d9
-.long 0x13f8f8eb,0x13f8f8eb
-.long 0xb398982b,0xb398982b
-.long 0x33111122,0x33111122
-.long 0xbb6969d2,0xbb6969d2
-.long 0x70d9d9a9,0x70d9d9a9
-.long 0x898e8e07,0x898e8e07
-.long 0xa7949433,0xa7949433
-.long 0xb69b9b2d,0xb69b9b2d
-.long 0x221e1e3c,0x221e1e3c
-.long 0x92878715,0x92878715
-.long 0x20e9e9c9,0x20e9e9c9
-.long 0x49cece87,0x49cece87
-.long 0xff5555aa,0xff5555aa
-.long 0x78282850,0x78282850
-.long 0x7adfdfa5,0x7adfdfa5
-.long 0x8f8c8c03,0x8f8c8c03
-.long 0xf8a1a159,0xf8a1a159
-.long 0x80898909,0x80898909
-.long 0x170d0d1a,0x170d0d1a
-.long 0xdabfbf65,0xdabfbf65
-.long 0x31e6e6d7,0x31e6e6d7
-.long 0xc6424284,0xc6424284
-.long 0xb86868d0,0xb86868d0
-.long 0xc3414182,0xc3414182
-.long 0xb0999929,0xb0999929
-.long 0x772d2d5a,0x772d2d5a
-.long 0x110f0f1e,0x110f0f1e
-.long 0xcbb0b07b,0xcbb0b07b
-.long 0xfc5454a8,0xfc5454a8
-.long 0xd6bbbb6d,0xd6bbbb6d
-.long 0x3a16162c,0x3a16162c
-.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5
-.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76
-.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0
-.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0
-.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc
-.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15
-.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a
-.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75
-.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0
-.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84
-.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b
-.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf
-.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85
-.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8
-.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5
-.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2
-.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17
-.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73
-.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88
-.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb
-.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c
-.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79
-.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9
-.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08
-.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6
-.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a
-.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e
-.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e
-.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94
-.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf
-.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68
-.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16
-.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5
-.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76
-.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0
-.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0
-.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc
-.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15
-.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a
-.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75
-.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0
-.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84
-.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b
-.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf
-.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85
-.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8
-.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5
-.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2
-.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17
-.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73
-.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88
-.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb
-.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c
-.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79
-.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9
-.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08
-.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6
-.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a
-.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e
-.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e
-.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94
-.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf
-.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68
-.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16
-.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5
-.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76
-.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0
-.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0
-.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc
-.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15
-.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a
-.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75
-.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0
-.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84
-.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b
-.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf
-.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85
-.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8
-.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5
-.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2
-.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17
-.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73
-.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88
-.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb
-.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c
-.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79
-.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9
-.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08
-.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6
-.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a
-.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e
-.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e
-.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94
-.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf
-.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68
-.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16
-.byte 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5
-.byte 0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76
-.byte 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0
-.byte 0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0
-.byte 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc
-.byte 0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15
-.byte 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a
-.byte 0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75
-.byte 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0
-.byte 0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84
-.byte 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b
-.byte 0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf
-.byte 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85
-.byte 0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8
-.byte 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5
-.byte 0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2
-.byte 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17
-.byte 0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73
-.byte 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88
-.byte 0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb
-.byte 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c
-.byte 0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79
-.byte 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9
-.byte 0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08
-.byte 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6
-.byte 0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a
-.byte 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e
-.byte 0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e
-.byte 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94
-.byte 0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf
-.byte 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68
-.byte 0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16
-.long 0x00000001, 0x00000002, 0x00000004, 0x00000008
-.long 0x00000010, 0x00000020, 0x00000040, 0x00000080
-.long 0x0000001b, 0x00000036, 0x80808080, 0x80808080
-.long 0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
-.align 64
-.LAES_Td:
-.long 0x50a7f451,0x50a7f451
-.long 0x5365417e,0x5365417e
-.long 0xc3a4171a,0xc3a4171a
-.long 0x965e273a,0x965e273a
-.long 0xcb6bab3b,0xcb6bab3b
-.long 0xf1459d1f,0xf1459d1f
-.long 0xab58faac,0xab58faac
-.long 0x9303e34b,0x9303e34b
-.long 0x55fa3020,0x55fa3020
-.long 0xf66d76ad,0xf66d76ad
-.long 0x9176cc88,0x9176cc88
-.long 0x254c02f5,0x254c02f5
-.long 0xfcd7e54f,0xfcd7e54f
-.long 0xd7cb2ac5,0xd7cb2ac5
-.long 0x80443526,0x80443526
-.long 0x8fa362b5,0x8fa362b5
-.long 0x495ab1de,0x495ab1de
-.long 0x671bba25,0x671bba25
-.long 0x980eea45,0x980eea45
-.long 0xe1c0fe5d,0xe1c0fe5d
-.long 0x02752fc3,0x02752fc3
-.long 0x12f04c81,0x12f04c81
-.long 0xa397468d,0xa397468d
-.long 0xc6f9d36b,0xc6f9d36b
-.long 0xe75f8f03,0xe75f8f03
-.long 0x959c9215,0x959c9215
-.long 0xeb7a6dbf,0xeb7a6dbf
-.long 0xda595295,0xda595295
-.long 0x2d83bed4,0x2d83bed4
-.long 0xd3217458,0xd3217458
-.long 0x2969e049,0x2969e049
-.long 0x44c8c98e,0x44c8c98e
-.long 0x6a89c275,0x6a89c275
-.long 0x78798ef4,0x78798ef4
-.long 0x6b3e5899,0x6b3e5899
-.long 0xdd71b927,0xdd71b927
-.long 0xb64fe1be,0xb64fe1be
-.long 0x17ad88f0,0x17ad88f0
-.long 0x66ac20c9,0x66ac20c9
-.long 0xb43ace7d,0xb43ace7d
-.long 0x184adf63,0x184adf63
-.long 0x82311ae5,0x82311ae5
-.long 0x60335197,0x60335197
-.long 0x457f5362,0x457f5362
-.long 0xe07764b1,0xe07764b1
-.long 0x84ae6bbb,0x84ae6bbb
-.long 0x1ca081fe,0x1ca081fe
-.long 0x942b08f9,0x942b08f9
-.long 0x58684870,0x58684870
-.long 0x19fd458f,0x19fd458f
-.long 0x876cde94,0x876cde94
-.long 0xb7f87b52,0xb7f87b52
-.long 0x23d373ab,0x23d373ab
-.long 0xe2024b72,0xe2024b72
-.long 0x578f1fe3,0x578f1fe3
-.long 0x2aab5566,0x2aab5566
-.long 0x0728ebb2,0x0728ebb2
-.long 0x03c2b52f,0x03c2b52f
-.long 0x9a7bc586,0x9a7bc586
-.long 0xa50837d3,0xa50837d3
-.long 0xf2872830,0xf2872830
-.long 0xb2a5bf23,0xb2a5bf23
-.long 0xba6a0302,0xba6a0302
-.long 0x5c8216ed,0x5c8216ed
-.long 0x2b1ccf8a,0x2b1ccf8a
-.long 0x92b479a7,0x92b479a7
-.long 0xf0f207f3,0xf0f207f3
-.long 0xa1e2694e,0xa1e2694e
-.long 0xcdf4da65,0xcdf4da65
-.long 0xd5be0506,0xd5be0506
-.long 0x1f6234d1,0x1f6234d1
-.long 0x8afea6c4,0x8afea6c4
-.long 0x9d532e34,0x9d532e34
-.long 0xa055f3a2,0xa055f3a2
-.long 0x32e18a05,0x32e18a05
-.long 0x75ebf6a4,0x75ebf6a4
-.long 0x39ec830b,0x39ec830b
-.long 0xaaef6040,0xaaef6040
-.long 0x069f715e,0x069f715e
-.long 0x51106ebd,0x51106ebd
-.long 0xf98a213e,0xf98a213e
-.long 0x3d06dd96,0x3d06dd96
-.long 0xae053edd,0xae053edd
-.long 0x46bde64d,0x46bde64d
-.long 0xb58d5491,0xb58d5491
-.long 0x055dc471,0x055dc471
-.long 0x6fd40604,0x6fd40604
-.long 0xff155060,0xff155060
-.long 0x24fb9819,0x24fb9819
-.long 0x97e9bdd6,0x97e9bdd6
-.long 0xcc434089,0xcc434089
-.long 0x779ed967,0x779ed967
-.long 0xbd42e8b0,0xbd42e8b0
-.long 0x888b8907,0x888b8907
-.long 0x385b19e7,0x385b19e7
-.long 0xdbeec879,0xdbeec879
-.long 0x470a7ca1,0x470a7ca1
-.long 0xe90f427c,0xe90f427c
-.long 0xc91e84f8,0xc91e84f8
-.long 0x00000000,0x00000000
-.long 0x83868009,0x83868009
-.long 0x48ed2b32,0x48ed2b32
-.long 0xac70111e,0xac70111e
-.long 0x4e725a6c,0x4e725a6c
-.long 0xfbff0efd,0xfbff0efd
-.long 0x5638850f,0x5638850f
-.long 0x1ed5ae3d,0x1ed5ae3d
-.long 0x27392d36,0x27392d36
-.long 0x64d90f0a,0x64d90f0a
-.long 0x21a65c68,0x21a65c68
-.long 0xd1545b9b,0xd1545b9b
-.long 0x3a2e3624,0x3a2e3624
-.long 0xb1670a0c,0xb1670a0c
-.long 0x0fe75793,0x0fe75793
-.long 0xd296eeb4,0xd296eeb4
-.long 0x9e919b1b,0x9e919b1b
-.long 0x4fc5c080,0x4fc5c080
-.long 0xa220dc61,0xa220dc61
-.long 0x694b775a,0x694b775a
-.long 0x161a121c,0x161a121c
-.long 0x0aba93e2,0x0aba93e2
-.long 0xe52aa0c0,0xe52aa0c0
-.long 0x43e0223c,0x43e0223c
-.long 0x1d171b12,0x1d171b12
-.long 0x0b0d090e,0x0b0d090e
-.long 0xadc78bf2,0xadc78bf2
-.long 0xb9a8b62d,0xb9a8b62d
-.long 0xc8a91e14,0xc8a91e14
-.long 0x8519f157,0x8519f157
-.long 0x4c0775af,0x4c0775af
-.long 0xbbdd99ee,0xbbdd99ee
-.long 0xfd607fa3,0xfd607fa3
-.long 0x9f2601f7,0x9f2601f7
-.long 0xbcf5725c,0xbcf5725c
-.long 0xc53b6644,0xc53b6644
-.long 0x347efb5b,0x347efb5b
-.long 0x7629438b,0x7629438b
-.long 0xdcc623cb,0xdcc623cb
-.long 0x68fcedb6,0x68fcedb6
-.long 0x63f1e4b8,0x63f1e4b8
-.long 0xcadc31d7,0xcadc31d7
-.long 0x10856342,0x10856342
-.long 0x40229713,0x40229713
-.long 0x2011c684,0x2011c684
-.long 0x7d244a85,0x7d244a85
-.long 0xf83dbbd2,0xf83dbbd2
-.long 0x1132f9ae,0x1132f9ae
-.long 0x6da129c7,0x6da129c7
-.long 0x4b2f9e1d,0x4b2f9e1d
-.long 0xf330b2dc,0xf330b2dc
-.long 0xec52860d,0xec52860d
-.long 0xd0e3c177,0xd0e3c177
-.long 0x6c16b32b,0x6c16b32b
-.long 0x99b970a9,0x99b970a9
-.long 0xfa489411,0xfa489411
-.long 0x2264e947,0x2264e947
-.long 0xc48cfca8,0xc48cfca8
-.long 0x1a3ff0a0,0x1a3ff0a0
-.long 0xd82c7d56,0xd82c7d56
-.long 0xef903322,0xef903322
-.long 0xc74e4987,0xc74e4987
-.long 0xc1d138d9,0xc1d138d9
-.long 0xfea2ca8c,0xfea2ca8c
-.long 0x360bd498,0x360bd498
-.long 0xcf81f5a6,0xcf81f5a6
-.long 0x28de7aa5,0x28de7aa5
-.long 0x268eb7da,0x268eb7da
-.long 0xa4bfad3f,0xa4bfad3f
-.long 0xe49d3a2c,0xe49d3a2c
-.long 0x0d927850,0x0d927850
-.long 0x9bcc5f6a,0x9bcc5f6a
-.long 0x62467e54,0x62467e54
-.long 0xc2138df6,0xc2138df6
-.long 0xe8b8d890,0xe8b8d890
-.long 0x5ef7392e,0x5ef7392e
-.long 0xf5afc382,0xf5afc382
-.long 0xbe805d9f,0xbe805d9f
-.long 0x7c93d069,0x7c93d069
-.long 0xa92dd56f,0xa92dd56f
-.long 0xb31225cf,0xb31225cf
-.long 0x3b99acc8,0x3b99acc8
-.long 0xa77d1810,0xa77d1810
-.long 0x6e639ce8,0x6e639ce8
-.long 0x7bbb3bdb,0x7bbb3bdb
-.long 0x097826cd,0x097826cd
-.long 0xf418596e,0xf418596e
-.long 0x01b79aec,0x01b79aec
-.long 0xa89a4f83,0xa89a4f83
-.long 0x656e95e6,0x656e95e6
-.long 0x7ee6ffaa,0x7ee6ffaa
-.long 0x08cfbc21,0x08cfbc21
-.long 0xe6e815ef,0xe6e815ef
-.long 0xd99be7ba,0xd99be7ba
-.long 0xce366f4a,0xce366f4a
-.long 0xd4099fea,0xd4099fea
-.long 0xd67cb029,0xd67cb029
-.long 0xafb2a431,0xafb2a431
-.long 0x31233f2a,0x31233f2a
-.long 0x3094a5c6,0x3094a5c6
-.long 0xc066a235,0xc066a235
-.long 0x37bc4e74,0x37bc4e74
-.long 0xa6ca82fc,0xa6ca82fc
-.long 0xb0d090e0,0xb0d090e0
-.long 0x15d8a733,0x15d8a733
-.long 0x4a9804f1,0x4a9804f1
-.long 0xf7daec41,0xf7daec41
-.long 0x0e50cd7f,0x0e50cd7f
-.long 0x2ff69117,0x2ff69117
-.long 0x8dd64d76,0x8dd64d76
-.long 0x4db0ef43,0x4db0ef43
-.long 0x544daacc,0x544daacc
-.long 0xdf0496e4,0xdf0496e4
-.long 0xe3b5d19e,0xe3b5d19e
-.long 0x1b886a4c,0x1b886a4c
-.long 0xb81f2cc1,0xb81f2cc1
-.long 0x7f516546,0x7f516546
-.long 0x04ea5e9d,0x04ea5e9d
-.long 0x5d358c01,0x5d358c01
-.long 0x737487fa,0x737487fa
-.long 0x2e410bfb,0x2e410bfb
-.long 0x5a1d67b3,0x5a1d67b3
-.long 0x52d2db92,0x52d2db92
-.long 0x335610e9,0x335610e9
-.long 0x1347d66d,0x1347d66d
-.long 0x8c61d79a,0x8c61d79a
-.long 0x7a0ca137,0x7a0ca137
-.long 0x8e14f859,0x8e14f859
-.long 0x893c13eb,0x893c13eb
-.long 0xee27a9ce,0xee27a9ce
-.long 0x35c961b7,0x35c961b7
-.long 0xede51ce1,0xede51ce1
-.long 0x3cb1477a,0x3cb1477a
-.long 0x59dfd29c,0x59dfd29c
-.long 0x3f73f255,0x3f73f255
-.long 0x79ce1418,0x79ce1418
-.long 0xbf37c773,0xbf37c773
-.long 0xeacdf753,0xeacdf753
-.long 0x5baafd5f,0x5baafd5f
-.long 0x146f3ddf,0x146f3ddf
-.long 0x86db4478,0x86db4478
-.long 0x81f3afca,0x81f3afca
-.long 0x3ec468b9,0x3ec468b9
-.long 0x2c342438,0x2c342438
-.long 0x5f40a3c2,0x5f40a3c2
-.long 0x72c31d16,0x72c31d16
-.long 0x0c25e2bc,0x0c25e2bc
-.long 0x8b493c28,0x8b493c28
-.long 0x41950dff,0x41950dff
-.long 0x7101a839,0x7101a839
-.long 0xdeb30c08,0xdeb30c08
-.long 0x9ce4b4d8,0x9ce4b4d8
-.long 0x90c15664,0x90c15664
-.long 0x6184cb7b,0x6184cb7b
-.long 0x70b632d5,0x70b632d5
-.long 0x745c6c48,0x745c6c48
-.long 0x4257b8d0,0x4257b8d0
-.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38
-.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb
-.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87
-.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb
-.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d
-.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e
-.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2
-.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25
-.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16
-.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92
-.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda
-.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84
-.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a
-.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06
-.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02
-.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b
-.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea
-.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73
-.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85
-.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e
-.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89
-.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b
-.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20
-.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4
-.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31
-.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f
-.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d
-.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef
-.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0
-.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61
-.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26
-.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d
-.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38
-.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb
-.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87
-.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb
-.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d
-.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e
-.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2
-.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25
-.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16
-.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92
-.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda
-.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84
-.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a
-.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06
-.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02
-.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b
-.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea
-.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73
-.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85
-.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e
-.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89
-.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b
-.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20
-.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4
-.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31
-.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f
-.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d
-.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef
-.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0
-.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61
-.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26
-.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d
-.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38
-.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb
-.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87
-.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb
-.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d
-.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e
-.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2
-.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25
-.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16
-.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92
-.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda
-.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84
-.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a
-.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06
-.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02
-.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b
-.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea
-.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73
-.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85
-.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e
-.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89
-.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b
-.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20
-.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4
-.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31
-.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f
-.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d
-.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef
-.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0
-.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61
-.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26
-.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d
-.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-.byte 0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38
-.byte 0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb
-.byte 0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87
-.byte 0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb
-.byte 0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d
-.byte 0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e
-.byte 0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2
-.byte 0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25
-.byte 0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16
-.byte 0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92
-.byte 0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda
-.byte 0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84
-.byte 0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a
-.byte 0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06
-.byte 0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02
-.byte 0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b
-.byte 0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea
-.byte 0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73
-.byte 0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85
-.byte 0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e
-.byte 0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89
-.byte 0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b
-.byte 0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20
-.byte 0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4
-.byte 0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31
-.byte 0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f
-.byte 0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d
-.byte 0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef
-.byte 0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0
-.byte 0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61
-.byte 0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26
-.byte 0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d
-.long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
-.long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-.byte 65,69,83,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align 64
diff --git a/app/openssl/crypto/aes/asm/aes-x86_64.pl b/app/openssl/crypto/aes/asm/aes-x86_64.pl
deleted file mode 100755
index 34cbb5d8..00000000
--- a/app/openssl/crypto/aes/asm/aes-x86_64.pl
+++ /dev/null
@@ -1,2819 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# Version 2.1.
-#
-# aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
-# an Opteron 240 CPU], plus all the bells-n-whistles from the 32-bit
-# version [you'll notice a lot of resemblance], such as compressed
-# S-boxes in little-endian byte order, prefetching of these tables in
-# CBC mode, avoidance of L1 cache aliasing between the stack frame, the
-# key schedule and the already mentioned tables, a compressed Td4...
-#
-# Performance in number of cycles per processed byte for 128-bit key:
-#
-#          ECB encrypt   ECB decrypt   CBC large chunk
-# AMD64        33            41            13.0
-# EM64T        38            59            18.6(*)
-# Core 2       30            43            14.5(*)
-#
-# (*) with hyper-threading off
-
-$flavour = shift;
-$output = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
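The block above wires the generator to the perlasm translator: the first
argument selects the output flavour, the second the output file, and the
generated text is piped through x86_64-xlate.pl. A typical invocation, as an
illustrative sketch (the exact flavour strings come from the build system's
PERLASM_SCHEME, so treat them as assumptions):

    # perl aes-x86_64.pl elf    aes-x86_64.s     # GNU as / ELF
    # perl aes-x86_64.pl macosx aes-x86_64.s     # Mach-O
    # perl aes-x86_64.pl nasm   aes-x86_64.asm   # Win64; also sets $win64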
-$verticalspin=1; # unlike in the 32-bit version, $verticalspin performs
- # ~15% better on both AMD and Intel cores
-$speed_limit=512; # see aes-586.pl for details
-
-$code=".text\n";
-
-$s0="%eax";
-$s1="%ebx";
-$s2="%ecx";
-$s3="%edx";
-$acc0="%esi"; $mask80="%rsi";
-$acc1="%edi"; $maskfe="%rdi";
-$acc2="%ebp"; $mask1b="%rbp";
-$inp="%r8";
-$out="%r9";
-$t0="%r10d";
-$t1="%r11d";
-$t2="%r12d";
-$rnds="%r13d";
-$sbox="%r14";
-$key="%r15";
-
-sub hi() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1h/; $r; }  # %eax -> %ah
-sub lo() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/;        # %eax -> %al
- $r =~ s/%[er]([sd]i)/%\1l/;                                # %esi -> %sil
- $r =~ s/%(r[0-9]+)[d]?/%\1b/; $r; }                        # %r10d -> %r10b
-sub LO() { my $r=shift; $r =~ s/%r([a-z]+)/%e\1/;           # %rax -> %eax
- $r =~ s/%r([0-9]+)/%r\1d/; $r; }                           # %r10 -> %r10d
-sub _data_word()
-{ my $i;
- while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
-}
-sub data_word()
-{ my $i;
- my $last=pop(@_);
- $code.=".long\t";
- while(defined($i=shift)) { $code.=sprintf"0x%08x,",$i; }
- $code.=sprintf"0x%08x\n",$last;
-}
-
-sub data_byte()
-{ my $i;
- my $last=pop(@_);
- $code.=".byte\t";
- while(defined($i=shift)) { $code.=sprintf"0x%02x,",$i&0xff; }
- $code.=sprintf"0x%02x\n",$last&0xff;
-}
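_data_word() emits every constant twice where data_word() emits each once; the
duplication is the "compressed S-boxes" trick from the header: with a 32-bit
entry stored back-to-back in 8 bytes, a load at byte offset n of the entry
(n = 0..3) returns the little-endian value rotated right by 8n bits, which is
what the 0/1/2/3($sbox,$acc,8) addressing in encvert() below relies on. A
self-contained Perl check of that property (illustrative, not part of the
module):

    my $w = 0xa56363c6;                  # first Te0 entry, see .LAES_Te below
    my $pair = pack("V2", $w, $w);       # the 8 bytes _data_word() emits
    for my $n (0 .. 3) {
        my $r = unpack("V", substr($pair, $n, 4));
        printf "+%d: 0x%08x\n", $n, $r;  # $w rotated right by 8*$n bits
    }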
-
-sub encvert()
-{ my $t3="%r8d"; # zaps $inp!
-
-$code.=<<___;
- # favor 3-way issue Opteron pipeline...
- movzb `&lo("$s0")`,$acc0
- movzb `&lo("$s1")`,$acc1
- movzb `&lo("$s2")`,$acc2
- mov 0($sbox,$acc0,8),$t0
- mov 0($sbox,$acc1,8),$t1
- mov 0($sbox,$acc2,8),$t2
-
- movzb `&hi("$s1")`,$acc0
- movzb `&hi("$s2")`,$acc1
- movzb `&lo("$s3")`,$acc2
- xor 3($sbox,$acc0,8),$t0
- xor 3($sbox,$acc1,8),$t1
- mov 0($sbox,$acc2,8),$t3
-
- movzb `&hi("$s3")`,$acc0
- shr \$16,$s2
- movzb `&hi("$s0")`,$acc2
- xor 3($sbox,$acc0,8),$t2
- shr \$16,$s3
- xor 3($sbox,$acc2,8),$t3
-
- shr \$16,$s1
- lea 16($key),$key
- shr \$16,$s0
-
- movzb `&lo("$s2")`,$acc0
- movzb `&lo("$s3")`,$acc1
- movzb `&lo("$s0")`,$acc2
- xor 2($sbox,$acc0,8),$t0
- xor 2($sbox,$acc1,8),$t1
- xor 2($sbox,$acc2,8),$t2
-
- movzb `&hi("$s3")`,$acc0
- movzb `&hi("$s0")`,$acc1
- movzb `&lo("$s1")`,$acc2
- xor 1($sbox,$acc0,8),$t0
- xor 1($sbox,$acc1,8),$t1
- xor 2($sbox,$acc2,8),$t3
-
- mov 12($key),$s3
- movzb `&hi("$s1")`,$acc1
- movzb `&hi("$s2")`,$acc2
- mov 0($key),$s0
- xor 1($sbox,$acc1,8),$t2
- xor 1($sbox,$acc2,8),$t3
-
- mov 4($key),$s1
- mov 8($key),$s2
- xor $t0,$s0
- xor $t1,$s1
- xor $t2,$s2
- xor $t3,$s3
-___
-}
-
-sub enclastvert()
-{ my $t3="%r8d"; # zaps $inp!
-
-$code.=<<___;
- movzb `&lo("$s0")`,$acc0
- movzb `&lo("$s1")`,$acc1
- movzb `&lo("$s2")`,$acc2
- movzb 2($sbox,$acc0,8),$t0
- movzb 2($sbox,$acc1,8),$t1
- movzb 2($sbox,$acc2,8),$t2
-
- movzb `&lo("$s3")`,$acc0
- movzb `&hi("$s1")`,$acc1
- movzb `&hi("$s2")`,$acc2
- movzb 2($sbox,$acc0,8),$t3
- mov 0($sbox,$acc1,8),$acc1 #$t0
- mov 0($sbox,$acc2,8),$acc2 #$t1
-
- and \$0x0000ff00,$acc1
- and \$0x0000ff00,$acc2
-
- xor $acc1,$t0
- xor $acc2,$t1
- shr \$16,$s2
-
- movzb `&hi("$s3")`,$acc0
- movzb `&hi("$s0")`,$acc1
- shr \$16,$s3
- mov 0($sbox,$acc0,8),$acc0 #$t2
- mov 0($sbox,$acc1,8),$acc1 #$t3
-
- and \$0x0000ff00,$acc0
- and \$0x0000ff00,$acc1
- shr \$16,$s1
- xor $acc0,$t2
- xor $acc1,$t3
- shr \$16,$s0
-
- movzb `&lo("$s2")`,$acc0
- movzb `&lo("$s3")`,$acc1
- movzb `&lo("$s0")`,$acc2
- mov 0($sbox,$acc0,8),$acc0 #$t0
- mov 0($sbox,$acc1,8),$acc1 #$t1
- mov 0($sbox,$acc2,8),$acc2 #$t2
-
- and \$0x00ff0000,$acc0
- and \$0x00ff0000,$acc1
- and \$0x00ff0000,$acc2
-
- xor $acc0,$t0
- xor $acc1,$t1
- xor $acc2,$t2
-
- movzb `&lo("$s1")`,$acc0
- movzb `&hi("$s3")`,$acc1
- movzb `&hi("$s0")`,$acc2
- mov 0($sbox,$acc0,8),$acc0 #$t3
- mov 2($sbox,$acc1,8),$acc1 #$t0
- mov 2($sbox,$acc2,8),$acc2 #$t1
-
- and \$0x00ff0000,$acc0
- and \$0xff000000,$acc1
- and \$0xff000000,$acc2
-
- xor $acc0,$t3
- xor $acc1,$t0
- xor $acc2,$t1
-
- movzb `&hi("$s1")`,$acc0
- movzb `&hi("$s2")`,$acc1
- mov 16+12($key),$s3
- mov 2($sbox,$acc0,8),$acc0 #$t2
- mov 2($sbox,$acc1,8),$acc1 #$t3
- mov 16+0($key),$s0
-
- and \$0xff000000,$acc0
- and \$0xff000000,$acc1
-
- xor $acc0,$t2
- xor $acc1,$t3
-
- mov 16+4($key),$s1
- mov 16+8($key),$s2
- xor $t0,$s0
- xor $t1,$s1
- xor $t2,$s2
- xor $t3,$s3
-___
-}
-
-sub encstep()
-{ my ($i,@s) = @_;
- my $tmp0=$acc0;
- my $tmp1=$acc1;
- my $tmp2=$acc2;
- my $out=($t0,$t1,$t2,$s[0])[$i];
-
- if ($i==3) {
- $tmp0=$s[1];
- $tmp1=$s[2];
- $tmp2=$s[3];
- }
- $code.=" movzb ".&lo($s[0]).",$out\n";
- $code.=" mov $s[2],$tmp1\n" if ($i!=3);
- $code.=" lea 16($key),$key\n" if ($i==0);
-
- $code.=" movzb ".&hi($s[1]).",$tmp0\n";
- $code.=" mov 0($sbox,$out,8),$out\n";
-
- $code.=" shr \$16,$tmp1\n";
- $code.=" mov $s[3],$tmp2\n" if ($i!=3);
- $code.=" xor 3($sbox,$tmp0,8),$out\n";
-
- $code.=" movzb ".&lo($tmp1).",$tmp1\n";
- $code.=" shr \$24,$tmp2\n";
- $code.=" xor 4*$i($key),$out\n";
-
- $code.=" xor 2($sbox,$tmp1,8),$out\n";
- $code.=" xor 1($sbox,$tmp2,8),$out\n";
-
- $code.=" mov $t0,$s[1]\n" if ($i==3);
- $code.=" mov $t1,$s[2]\n" if ($i==3);
- $code.=" mov $t2,$s[3]\n" if ($i==3);
- $code.="\n";
-}
-
-sub enclast()
-{ my ($i,@s)=@_;
- my $tmp0=$acc0;
- my $tmp1=$acc1;
- my $tmp2=$acc2;
- my $out=($t0,$t1,$t2,$s[0])[$i];
-
- if ($i==3) {
- $tmp0=$s[1];
- $tmp1=$s[2];
- $tmp2=$s[3];
- }
- $code.=" movzb ".&lo($s[0]).",$out\n";
- $code.=" mov $s[2],$tmp1\n" if ($i!=3);
-
- $code.=" mov 2($sbox,$out,8),$out\n";
- $code.=" shr \$16,$tmp1\n";
- $code.=" mov $s[3],$tmp2\n" if ($i!=3);
-
- $code.=" and \$0x000000ff,$out\n";
- $code.=" movzb ".&hi($s[1]).",$tmp0\n";
- $code.=" movzb ".&lo($tmp1).",$tmp1\n";
- $code.=" shr \$24,$tmp2\n";
-
- $code.=" mov 0($sbox,$tmp0,8),$tmp0\n";
- $code.=" mov 0($sbox,$tmp1,8),$tmp1\n";
- $code.=" mov 2($sbox,$tmp2,8),$tmp2\n";
-
- $code.=" and \$0x0000ff00,$tmp0\n";
- $code.=" and \$0x00ff0000,$tmp1\n";
- $code.=" and \$0xff000000,$tmp2\n";
-
- $code.=" xor $tmp0,$out\n";
- $code.=" mov $t0,$s[1]\n" if ($i==3);
- $code.=" xor $tmp1,$out\n";
- $code.=" mov $t1,$s[2]\n" if ($i==3);
- $code.=" xor $tmp2,$out\n";
- $code.=" mov $t2,$s[3]\n" if ($i==3);
- $code.="\n";
-}
-
-$code.=<<___;
-.type _x86_64_AES_encrypt,\@abi-omnipotent
-.align 16
-_x86_64_AES_encrypt:
- xor 0($key),$s0 # xor with key
- xor 4($key),$s1
- xor 8($key),$s2
- xor 12($key),$s3
-
- mov 240($key),$rnds # load key->rounds
- sub \$1,$rnds
- jmp .Lenc_loop
-.align 16
-.Lenc_loop:
-___
- if ($verticalspin) { &encvert(); }
- else { &encstep(0,$s0,$s1,$s2,$s3);
- &encstep(1,$s1,$s2,$s3,$s0);
- &encstep(2,$s2,$s3,$s0,$s1);
- &encstep(3,$s3,$s0,$s1,$s2);
- }
-$code.=<<___;
- sub \$1,$rnds
- jnz .Lenc_loop
-___
- if ($verticalspin) { &enclastvert(); }
- else { &enclast(0,$s0,$s1,$s2,$s3);
- &enclast(1,$s1,$s2,$s3,$s0);
- &enclast(2,$s2,$s3,$s0,$s1);
- &enclast(3,$s3,$s0,$s1,$s2);
- $code.=<<___;
- xor 16+0($key),$s0 # xor with key
- xor 16+4($key),$s1
- xor 16+8($key),$s2
- xor 16+12($key),$s3
-___
- }
-$code.=<<___;
- .byte 0xf3,0xc3 # rep ret (two-byte return; a bare ret is a poor branch target on AMD K8)
-.size _x86_64_AES_encrypt,.-_x86_64_AES_encrypt
-___
-
-# it's possible to implement this by shifting tN by 8, filling the least
-# significant byte with a byte load and finally bswap-ing at the end,
-# but such a partial register load kills Core 2...
-sub enccompactvert()
-{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
-
-$code.=<<___;
- movzb `&lo("$s0")`,$t0
- movzb `&lo("$s1")`,$t1
- movzb `&lo("$s2")`,$t2
- movzb ($sbox,$t0,1),$t0
- movzb ($sbox,$t1,1),$t1
- movzb ($sbox,$t2,1),$t2
-
- movzb `&lo("$s3")`,$t3
- movzb `&hi("$s1")`,$acc0
- movzb `&hi("$s2")`,$acc1
- movzb ($sbox,$t3,1),$t3
- movzb ($sbox,$acc0,1),$t4 #$t0
- movzb ($sbox,$acc1,1),$t5 #$t1
-
- movzb `&hi("$s3")`,$acc2
- movzb `&hi("$s0")`,$acc0
- shr \$16,$s2
- movzb ($sbox,$acc2,1),$acc2 #$t2
- movzb ($sbox,$acc0,1),$acc0 #$t3
- shr \$16,$s3
-
- movzb `&lo("$s2")`,$acc1
- shl \$8,$t4
- shl \$8,$t5
- movzb ($sbox,$acc1,1),$acc1 #$t0
- xor $t4,$t0
- xor $t5,$t1
-
- movzb `&lo("$s3")`,$t4
- shr \$16,$s0
- shr \$16,$s1
- movzb `&lo("$s0")`,$t5
- shl \$8,$acc2
- shl \$8,$acc0
- movzb ($sbox,$t4,1),$t4 #$t1
- movzb ($sbox,$t5,1),$t5 #$t2
- xor $acc2,$t2
- xor $acc0,$t3
-
- movzb `&lo("$s1")`,$acc2
- movzb `&hi("$s3")`,$acc0
- shl \$16,$acc1
- movzb ($sbox,$acc2,1),$acc2 #$t3
- movzb ($sbox,$acc0,1),$acc0 #$t0
- xor $acc1,$t0
-
- movzb `&hi("$s0")`,$acc1
- shr \$8,$s2
- shr \$8,$s1
- movzb ($sbox,$acc1,1),$acc1 #$t1
- movzb ($sbox,$s2,1),$s3 #$t3
- movzb ($sbox,$s1,1),$s2 #$t2
- shl \$16,$t4
- shl \$16,$t5
- shl \$16,$acc2
- xor $t4,$t1
- xor $t5,$t2
- xor $acc2,$t3
-
- shl \$24,$acc0
- shl \$24,$acc1
- shl \$24,$s3
- xor $acc0,$t0
- shl \$24,$s2
- xor $acc1,$t1
- mov $t0,$s0
- mov $t1,$s1
- xor $t2,$s2
- xor $t3,$s3
-___
-}
-
-sub enctransform_ref()
-{ my $sn = shift;
- my ($acc,$r2,$tmp)=("%r8d","%r9d","%r13d");
-
-$code.=<<___;
- mov $sn,$acc
- and \$0x80808080,$acc
- mov $acc,$tmp
- shr \$7,$tmp
- lea ($sn,$sn),$r2
- sub $tmp,$acc
- and \$0xfefefefe,$r2
- and \$0x1b1b1b1b,$acc
- mov $sn,$tmp
- xor $acc,$r2
-
- xor $r2,$sn
- rol \$24,$sn
- xor $r2,$sn
- ror \$16,$tmp
- xor $tmp,$sn
- ror \$8,$tmp
- xor $tmp,$sn
-___
-}
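The mask trio above (0x80808080 / 0xfefefefe / 0x1b1b1b1b) doubles four
GF(2^8) bytes in a single 32-bit operation: the left shift is trimmed with
0xfefefefe so no bit crosses a byte lane, and 0x1b is xored into every byte
whose high bit overflowed. A minimal Perl sketch of the trick, plus the full
MixColumns that the rol/ror sequence then assembles (hypothetical helpers that
mirror the register algebra, not code from this module):

    sub xtime32 {                        # 2*s in GF(2^8), four bytes at once
        my $s   = shift;
        my $msb = $s & 0x80808080;                    # high bit of each byte
        my $red = ($msb - ($msb >> 7)) & 0x1b1b1b1b;  # 0x1b where it overflowed
        return (($s << 1) & 0xfefefefe) ^ $red;
    }
    sub rol32 { my ($x,$n) = @_; (($x << $n) | ($x >> (32 - $n))) & 0xffffffff }
    sub mix_column {                     # one packed column, as the code above
        my $s  = shift;                  # computes it: ror16 = rol16, ror8 = rol24
        my $s2 = xtime32($s);
        return $s2 ^ rol32($s ^ $s2, 24) ^ rol32($s, 16) ^ rol32($s, 8);
    }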
-
-# unlike the decrypt case, it does not pay off to parallelize enctransform
-sub enctransform()
-{ my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
-
-$code.=<<___;
- mov $s0,$acc0
- mov $s1,$acc1
- and \$0x80808080,$acc0
- and \$0x80808080,$acc1
- mov $acc0,$t0
- mov $acc1,$t1
- shr \$7,$t0
- lea ($s0,$s0),$r20
- shr \$7,$t1
- lea ($s1,$s1),$r21
- sub $t0,$acc0
- sub $t1,$acc1
- and \$0xfefefefe,$r20
- and \$0xfefefefe,$r21
- and \$0x1b1b1b1b,$acc0
- and \$0x1b1b1b1b,$acc1
- mov $s0,$t0
- mov $s1,$t1
- xor $acc0,$r20
- xor $acc1,$r21
-
- xor $r20,$s0
- xor $r21,$s1
- mov $s2,$acc0
- mov $s3,$acc1
- rol \$24,$s0
- rol \$24,$s1
- and \$0x80808080,$acc0
- and \$0x80808080,$acc1
- xor $r20,$s0
- xor $r21,$s1
- mov $acc0,$t2
- mov $acc1,$t3
- ror \$16,$t0
- ror \$16,$t1
- shr \$7,$t2
- lea ($s2,$s2),$r20
- xor $t0,$s0
- xor $t1,$s1
- shr \$7,$t3
- lea ($s3,$s3),$r21
- ror \$8,$t0
- ror \$8,$t1
- sub $t2,$acc0
- sub $t3,$acc1
- xor $t0,$s0
- xor $t1,$s1
-
- and \$0xfefefefe,$r20
- and \$0xfefefefe,$r21
- and \$0x1b1b1b1b,$acc0
- and \$0x1b1b1b1b,$acc1
- mov $s2,$t2
- mov $s3,$t3
- xor $acc0,$r20
- xor $acc1,$r21
-
- xor $r20,$s2
- xor $r21,$s3
- rol \$24,$s2
- rol \$24,$s3
- xor $r20,$s2
- xor $r21,$s3
- mov 0($sbox),$acc0 # prefetch Te4
- ror \$16,$t2
- ror \$16,$t3
- mov 64($sbox),$acc1
- xor $t2,$s2
- xor $t3,$s3
- mov 128($sbox),$r20
- ror \$8,$t2
- ror \$8,$t3
- mov 192($sbox),$r21
- xor $t2,$s2
- xor $t3,$s3
-___
-}
-
-$code.=<<___;
-.type _x86_64_AES_encrypt_compact,\@abi-omnipotent
-.align 16
-_x86_64_AES_encrypt_compact:
- lea 128($sbox),$inp # size optimization
- mov 0-128($inp),$acc1 # prefetch Te4
- mov 32-128($inp),$acc2
- mov 64-128($inp),$t0
- mov 96-128($inp),$t1
- mov 128-128($inp),$acc1
- mov 160-128($inp),$acc2
- mov 192-128($inp),$t0
- mov 224-128($inp),$t1
- jmp .Lenc_loop_compact
-.align 16
-.Lenc_loop_compact:
- xor 0($key),$s0 # xor with key
- xor 4($key),$s1
- xor 8($key),$s2
- xor 12($key),$s3
- lea 16($key),$key
-___
- &enccompactvert();
-$code.=<<___;
- cmp 16(%rsp),$key
- je .Lenc_compact_done
-___
- &enctransform();
-$code.=<<___;
- jmp .Lenc_loop_compact
-.align 16
-.Lenc_compact_done:
- xor 0($key),$s0
- xor 4($key),$s1
- xor 8($key),$s2
- xor 12($key),$s3
- .byte 0xf3,0xc3 # rep ret
-.size _x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
-___
-
-# void AES_encrypt (const void *inp,void *out,const AES_KEY *key);
-$code.=<<___;
-.globl AES_encrypt
-.type AES_encrypt,\@function,3
-.align 16
-.globl asm_AES_encrypt
-.hidden asm_AES_encrypt
-asm_AES_encrypt:
-AES_encrypt:
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
-
- # allocate frame "above" key schedule
- mov %rsp,%r10
- lea -63(%rdx),%rcx # %rdx is key argument
- and \$-64,%rsp
- sub %rsp,%rcx
- neg %rcx
- and \$0x3c0,%rcx
- sub %rcx,%rsp
- sub \$32,%rsp
-
- mov %rsi,16(%rsp) # save out
- mov %r10,24(%rsp) # save real stack pointer
-.Lenc_prologue:
-
- mov %rdx,$key
- mov 240($key),$rnds # load rounds
-
- mov 0(%rdi),$s0 # load input vector
- mov 4(%rdi),$s1
- mov 8(%rdi),$s2
- mov 12(%rdi),$s3
-
- shl \$4,$rnds
- lea ($key,$rnds),%rbp
- mov $key,(%rsp) # key schedule
- mov %rbp,8(%rsp) # end of key schedule
-
- # pick Te4 copy which can't "overlap" with stack frame or key schedule
- lea .LAES_Te+2048(%rip),$sbox
- lea 768(%rsp),%rbp
- sub $sbox,%rbp
- and \$0x300,%rbp
- lea ($sbox,%rbp),$sbox
-
- call _x86_64_AES_encrypt_compact
-
- mov 16(%rsp),$out # restore out
- mov 24(%rsp),%rsi # restore saved stack pointer
- mov $s0,0($out) # write output vector
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- mov (%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
-.Lenc_epilogue:
- ret
-.size AES_encrypt,.-AES_encrypt
-___
-
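The "pick Te4 copy" sequence in AES_encrypt above computes
((%rsp+768) - (.LAES_Te+2048)) & 0x300 and adds it to the table base. Since
four identical 256-byte Te4 copies follow .LAES_Te+2048 (see the data section
at the end of the file), this selects the copy that sits at a roughly fixed
distance (mod 1024) from the stack frame, so the table and the frame cannot
keep evicting each other from the same L1 sets. In plain arithmetic (a sketch;
$rsp and $te4 stand for the runtime addresses):

    my $offset = (($rsp + 768) - $te4) & 0x300;  # one of 0, 256, 512, 768
    my $sbox   = $te4 + $offset;                 # the selected Te4 copy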
-#------------------------------------------------------------------#
-
-sub decvert()
-{ my $t3="%r8d"; # zaps $inp!
-
-$code.=<<___;
- # favor 3-way issue Opteron pipeline...
- movzb `&lo("$s0")`,$acc0
- movzb `&lo("$s1")`,$acc1
- movzb `&lo("$s2")`,$acc2
- mov 0($sbox,$acc0,8),$t0
- mov 0($sbox,$acc1,8),$t1
- mov 0($sbox,$acc2,8),$t2
-
- movzb `&hi("$s3")`,$acc0
- movzb `&hi("$s0")`,$acc1
- movzb `&lo("$s3")`,$acc2
- xor 3($sbox,$acc0,8),$t0
- xor 3($sbox,$acc1,8),$t1
- mov 0($sbox,$acc2,8),$t3
-
- movzb `&hi("$s1")`,$acc0
- shr \$16,$s0
- movzb `&hi("$s2")`,$acc2
- xor 3($sbox,$acc0,8),$t2
- shr \$16,$s3
- xor 3($sbox,$acc2,8),$t3
-
- shr \$16,$s1
- lea 16($key),$key
- shr \$16,$s2
-
- movzb `&lo("$s2")`,$acc0
- movzb `&lo("$s3")`,$acc1
- movzb `&lo("$s0")`,$acc2
- xor 2($sbox,$acc0,8),$t0
- xor 2($sbox,$acc1,8),$t1
- xor 2($sbox,$acc2,8),$t2
-
- movzb `&hi("$s1")`,$acc0
- movzb `&hi("$s2")`,$acc1
- movzb `&lo("$s1")`,$acc2
- xor 1($sbox,$acc0,8),$t0
- xor 1($sbox,$acc1,8),$t1
- xor 2($sbox,$acc2,8),$t3
-
- movzb `&hi("$s3")`,$acc0
- mov 12($key),$s3
- movzb `&hi("$s0")`,$acc2
- xor 1($sbox,$acc0,8),$t2
- mov 0($key),$s0
- xor 1($sbox,$acc2,8),$t3
-
- xor $t0,$s0
- mov 4($key),$s1
- mov 8($key),$s2
- xor $t2,$s2
- xor $t1,$s1
- xor $t3,$s3
-___
-}
-
-sub declastvert()
-{ my $t3="%r8d"; # zaps $inp!
-
-$code.=<<___;
- lea 2048($sbox),$sbox # size optimization
- movzb `&lo("$s0")`,$acc0
- movzb `&lo("$s1")`,$acc1
- movzb `&lo("$s2")`,$acc2
- movzb ($sbox,$acc0,1),$t0
- movzb ($sbox,$acc1,1),$t1
- movzb ($sbox,$acc2,1),$t2
-
- movzb `&lo("$s3")`,$acc0
- movzb `&hi("$s3")`,$acc1
- movzb `&hi("$s0")`,$acc2
- movzb ($sbox,$acc0,1),$t3
- movzb ($sbox,$acc1,1),$acc1 #$t0
- movzb ($sbox,$acc2,1),$acc2 #$t1
-
- shl \$8,$acc1
- shl \$8,$acc2
-
- xor $acc1,$t0
- xor $acc2,$t1
- shr \$16,$s3
-
- movzb `&hi("$s1")`,$acc0
- movzb `&hi("$s2")`,$acc1
- shr \$16,$s0
- movzb ($sbox,$acc0,1),$acc0 #$t2
- movzb ($sbox,$acc1,1),$acc1 #$t3
-
- shl \$8,$acc0
- shl \$8,$acc1
- shr \$16,$s1
- xor $acc0,$t2
- xor $acc1,$t3
- shr \$16,$s2
-
- movzb `&lo("$s2")`,$acc0
- movzb `&lo("$s3")`,$acc1
- movzb `&lo("$s0")`,$acc2
- movzb ($sbox,$acc0,1),$acc0 #$t0
- movzb ($sbox,$acc1,1),$acc1 #$t1
- movzb ($sbox,$acc2,1),$acc2 #$t2
-
- shl \$16,$acc0
- shl \$16,$acc1
- shl \$16,$acc2
-
- xor $acc0,$t0
- xor $acc1,$t1
- xor $acc2,$t2
-
- movzb `&lo("$s1")`,$acc0
- movzb `&hi("$s1")`,$acc1
- movzb `&hi("$s2")`,$acc2
- movzb ($sbox,$acc0,1),$acc0 #$t3
- movzb ($sbox,$acc1,1),$acc1 #$t0
- movzb ($sbox,$acc2,1),$acc2 #$t1
-
- shl \$16,$acc0
- shl \$24,$acc1
- shl \$24,$acc2
-
- xor $acc0,$t3
- xor $acc1,$t0
- xor $acc2,$t1
-
- movzb `&hi("$s3")`,$acc0
- movzb `&hi("$s0")`,$acc1
- mov 16+12($key),$s3
- movzb ($sbox,$acc0,1),$acc0 #$t2
- movzb ($sbox,$acc1,1),$acc1 #$t3
- mov 16+0($key),$s0
-
- shl \$24,$acc0
- shl \$24,$acc1
-
- xor $acc0,$t2
- xor $acc1,$t3
-
- mov 16+4($key),$s1
- mov 16+8($key),$s2
- lea -2048($sbox),$sbox
- xor $t0,$s0
- xor $t1,$s1
- xor $t2,$s2
- xor $t3,$s3
-___
-}
-
-sub decstep()
-{ my ($i,@s) = @_;
- my $tmp0=$acc0;
- my $tmp1=$acc1;
- my $tmp2=$acc2;
- my $out=($t0,$t1,$t2,$s[0])[$i];
-
- $code.=" mov $s[0],$out\n" if ($i!=3);
- $tmp1=$s[2] if ($i==3);
- $code.=" mov $s[2],$tmp1\n" if ($i!=3);
- $code.=" and \$0xFF,$out\n";
-
- $code.=" mov 0($sbox,$out,8),$out\n";
- $code.=" shr \$16,$tmp1\n";
- $tmp2=$s[3] if ($i==3);
- $code.=" mov $s[3],$tmp2\n" if ($i!=3);
-
- $tmp0=$s[1] if ($i==3);
- $code.=" movzb ".&hi($s[1]).",$tmp0\n";
- $code.=" and \$0xFF,$tmp1\n";
- $code.=" shr \$24,$tmp2\n";
-
- $code.=" xor 3($sbox,$tmp0,8),$out\n";
- $code.=" xor 2($sbox,$tmp1,8),$out\n";
- $code.=" xor 1($sbox,$tmp2,8),$out\n";
-
- $code.=" mov $t2,$s[1]\n" if ($i==3);
- $code.=" mov $t1,$s[2]\n" if ($i==3);
- $code.=" mov $t0,$s[3]\n" if ($i==3);
- $code.="\n";
-}
-
-sub declast()
-{ my ($i,@s)=@_;
- my $tmp0=$acc0;
- my $tmp1=$acc1;
- my $tmp2=$acc2;
- my $out=($t0,$t1,$t2,$s[0])[$i];
-
- $code.=" mov $s[0],$out\n" if ($i!=3);
- $tmp1=$s[2] if ($i==3);
- $code.=" mov $s[2],$tmp1\n" if ($i!=3);
- $code.=" and \$0xFF,$out\n";
-
- $code.=" movzb 2048($sbox,$out,1),$out\n";
- $code.=" shr \$16,$tmp1\n";
- $tmp2=$s[3] if ($i==3);
- $code.=" mov $s[3],$tmp2\n" if ($i!=3);
-
- $tmp0=$s[1] if ($i==3);
- $code.=" movzb ".&hi($s[1]).",$tmp0\n";
- $code.=" and \$0xFF,$tmp1\n";
- $code.=" shr \$24,$tmp2\n";
-
- $code.=" movzb 2048($sbox,$tmp0,1),$tmp0\n";
- $code.=" movzb 2048($sbox,$tmp1,1),$tmp1\n";
- $code.=" movzb 2048($sbox,$tmp2,1),$tmp2\n";
-
- $code.=" shl \$8,$tmp0\n";
- $code.=" shl \$16,$tmp1\n";
- $code.=" shl \$24,$tmp2\n";
-
- $code.=" xor $tmp0,$out\n";
- $code.=" mov $t2,$s[1]\n" if ($i==3);
- $code.=" xor $tmp1,$out\n";
- $code.=" mov $t1,$s[2]\n" if ($i==3);
- $code.=" xor $tmp2,$out\n";
- $code.=" mov $t0,$s[3]\n" if ($i==3);
- $code.="\n";
-}
-
-$code.=<<___;
-.type _x86_64_AES_decrypt,\@abi-omnipotent
-.align 16
-_x86_64_AES_decrypt:
- xor 0($key),$s0 # xor with key
- xor 4($key),$s1
- xor 8($key),$s2
- xor 12($key),$s3
-
- mov 240($key),$rnds # load key->rounds
- sub \$1,$rnds
- jmp .Ldec_loop
-.align 16
-.Ldec_loop:
-___
- if ($verticalspin) { &decvert(); }
- else { &decstep(0,$s0,$s3,$s2,$s1);
- &decstep(1,$s1,$s0,$s3,$s2);
- &decstep(2,$s2,$s1,$s0,$s3);
- &decstep(3,$s3,$s2,$s1,$s0);
- $code.=<<___;
- lea 16($key),$key
- xor 0($key),$s0 # xor with key
- xor 4($key),$s1
- xor 8($key),$s2
- xor 12($key),$s3
-___
- }
-$code.=<<___;
- sub \$1,$rnds
- jnz .Ldec_loop
-___
- if ($verticalspin) { &declastvert(); }
- else { &declast(0,$s0,$s3,$s2,$s1);
- &declast(1,$s1,$s0,$s3,$s2);
- &declast(2,$s2,$s1,$s0,$s3);
- &declast(3,$s3,$s2,$s1,$s0);
- $code.=<<___;
- xor 16+0($key),$s0 # xor with key
- xor 16+4($key),$s1
- xor 16+8($key),$s2
- xor 16+12($key),$s3
-___
- }
-$code.=<<___;
- .byte 0xf3,0xc3 # rep ret
-.size _x86_64_AES_decrypt,.-_x86_64_AES_decrypt
-___
-
-sub deccompactvert()
-{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
-
-$code.=<<___;
- movzb `&lo("$s0")`,$t0
- movzb `&lo("$s1")`,$t1
- movzb `&lo("$s2")`,$t2
- movzb ($sbox,$t0,1),$t0
- movzb ($sbox,$t1,1),$t1
- movzb ($sbox,$t2,1),$t2
-
- movzb `&lo("$s3")`,$t3
- movzb `&hi("$s3")`,$acc0
- movzb `&hi("$s0")`,$acc1
- movzb ($sbox,$t3,1),$t3
- movzb ($sbox,$acc0,1),$t4 #$t0
- movzb ($sbox,$acc1,1),$t5 #$t1
-
- movzb `&hi("$s1")`,$acc2
- movzb `&hi("$s2")`,$acc0
- shr \$16,$s2
- movzb ($sbox,$acc2,1),$acc2 #$t2
- movzb ($sbox,$acc0,1),$acc0 #$t3
- shr \$16,$s3
-
- movzb `&lo("$s2")`,$acc1
- shl \$8,$t4
- shl \$8,$t5
- movzb ($sbox,$acc1,1),$acc1 #$t0
- xor $t4,$t0
- xor $t5,$t1
-
- movzb `&lo("$s3")`,$t4
- shr \$16,$s0
- shr \$16,$s1
- movzb `&lo("$s0")`,$t5
- shl \$8,$acc2
- shl \$8,$acc0
- movzb ($sbox,$t4,1),$t4 #$t1
- movzb ($sbox,$t5,1),$t5 #$t2
- xor $acc2,$t2
- xor $acc0,$t3
-
- movzb `&lo("$s1")`,$acc2
- movzb `&hi("$s1")`,$acc0
- shl \$16,$acc1
- movzb ($sbox,$acc2,1),$acc2 #$t3
- movzb ($sbox,$acc0,1),$acc0 #$t0
- xor $acc1,$t0
-
- movzb `&hi("$s2")`,$acc1
- shl \$16,$t4
- shl \$16,$t5
- movzb ($sbox,$acc1,1),$s1 #$t1
- xor $t4,$t1
- xor $t5,$t2
-
- movzb `&hi("$s3")`,$acc1
- shr \$8,$s0
- shl \$16,$acc2
- movzb ($sbox,$acc1,1),$s2 #$t2
- movzb ($sbox,$s0,1),$s3 #$t3
- xor $acc2,$t3
-
- shl \$24,$acc0
- shl \$24,$s1
- shl \$24,$s2
- xor $acc0,$t0
- shl \$24,$s3
- xor $t1,$s1
- mov $t0,$s0
- xor $t2,$s2
- xor $t3,$s3
-___
-}
-
-# parallelized version! input is a pair of 64-bit values: %rax=s1.s0
-# and %rcx=s3.s2, output is four 32-bit values in %eax=s0, %ebx=s1,
-# %ecx=s2 and %edx=s3.
-sub dectransform()
-{ my ($tp10,$tp20,$tp40,$tp80,$acc0)=("%rax","%r8", "%r9", "%r10","%rbx");
- my ($tp18,$tp28,$tp48,$tp88,$acc8)=("%rcx","%r11","%r12","%r13","%rdx");
- my $prefetch = shift;
-
-$code.=<<___;
- mov $tp10,$acc0
- mov $tp18,$acc8
- and $mask80,$acc0
- and $mask80,$acc8
- mov $acc0,$tp40
- mov $acc8,$tp48
- shr \$7,$tp40
- lea ($tp10,$tp10),$tp20
- shr \$7,$tp48
- lea ($tp18,$tp18),$tp28
- sub $tp40,$acc0
- sub $tp48,$acc8
- and $maskfe,$tp20
- and $maskfe,$tp28
- and $mask1b,$acc0
- and $mask1b,$acc8
- xor $tp20,$acc0
- xor $tp28,$acc8
- mov $acc0,$tp20
- mov $acc8,$tp28
-
- and $mask80,$acc0
- and $mask80,$acc8
- mov $acc0,$tp80
- mov $acc8,$tp88
- shr \$7,$tp80
- lea ($tp20,$tp20),$tp40
- shr \$7,$tp88
- lea ($tp28,$tp28),$tp48
- sub $tp80,$acc0
- sub $tp88,$acc8
- and $maskfe,$tp40
- and $maskfe,$tp48
- and $mask1b,$acc0
- and $mask1b,$acc8
- xor $tp40,$acc0
- xor $tp48,$acc8
- mov $acc0,$tp40
- mov $acc8,$tp48
-
- and $mask80,$acc0
- and $mask80,$acc8
- mov $acc0,$tp80
- mov $acc8,$tp88
- shr \$7,$tp80
- xor $tp10,$tp20 # tp2^=tp1
- shr \$7,$tp88
- xor $tp18,$tp28 # tp2^=tp1
- sub $tp80,$acc0
- sub $tp88,$acc8
- lea ($tp40,$tp40),$tp80
- lea ($tp48,$tp48),$tp88
- xor $tp10,$tp40 # tp4^=tp1
- xor $tp18,$tp48 # tp4^=tp1
- and $maskfe,$tp80
- and $maskfe,$tp88
- and $mask1b,$acc0
- and $mask1b,$acc8
- xor $acc0,$tp80
- xor $acc8,$tp88
-
- xor $tp80,$tp10 # tp1^=tp8
- xor $tp88,$tp18 # tp1^=tp8
- xor $tp80,$tp20 # tp2^tp1^=tp8
- xor $tp88,$tp28 # tp2^tp1^=tp8
- mov $tp10,$acc0
- mov $tp18,$acc8
- xor $tp80,$tp40 # tp4^tp1^=tp8
- xor $tp88,$tp48 # tp4^tp1^=tp8
- shr \$32,$acc0
- shr \$32,$acc8
- xor $tp20,$tp80 # tp8^=tp8^tp2^tp1=tp2^tp1
- xor $tp28,$tp88 # tp8^=tp8^tp2^tp1=tp2^tp1
- rol \$8,`&LO("$tp10")` # ROTATE(tp1^tp8,8)
- rol \$8,`&LO("$tp18")` # ROTATE(tp1^tp8,8)
- xor $tp40,$tp80 # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
- xor $tp48,$tp88 # tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
-
- rol \$8,`&LO("$acc0")` # ROTATE(tp1^tp8,8)
- rol \$8,`&LO("$acc8")` # ROTATE(tp1^tp8,8)
- xor `&LO("$tp80")`,`&LO("$tp10")`
- xor `&LO("$tp88")`,`&LO("$tp18")`
- shr \$32,$tp80
- shr \$32,$tp88
- xor `&LO("$tp80")`,`&LO("$acc0")`
- xor `&LO("$tp88")`,`&LO("$acc8")`
-
- mov $tp20,$tp80
- mov $tp28,$tp88
- shr \$32,$tp80
- shr \$32,$tp88
- rol \$24,`&LO("$tp20")` # ROTATE(tp2^tp1^tp8,24)
- rol \$24,`&LO("$tp28")` # ROTATE(tp2^tp1^tp8,24)
- rol \$24,`&LO("$tp80")` # ROTATE(tp2^tp1^tp8,24)
- rol \$24,`&LO("$tp88")` # ROTATE(tp2^tp1^tp8,24)
- xor `&LO("$tp20")`,`&LO("$tp10")`
- xor `&LO("$tp28")`,`&LO("$tp18")`
- mov $tp40,$tp20
- mov $tp48,$tp28
- xor `&LO("$tp80")`,`&LO("$acc0")`
- xor `&LO("$tp88")`,`&LO("$acc8")`
-
- `"mov 0($sbox),$mask80" if ($prefetch)`
- shr \$32,$tp20
- shr \$32,$tp28
- `"mov 64($sbox),$maskfe" if ($prefetch)`
- rol \$16,`&LO("$tp40")` # ROTATE(tp4^tp1^tp8,16)
- rol \$16,`&LO("$tp48")` # ROTATE(tp4^tp1^tp8,16)
- `"mov 128($sbox),$mask1b" if ($prefetch)`
- rol \$16,`&LO("$tp20")` # ROTATE(tp4^tp1^tp8,16)
- rol \$16,`&LO("$tp28")` # ROTATE(tp4^tp1^tp8,16)
- `"mov 192($sbox),$tp80" if ($prefetch)`
- xor `&LO("$tp40")`,`&LO("$tp10")`
- xor `&LO("$tp48")`,`&LO("$tp18")`
- `"mov 256($sbox),$tp88" if ($prefetch)`
- xor `&LO("$tp20")`,`&LO("$acc0")`
- xor `&LO("$tp28")`,`&LO("$acc8")`
-___
-}
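A note on the backticked segments used throughout these heredocs (the
&lo(...)/&hi(...) calls, and the "mov 0($sbox),$mask80" if ($prefetch) lines
just above): Perl leaves a backtick inside an interpolating heredoc as a
literal character, so the segments survive into $code; it is the
x86_64-xlate.pl translator that later evaluates each one, roughly like this
(an assumption from the translator's usual structure, not quoted from this
tree):

    s/\`([^\`]*)\`/eval($1)/ge;    # expand `...` in every emitted line

Plain scalars such as $sbox and $prefetch are still interpolated at generation
time, which is why the Td4 prefetch loads only appear in the dectransform(1)
instantiation used by the compact decrypt loop.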
-
-$code.=<<___;
-.type _x86_64_AES_decrypt_compact,\@abi-omnipotent
-.align 16
-_x86_64_AES_decrypt_compact:
- lea 128($sbox),$inp # size optimization
- mov 0-128($inp),$acc1 # prefetch Td4
- mov 32-128($inp),$acc2
- mov 64-128($inp),$t0
- mov 96-128($inp),$t1
- mov 128-128($inp),$acc1
- mov 160-128($inp),$acc2
- mov 192-128($inp),$t0
- mov 224-128($inp),$t1
- jmp .Ldec_loop_compact
-
-.align 16
-.Ldec_loop_compact:
- xor 0($key),$s0 # xor with key
- xor 4($key),$s1
- xor 8($key),$s2
- xor 12($key),$s3
- lea 16($key),$key
-___
- &deccompactvert();
-$code.=<<___;
- cmp 16(%rsp),$key
- je .Ldec_compact_done
-
- mov 256+0($sbox),$mask80
- shl \$32,%rbx
- shl \$32,%rdx
- mov 256+8($sbox),$maskfe
- or %rbx,%rax
- or %rdx,%rcx
- mov 256+16($sbox),$mask1b
-___
- &dectransform(1);
-$code.=<<___;
- jmp .Ldec_loop_compact
-.align 16
-.Ldec_compact_done:
- xor 0($key),$s0
- xor 4($key),$s1
- xor 8($key),$s2
- xor 12($key),$s3
- .byte 0xf3,0xc3 # rep ret
-.size _x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
-___
-
-# void AES_decrypt (const void *inp,void *out,const AES_KEY *key);
-$code.=<<___;
-.globl AES_decrypt
-.type AES_decrypt,\@function,3
-.align 16
-.globl asm_AES_decrypt
-.hidden asm_AES_decrypt
-asm_AES_decrypt:
-AES_decrypt:
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
-
- # allocate frame "above" key schedule
- mov %rsp,%r10
- lea -63(%rdx),%rcx # %rdx is key argument
- and \$-64,%rsp
- sub %rsp,%rcx
- neg %rcx
- and \$0x3c0,%rcx
- sub %rcx,%rsp
- sub \$32,%rsp
-
- mov %rsi,16(%rsp) # save out
- mov %r10,24(%rsp) # save real stack pointer
-.Ldec_prologue:
-
- mov %rdx,$key
- mov 240($key),$rnds # load rounds
-
- mov 0(%rdi),$s0 # load input vector
- mov 4(%rdi),$s1
- mov 8(%rdi),$s2
- mov 12(%rdi),$s3
-
- shl \$4,$rnds
- lea ($key,$rnds),%rbp
- mov $key,(%rsp) # key schedule
- mov %rbp,8(%rsp) # end of key schedule
-
- # pick Td4 copy which can't "overlap" with stack frame or key schedule
- lea .LAES_Td+2048(%rip),$sbox
- lea 768(%rsp),%rbp
- sub $sbox,%rbp
- and \$0x300,%rbp
- lea ($sbox,%rbp),$sbox
- shr \$3,%rbp # recall "magic" constants!
- add %rbp,$sbox
-
- call _x86_64_AES_decrypt_compact
-
- mov 16(%rsp),$out # restore out
- mov 24(%rsp),%rsi # restore saved stack pointer
- mov $s0,0($out) # write output vector
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- mov (%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
-.Ldec_epilogue:
- ret
-.size AES_decrypt,.-AES_decrypt
-___
-#------------------------------------------------------------------#
-
-sub enckey()
-{
-$code.=<<___;
- movz %dl,%esi # rk[i]>>0
- movzb -128(%rbp,%rsi),%ebx
- movz %dh,%esi # rk[i]>>8
- shl \$24,%ebx
- xor %ebx,%eax
-
- movzb -128(%rbp,%rsi),%ebx
- shr \$16,%edx
- movz %dl,%esi # rk[i]>>16
- xor %ebx,%eax
-
- movzb -128(%rbp,%rsi),%ebx
- movz %dh,%esi # rk[i]>>24
- shl \$8,%ebx
- xor %ebx,%eax
-
- movzb -128(%rbp,%rsi),%ebx
- shl \$16,%ebx
- xor %ebx,%eax
-
- xor 1024-128(%rbp,%rcx,4),%eax # rcon
-___
-}
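enckey() is the classic round-key recurrence rk[next] = rk[prev] ^
SubWord(RotWord(temp)) ^ rcon, spelled out byte by byte against the Te4 bytes
at -128(%rbp). The same step for the 128-bit schedule below (rk[4] from rk[0]
and rk[3]) in plain Perl, as a sketch; @sbox stands for a flat 256-entry
forward S-box, which is not how the module actually lays out its tables:

    sub next_rk {
        my ($rk0, $rk3, $rcon) = @_;
        my $t = $sbox[ $rk3        & 0xff] << 24   # byte 0 -> byte 3 (RotWord)
              ^ $sbox[($rk3 >>  8) & 0xff]         # byte 1 -> byte 0
              ^ $sbox[($rk3 >> 16) & 0xff] <<  8   # byte 2 -> byte 1
              ^ $sbox[($rk3 >> 24) & 0xff] << 16;  # byte 3 -> byte 2
        return $rk0 ^ $t ^ $rcon;                  # rk[4]; rk[5..7] chain by xor
    }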
-
-# int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits,
-# AES_KEY *key)
-$code.=<<___;
-.globl private_AES_set_encrypt_key
-.type private_AES_set_encrypt_key,\@function,3
-.align 16
-private_AES_set_encrypt_key:
- push %rbx
- push %rbp
- push %r12 # redundant, but allows sharing
- push %r13 # the exception handler...
- push %r14
- push %r15
- sub \$8,%rsp
-.Lenc_key_prologue:
-
- call _x86_64_AES_set_encrypt_key
-
- mov 8(%rsp),%r15
- mov 16(%rsp),%r14
- mov 24(%rsp),%r13
- mov 32(%rsp),%r12
- mov 40(%rsp),%rbp
- mov 48(%rsp),%rbx
- add \$56,%rsp
-.Lenc_key_epilogue:
- ret
-.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
-
-.type _x86_64_AES_set_encrypt_key,\@abi-omnipotent
-.align 16
-_x86_64_AES_set_encrypt_key:
- mov %esi,%ecx # %ecx=bits
- mov %rdi,%rsi # %rsi=userKey
- mov %rdx,%rdi # %rdi=key
-
- test \$-1,%rsi # NULL userKey?
- jz .Lbadpointer
- test \$-1,%rdi # NULL key?
- jz .Lbadpointer
-
- lea .LAES_Te(%rip),%rbp
- lea 2048+128(%rbp),%rbp
-
- # prefetch Te4
- mov 0-128(%rbp),%eax
- mov 32-128(%rbp),%ebx
- mov 64-128(%rbp),%r8d
- mov 96-128(%rbp),%edx
- mov 128-128(%rbp),%eax
- mov 160-128(%rbp),%ebx
- mov 192-128(%rbp),%r8d
- mov 224-128(%rbp),%edx
-
- cmp \$128,%ecx
- je .L10rounds
- cmp \$192,%ecx
- je .L12rounds
- cmp \$256,%ecx
- je .L14rounds
- mov \$-2,%rax # invalid number of bits
- jmp .Lexit
-
-.L10rounds:
- mov 0(%rsi),%rax # copy first 4 dwords
- mov 8(%rsi),%rdx
- mov %rax,0(%rdi)
- mov %rdx,8(%rdi)
-
- shr \$32,%rdx
- xor %ecx,%ecx
- jmp .L10shortcut
-.align 4
-.L10loop:
- mov 0(%rdi),%eax # rk[0]
- mov 12(%rdi),%edx # rk[3]
-.L10shortcut:
-___
- &enckey ();
-$code.=<<___;
- mov %eax,16(%rdi) # rk[4]
- xor 4(%rdi),%eax
- mov %eax,20(%rdi) # rk[5]
- xor 8(%rdi),%eax
- mov %eax,24(%rdi) # rk[6]
- xor 12(%rdi),%eax
- mov %eax,28(%rdi) # rk[7]
- add \$1,%ecx
- lea 16(%rdi),%rdi
- cmp \$10,%ecx
- jl .L10loop
-
- movl \$10,80(%rdi) # setup number of rounds
- xor %rax,%rax
- jmp .Lexit
-
-.L12rounds:
- mov 0(%rsi),%rax # copy first 6 dwords
- mov 8(%rsi),%rbx
- mov 16(%rsi),%rdx
- mov %rax,0(%rdi)
- mov %rbx,8(%rdi)
- mov %rdx,16(%rdi)
-
- shr \$32,%rdx
- xor %ecx,%ecx
- jmp .L12shortcut
-.align 4
-.L12loop:
- mov 0(%rdi),%eax # rk[0]
- mov 20(%rdi),%edx # rk[5]
-.L12shortcut:
-___
- &enckey ();
-$code.=<<___;
- mov %eax,24(%rdi) # rk[6]
- xor 4(%rdi),%eax
- mov %eax,28(%rdi) # rk[7]
- xor 8(%rdi),%eax
- mov %eax,32(%rdi) # rk[8]
- xor 12(%rdi),%eax
- mov %eax,36(%rdi) # rk[9]
-
- cmp \$7,%ecx
- je .L12break
- add \$1,%ecx
-
- xor 16(%rdi),%eax
- mov %eax,40(%rdi) # rk[10]
- xor 20(%rdi),%eax
- mov %eax,44(%rdi) # rk[11]
-
- lea 24(%rdi),%rdi
- jmp .L12loop
-.L12break:
- movl \$12,72(%rdi) # setup number of rounds
- xor %rax,%rax
- jmp .Lexit
-
-.L14rounds:
- mov 0(%rsi),%rax # copy first 8 dwords
- mov 8(%rsi),%rbx
- mov 16(%rsi),%rcx
- mov 24(%rsi),%rdx
- mov %rax,0(%rdi)
- mov %rbx,8(%rdi)
- mov %rcx,16(%rdi)
- mov %rdx,24(%rdi)
-
- shr \$32,%rdx
- xor %ecx,%ecx
- jmp .L14shortcut
-.align 4
-.L14loop:
- mov 0(%rdi),%eax # rk[0]
- mov 28(%rdi),%edx # rk[7]
-.L14shortcut:
-___
- &enckey ();
-$code.=<<___;
- mov %eax,32(%rdi) # rk[8]
- xor 4(%rdi),%eax
- mov %eax,36(%rdi) # rk[9]
- xor 8(%rdi),%eax
- mov %eax,40(%rdi) # rk[10]
- xor 12(%rdi),%eax
- mov %eax,44(%rdi) # rk[11]
-
- cmp \$6,%ecx
- je .L14break
- add \$1,%ecx
-
- mov %eax,%edx
- mov 16(%rdi),%eax # rk[4]
- movz %dl,%esi # rk[11]>>0
- movzb -128(%rbp,%rsi),%ebx
- movz %dh,%esi # rk[11]>>8
- xor %ebx,%eax
-
- movzb -128(%rbp,%rsi),%ebx
- shr \$16,%edx
- shl \$8,%ebx
- movz %dl,%esi # rk[11]>>16
- xor %ebx,%eax
-
- movzb -128(%rbp,%rsi),%ebx
- movz %dh,%esi # rk[11]>>24
- shl \$16,%ebx
- xor %ebx,%eax
-
- movzb -128(%rbp,%rsi),%ebx
- shl \$24,%ebx
- xor %ebx,%eax
-
- mov %eax,48(%rdi) # rk[12]
- xor 20(%rdi),%eax
- mov %eax,52(%rdi) # rk[13]
- xor 24(%rdi),%eax
- mov %eax,56(%rdi) # rk[14]
- xor 28(%rdi),%eax
- mov %eax,60(%rdi) # rk[15]
-
- lea 32(%rdi),%rdi
- jmp .L14loop
-.L14break:
- movl \$14,48(%rdi) # setup number of rounds
- xor %rax,%rax
- jmp .Lexit
-
-.Lbadpointer:
- mov \$-1,%rax
-.Lexit:
- .byte 0xf3,0xc3 # rep ret
-.size _x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
-___
-
-sub deckey_ref()
-{ my ($i,$ptr,$te,$td) = @_;
- my ($tp1,$tp2,$tp4,$tp8,$acc)=("%eax","%ebx","%edi","%edx","%r8d");
-$code.=<<___;
- mov $i($ptr),$tp1
- mov $tp1,$acc
- and \$0x80808080,$acc
- mov $acc,$tp4
- shr \$7,$tp4
- lea 0($tp1,$tp1),$tp2
- sub $tp4,$acc
- and \$0xfefefefe,$tp2
- and \$0x1b1b1b1b,$acc
- xor $tp2,$acc
- mov $acc,$tp2
-
- and \$0x80808080,$acc
- mov $acc,$tp8
- shr \$7,$tp8
- lea 0($tp2,$tp2),$tp4
- sub $tp8,$acc
- and \$0xfefefefe,$tp4
- and \$0x1b1b1b1b,$acc
- xor $tp1,$tp2 # tp2^tp1
- xor $tp4,$acc
- mov $acc,$tp4
-
- and \$0x80808080,$acc
- mov $acc,$tp8
- shr \$7,$tp8
- sub $tp8,$acc
- lea 0($tp4,$tp4),$tp8
- xor $tp1,$tp4 # tp4^tp1
- and \$0xfefefefe,$tp8
- and \$0x1b1b1b1b,$acc
- xor $acc,$tp8
-
- xor $tp8,$tp1 # tp1^tp8
- rol \$8,$tp1 # ROTATE(tp1^tp8,8)
- xor $tp8,$tp2 # tp2^tp1^tp8
- xor $tp8,$tp4 # tp4^tp1^tp8
- xor $tp2,$tp8
- xor $tp4,$tp8 # tp8^(tp8^tp4^tp1)^(tp8^tp2^tp1)=tp8^tp4^tp2
-
- xor $tp8,$tp1
- rol \$24,$tp2 # ROTATE(tp2^tp1^tp8,24)
- xor $tp2,$tp1
- rol \$16,$tp4 # ROTATE(tp4^tp1^tp8,16)
- xor $tp4,$tp1
-
- mov $tp1,$i($ptr)
-___
-}
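deckey_ref() above (and the two-column dectransform() before it) applies
InvMixColumns to a round-key word by decomposing the coefficients
{0e,0b,0d,09} into the doublings tp2 = 2*tp1, tp4 = 4*tp1, tp8 = 8*tp1 plus
three rotations, exactly as the trailing comments spell out. Collected in one
place, reusing xtime32() and rol32() from the earlier sketch (illustrative;
the assembly interleaves the xors to hide latency):

    sub inv_mix_column {
        my $tp1 = shift;
        my $tp2 = xtime32($tp1);                # 2*s
        my $tp4 = xtime32($tp2);                # 4*s
        my $tp8 = xtime32($tp4);                # 8*s
        return  ($tp8 ^ $tp4 ^ $tp2)            # 0x0e * s
              ^ rol32($tp1 ^ $tp2 ^ $tp8, 24)   # 0x0b * s
              ^ rol32($tp1 ^ $tp4 ^ $tp8, 16)   # 0x0d * s
              ^ rol32($tp1 ^ $tp8,        8);   # 0x09 * s
    }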
-
-# int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits,
-# AES_KEY *key)
-$code.=<<___;
-.globl private_AES_set_decrypt_key
-.type private_AES_set_decrypt_key,\@function,3
-.align 16
-private_AES_set_decrypt_key:
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- push %rdx # save key schedule
-.Ldec_key_prologue:
-
- call _x86_64_AES_set_encrypt_key
- mov (%rsp),%r8 # restore key schedule
- cmp \$0,%eax
- jne .Labort
-
- mov 240(%r8),%r14d # pull number of rounds
- xor %rdi,%rdi
- lea (%rdi,%r14d,4),%rcx
- mov %r8,%rsi
- lea (%r8,%rcx,4),%rdi # pointer to last chunk
-.align 4
-.Linvert:
- mov 0(%rsi),%rax
- mov 8(%rsi),%rbx
- mov 0(%rdi),%rcx
- mov 8(%rdi),%rdx
- mov %rax,0(%rdi)
- mov %rbx,8(%rdi)
- mov %rcx,0(%rsi)
- mov %rdx,8(%rsi)
- lea 16(%rsi),%rsi
- lea -16(%rdi),%rdi
- cmp %rsi,%rdi
- jne .Linvert
-
- lea .LAES_Te+2048+1024(%rip),%rax # rcon
-
- mov 40(%rax),$mask80
- mov 48(%rax),$maskfe
- mov 56(%rax),$mask1b
-
- mov %r8,$key
- sub \$1,%r14d
-.align 4
-.Lpermute:
- lea 16($key),$key
- mov 0($key),%rax
- mov 8($key),%rcx
-___
- &dectransform ();
-$code.=<<___;
- mov %eax,0($key)
- mov %ebx,4($key)
- mov %ecx,8($key)
- mov %edx,12($key)
- sub \$1,%r14d
- jnz .Lpermute
-
- xor %rax,%rax
-.Labort:
- mov 8(%rsp),%r15
- mov 16(%rsp),%r14
- mov 24(%rsp),%r13
- mov 32(%rsp),%r12
- mov 40(%rsp),%rbp
- mov 48(%rsp),%rbx
- add \$56,%rsp
-.Ldec_key_epilogue:
- ret
-.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
-___
-
-# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-# size_t length, const AES_KEY *key,
-# unsigned char *ivp,const int enc);
-{
-# stack frame layout
-# -8(%rsp) return address
-my $keyp="0(%rsp)"; # one to pass as $key
-my $keyend="8(%rsp)"; # &(keyp->rd_key[4*keyp->rounds])
-my $_rsp="16(%rsp)"; # saved %rsp
-my $_inp="24(%rsp)"; # copy of 1st parameter, inp
-my $_out="32(%rsp)"; # copy of 2nd parameter, out
-my $_len="40(%rsp)"; # copy of 3rd parameter, length
-my $_key="48(%rsp)"; # copy of 4th parameter, key
-my $_ivp="56(%rsp)"; # copy of 5th parameter, ivp
-my $ivec="64(%rsp)"; # ivec[16]
-my $aes_key="80(%rsp)"; # copy of aes_key
-my $mark="80+240(%rsp)"; # copy of aes_key->rounds
-
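The fast path below keeps the chaining value in registers: on encryption every
plaintext block is xored with the previous ciphertext block before the block
cipher, on decryption every output block is xored with the previous ciphertext
block after it. The chaining itself, reduced to a few lines of Perl (a sketch;
$encrypt stands for a one-block cipher callback and $in for whole blocks only,
as on the fast path):

    sub cbc_encrypt {
        my ($in, $iv, $encrypt) = @_;      # $iv: 16-byte string
        my $out = '';
        while (length $in) {
            my $blk = substr($in, 0, 16, '');
            $iv = $encrypt->($blk ^ $iv);  # cipher(plaintext xor previous ct)
            $out .= $iv;                   # ciphertext block becomes next iv
        }
        return $out;
    }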
-$code.=<<___;
-.globl AES_cbc_encrypt
-.type AES_cbc_encrypt,\@function,6
-.align 16
-.extern OPENSSL_ia32cap_P
-.globl asm_AES_cbc_encrypt
-.hidden asm_AES_cbc_encrypt
-asm_AES_cbc_encrypt:
-AES_cbc_encrypt:
- cmp \$0,%rdx # check length
- je .Lcbc_epilogue
- pushfq
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
-.Lcbc_prologue:
-
- cld
- mov %r9d,%r9d # clear upper half of enc
-
- lea .LAES_Te(%rip),$sbox
- cmp \$0,%r9
- jne .Lcbc_picked_te
- lea .LAES_Td(%rip),$sbox
-.Lcbc_picked_te:
-
- mov OPENSSL_ia32cap_P(%rip),%r10d
- cmp \$$speed_limit,%rdx
- jb .Lcbc_slow_prologue
- test \$15,%rdx
- jnz .Lcbc_slow_prologue
- bt \$28,%r10d
- jc .Lcbc_slow_prologue
-
- # allocate aligned stack frame...
- lea -88-248(%rsp),$key
- and \$-64,$key
-
- # ... and make sure it doesn't alias with AES_T[ed] modulo 4096
- mov $sbox,%r10
- lea 2304($sbox),%r11
- mov $key,%r12
- and \$0xFFF,%r10 # s = $sbox&0xfff
- and \$0xFFF,%r11 # e = ($sbox+2304)&0xfff
- and \$0xFFF,%r12 # p = %rsp&0xfff
-
- cmp %r11,%r12 # if (p>=e) %rsp -= (p-e);
- jb .Lcbc_te_break_out
- sub %r11,%r12
- sub %r12,$key
- jmp .Lcbc_te_ok
-.Lcbc_te_break_out: # else %rsp -= (p-s)&0xfff + framesz
- sub %r10,%r12
- and \$0xFFF,%r12
- add \$320,%r12
- sub %r12,$key
-.align 4
-.Lcbc_te_ok:
-
- xchg %rsp,$key
- #add \$8,%rsp # reserve for return address!
- mov $key,$_rsp # save %rsp
-.Lcbc_fast_body:
- mov %rdi,$_inp # save copy of inp
- mov %rsi,$_out # save copy of out
- mov %rdx,$_len # save copy of len
- mov %rcx,$_key # save copy of key
- mov %r8,$_ivp # save copy of ivp
- movl \$0,$mark # copy of aes_key->rounds = 0;
- mov %r8,%rbp # rearrange input arguments
- mov %r9,%rbx
- mov %rsi,$out
- mov %rdi,$inp
- mov %rcx,$key
-
- mov 240($key),%eax # key->rounds
- # do we copy key schedule to stack?
- mov $key,%r10
- sub $sbox,%r10
- and \$0xfff,%r10
- cmp \$2304,%r10
- jb .Lcbc_do_ecopy
- cmp \$4096-248,%r10
- jb .Lcbc_skip_ecopy
-.align 4
-.Lcbc_do_ecopy:
- mov $key,%rsi
- lea $aes_key,%rdi
- lea $aes_key,$key
- mov \$240/8,%ecx
- .long 0x90A548F3 # rep movsq
- mov %eax,(%rdi) # copy aes_key->rounds
-.Lcbc_skip_ecopy:
- mov $key,$keyp # save key pointer
-
- mov \$18,%ecx
-.align 4
-.Lcbc_prefetch_te:
- mov 0($sbox),%r10
- mov 32($sbox),%r11
- mov 64($sbox),%r12
- mov 96($sbox),%r13
- lea 128($sbox),$sbox
- sub \$1,%ecx
- jnz .Lcbc_prefetch_te
- lea -2304($sbox),$sbox
-
- cmp \$0,%rbx
- je .LFAST_DECRYPT
-
-#----------------------------- ENCRYPT -----------------------------#
- mov 0(%rbp),$s0 # load iv
- mov 4(%rbp),$s1
- mov 8(%rbp),$s2
- mov 12(%rbp),$s3
-
-.align 4
-.Lcbc_fast_enc_loop:
- xor 0($inp),$s0
- xor 4($inp),$s1
- xor 8($inp),$s2
- xor 12($inp),$s3
- mov $keyp,$key # restore key
- mov $inp,$_inp # if ($verticalspin) save inp
-
- call _x86_64_AES_encrypt
-
- mov $_inp,$inp # if ($verticalspin) restore inp
- mov $_len,%r10
- mov $s0,0($out)
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- lea 16($inp),$inp
- lea 16($out),$out
- sub \$16,%r10
- test \$-16,%r10
- mov %r10,$_len
- jnz .Lcbc_fast_enc_loop
- mov $_ivp,%rbp # restore ivp
- mov $s0,0(%rbp) # save ivec
- mov $s1,4(%rbp)
- mov $s2,8(%rbp)
- mov $s3,12(%rbp)
-
- jmp .Lcbc_fast_cleanup
-
-#----------------------------- DECRYPT -----------------------------#
-.align 16
-.LFAST_DECRYPT:
- cmp $inp,$out
- je .Lcbc_fast_dec_in_place
-
- mov %rbp,$ivec
-.align 4
-.Lcbc_fast_dec_loop:
- mov 0($inp),$s0 # read input
- mov 4($inp),$s1
- mov 8($inp),$s2
- mov 12($inp),$s3
- mov $keyp,$key # restore key
- mov $inp,$_inp # if ($verticalspin) save inp
-
- call _x86_64_AES_decrypt
-
- mov $ivec,%rbp # load ivp
- mov $_inp,$inp # if ($verticalspin) restore inp
- mov $_len,%r10 # load len
- xor 0(%rbp),$s0 # xor iv
- xor 4(%rbp),$s1
- xor 8(%rbp),$s2
- xor 12(%rbp),$s3
- mov $inp,%rbp # current input, next iv
-
- sub \$16,%r10
- mov %r10,$_len # update len
- mov %rbp,$ivec # update ivp
-
- mov $s0,0($out) # write output
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- lea 16($inp),$inp
- lea 16($out),$out
- jnz .Lcbc_fast_dec_loop
- mov $_ivp,%r12 # load user ivp
- mov 0(%rbp),%r10 # load iv
- mov 8(%rbp),%r11
- mov %r10,0(%r12) # copy back to user
- mov %r11,8(%r12)
- jmp .Lcbc_fast_cleanup
-
-.align 16
-.Lcbc_fast_dec_in_place:
- mov 0(%rbp),%r10 # copy iv to stack
- mov 8(%rbp),%r11
- mov %r10,0+$ivec
- mov %r11,8+$ivec
-.align 4
-.Lcbc_fast_dec_in_place_loop:
- mov 0($inp),$s0 # load input
- mov 4($inp),$s1
- mov 8($inp),$s2
- mov 12($inp),$s3
- mov $keyp,$key # restore key
- mov $inp,$_inp # if ($verticalspin) save inp
-
- call _x86_64_AES_decrypt
-
- mov $_inp,$inp # if ($verticalspin) restore inp
- mov $_len,%r10
- xor 0+$ivec,$s0
- xor 4+$ivec,$s1
- xor 8+$ivec,$s2
- xor 12+$ivec,$s3
-
- mov 0($inp),%r11 # load input
- mov 8($inp),%r12
- sub \$16,%r10
- jz .Lcbc_fast_dec_in_place_done
-
- mov %r11,0+$ivec # copy input to iv
- mov %r12,8+$ivec
-
- mov $s0,0($out) # save output [zaps input]
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- lea 16($inp),$inp
- lea 16($out),$out
- mov %r10,$_len
- jmp .Lcbc_fast_dec_in_place_loop
-.Lcbc_fast_dec_in_place_done:
- mov $_ivp,%rdi
- mov %r11,0(%rdi) # copy iv back to user
- mov %r12,8(%rdi)
-
- mov $s0,0($out) # save output [zaps input]
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
-.align 4
-.Lcbc_fast_cleanup:
- cmpl \$0,$mark # was the key schedule copied?
- lea $aes_key,%rdi
- je .Lcbc_exit
- mov \$240/8,%ecx
- xor %rax,%rax
- .long 0x90AB48F3 # rep stosq
-
- jmp .Lcbc_exit
-
-#--------------------------- SLOW ROUTINE ---------------------------#
-.align 16
-.Lcbc_slow_prologue:
- # allocate aligned stack frame...
- lea -88(%rsp),%rbp
- and \$-64,%rbp
- # ... just "above" key schedule
- lea -88-63(%rcx),%r10
- sub %rbp,%r10
- neg %r10
- and \$0x3c0,%r10
- sub %r10,%rbp
-
- xchg %rsp,%rbp
- #add \$8,%rsp # reserve for return address!
- mov %rbp,$_rsp # save %rsp
-.Lcbc_slow_body:
- #mov %rdi,$_inp # save copy of inp
- #mov %rsi,$_out # save copy of out
- #mov %rdx,$_len # save copy of len
- #mov %rcx,$_key # save copy of key
- mov %r8,$_ivp # save copy of ivp
- mov %r8,%rbp # rearrange input arguments
- mov %r9,%rbx
- mov %rsi,$out
- mov %rdi,$inp
- mov %rcx,$key
- mov %rdx,%r10
-
- mov 240($key),%eax
- mov $key,$keyp # save key pointer
- shl \$4,%eax
- lea ($key,%rax),%rax
- mov %rax,$keyend
-
- # pick Te4 copy which can't "overlap" with stack frame or key schedule
- lea 2048($sbox),$sbox
- lea 768-8(%rsp),%rax
- sub $sbox,%rax
- and \$0x300,%rax
- lea ($sbox,%rax),$sbox
-
- cmp \$0,%rbx
- je .LSLOW_DECRYPT
-
-#--------------------------- SLOW ENCRYPT ---------------------------#
- test \$-16,%r10 # check length: at least one full block?
- mov 0(%rbp),$s0 # load iv
- mov 4(%rbp),$s1
- mov 8(%rbp),$s2
- mov 12(%rbp),$s3
- jz .Lcbc_slow_enc_tail # short input...
-
-.align 4
-.Lcbc_slow_enc_loop:
- xor 0($inp),$s0
- xor 4($inp),$s1
- xor 8($inp),$s2
- xor 12($inp),$s3
- mov $keyp,$key # restore key
- mov $inp,$_inp # save inp
- mov $out,$_out # save out
- mov %r10,$_len # save len
-
- call _x86_64_AES_encrypt_compact
-
- mov $_inp,$inp # restore inp
- mov $_out,$out # restore out
- mov $_len,%r10 # restore len
- mov $s0,0($out)
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- lea 16($inp),$inp
- lea 16($out),$out
- sub \$16,%r10
- test \$-16,%r10
- jnz .Lcbc_slow_enc_loop
- test \$15,%r10
- jnz .Lcbc_slow_enc_tail
- mov $_ivp,%rbp # restore ivp
- mov $s0,0(%rbp) # save ivec
- mov $s1,4(%rbp)
- mov $s2,8(%rbp)
- mov $s3,12(%rbp)
-
- jmp .Lcbc_exit
-
-.align 4
-.Lcbc_slow_enc_tail:
- mov %rax,%r11
- mov %rcx,%r12
- mov %r10,%rcx
- mov $inp,%rsi
- mov $out,%rdi
- .long 0x9066A4F3 # rep movsb
- mov \$16,%rcx # zero tail
- sub %r10,%rcx
- xor %rax,%rax
- .long 0x9066AAF3 # rep stosb
- mov $out,$inp # this is not a mistake!
- mov \$16,%r10 # len=16
- mov %r11,%rax
- mov %r12,%rcx
- jmp .Lcbc_slow_enc_loop # one more spin...
-#--------------------------- SLOW DECRYPT ---------------------------#
-.align 16
-.LSLOW_DECRYPT:
- shr \$3,%rax
- add %rax,$sbox # recall "magic" constants!
-
- mov 0(%rbp),%r11 # copy iv to stack
- mov 8(%rbp),%r12
- mov %r11,0+$ivec
- mov %r12,8+$ivec
-
-.align 4
-.Lcbc_slow_dec_loop:
- mov 0($inp),$s0 # load input
- mov 4($inp),$s1
- mov 8($inp),$s2
- mov 12($inp),$s3
- mov $keyp,$key # restore key
- mov $inp,$_inp # save inp
- mov $out,$_out # save out
- mov %r10,$_len # save len
-
- call _x86_64_AES_decrypt_compact
-
- mov $_inp,$inp # restore inp
- mov $_out,$out # restore out
- mov $_len,%r10
- xor 0+$ivec,$s0
- xor 4+$ivec,$s1
- xor 8+$ivec,$s2
- xor 12+$ivec,$s3
-
- mov 0($inp),%r11 # load input
- mov 8($inp),%r12
- sub \$16,%r10
- jc .Lcbc_slow_dec_partial
- jz .Lcbc_slow_dec_done
-
- mov %r11,0+$ivec # copy input to iv
- mov %r12,8+$ivec
-
- mov $s0,0($out) # save output [can zap input]
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- lea 16($inp),$inp
- lea 16($out),$out
- jmp .Lcbc_slow_dec_loop
-.Lcbc_slow_dec_done:
- mov $_ivp,%rdi
- mov %r11,0(%rdi) # copy iv back to user
- mov %r12,8(%rdi)
-
- mov $s0,0($out) # save output [can zap input]
- mov $s1,4($out)
- mov $s2,8($out)
- mov $s3,12($out)
-
- jmp .Lcbc_exit
-
-.align 4
-.Lcbc_slow_dec_partial:
- mov $_ivp,%rdi
- mov %r11,0(%rdi) # copy iv back to user
- mov %r12,8(%rdi)
-
- mov $s0,0+$ivec # save output to stack
- mov $s1,4+$ivec
- mov $s2,8+$ivec
- mov $s3,12+$ivec
-
- mov $out,%rdi
- lea $ivec,%rsi
- lea 16(%r10),%rcx
- .long 0x9066A4F3 # rep movsb
- jmp .Lcbc_exit
-
-.align 16
-.Lcbc_exit:
- mov $_rsp,%rsi
- mov (%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
-.Lcbc_popfq:
- popfq
-.Lcbc_epilogue:
- ret
-.size AES_cbc_encrypt,.-AES_cbc_encrypt
-___
-}
-
-$code.=<<___;
-.align 64
-.LAES_Te:
-___
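Each Te0 entry that follows packs one S-box output premultiplied by the
MixColumns coefficients {2,1,1,3}. The very first entry doubles as a sanity
check: S[0x00] = 0x63, so the word carries 2*0x63 = 0xc6 in byte 0, 0x63 in
bytes 1 and 2, and 3*0x63 = 0xa5 in byte 3, i.e. 0xa56363c6. Rebuilt with
xtime32() from the earlier sketch (illustrative):

    my $s = 0x63;                          # S-box of 0x00
    my $e = xtime32($s)                    # 2*S in byte 0
          | ($s << 8) | ($s << 16)         # 1*S in bytes 1 and 2
          | ((xtime32($s) ^ $s) << 24);    # 3*S in byte 3
    printf "0x%08x\n", $e;                 # prints 0xa56363c6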
- &_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
- &_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
- &_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
- &_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
- &_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
- &_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
- &_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
- &_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
- &_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
- &_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
- &_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
- &_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
- &_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
- &_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
- &_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
- &_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
- &_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
- &_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
- &_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
- &_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
- &_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
- &_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
- &_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
- &_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
- &_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
- &_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
- &_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
- &_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
- &_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
- &_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
- &_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
- &_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
- &_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
- &_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
- &_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
- &_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
- &_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
- &_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
- &_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
- &_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
- &_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
- &_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
- &_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
- &_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
- &_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
- &_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
- &_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
- &_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
- &_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
- &_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
- &_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
- &_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
- &_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
- &_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
- &_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
- &_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
- &_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
- &_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
- &_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
- &_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
- &_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
- &_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
- &_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
- &_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);
-
-#Te4 # four copies of Te4 to choose from to avoid L1 aliasing
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
-
- &data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
- &data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
- &data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
- &data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
- &data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
- &data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
- &data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
- &data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
- &data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
- &data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
- &data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
- &data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
- &data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
- &data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
- &data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
- &data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
- &data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
- &data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
- &data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
- &data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
- &data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
- &data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
- &data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
- &data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
- &data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
- &data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
- &data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
- &data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
- &data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
- &data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
- &data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
- &data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
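The four identical Te4 copies above give the code a choice of table whose cache
lines need not collide in the same L1 sets with other hot data. A loose C
illustration of the idea follows; the names and the selection rule here are
assumptions for illustration only, not this module's exact logic:

    #include <stdint.h>

    /* Four identical 256-byte copies of the AES S-box, as laid out above. */
    extern const uint8_t Te4[4][256];

    /* Pick a copy from some per-call value (e.g. bits of a stack address),
     * so the chosen copy's L1 cache sets vary instead of always aliasing
     * the same lines. */
    static const uint8_t *pick_te4_copy(uintptr_t salt)
    {
        return Te4[(salt >> 6) & 3];
    }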
-#rcon:
-$code.=<<___;
- .long 0x00000001, 0x00000002, 0x00000004, 0x00000008
- .long 0x00000010, 0x00000020, 0x00000040, 0x00000080
- .long 0x0000001b, 0x00000036, 0x80808080, 0x80808080
- .long 0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
-___
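The first ten words of the block above are the AES key-schedule round constants,
rcon[i] = x^i in GF(2^8) reduced modulo x^8+x^4+x^3+x+1; judging from their
values, the trailing 0x80808080/0xfefefefe/0x1b1b1b1b words are the
byte-replicated masks for doing that same GF(2^8) doubling on four packed bytes
at once (an inference, not a claim about every use site). A small
self-contained check of the rcon sequence:

    #include <stdio.h>

    int main(void)
    {
        unsigned r = 1;                 /* x^0 */
        for (int i = 0; i < 10; i++) {
            printf("0x%08x\n", r);      /* 01 02 04 08 10 20 40 80 1b 36 */
            /* multiply by x in GF(2^8): shift, then reduce mod 0x11b */
            r = (r << 1) ^ ((r & 0x80) ? 0x11b : 0);
        }
        return 0;
    }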
-$code.=<<___;
-.align 64
-.LAES_Td:
-___
- &_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
- &_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
- &_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
- &_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
- &_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
- &_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
- &_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
- &_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
- &_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
- &_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
- &_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
- &_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
- &_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
- &_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
- &_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
- &_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
- &_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
- &_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
- &_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
- &_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
- &_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
- &_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
- &_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
- &_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
- &_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
- &_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
- &_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
- &_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
- &_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
- &_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
- &_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
- &_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
- &_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
- &_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
- &_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
- &_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
- &_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
- &_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
- &_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
- &_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
- &_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
- &_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
- &_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
- &_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
- &_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
- &_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
- &_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
- &_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
- &_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
- &_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
- &_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
- &_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
- &_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
- &_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
- &_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
- &_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
- &_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
- &_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
- &_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
- &_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
- &_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
- &_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
- &_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
- &_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);
-
-#Td4: # four copies of Td4 to choose from to avoid L1 aliasing
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
- .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
- .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-___
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
- .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
- .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-___
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
- .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
- .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-___
- &data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
- &data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
- &data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
- &data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
- &data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
- &data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
- &data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
- &data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
- &data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
- &data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
- &data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
- &data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
- &data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
- &data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
- &data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
- &data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
- &data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
- &data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
- &data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
- &data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
- &data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
- &data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
- &data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
- &data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
- &data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
- &data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
- &data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
- &data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
- &data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
- &data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
- &data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
- &data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
-$code.=<<___;
- .long 0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
- .long 0x1b1b1b1b, 0x1b1b1b1b, 0, 0
-.asciz "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
-.align 64
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-# CONTEXT *context,DISPATCHER_CONTEXT *disp)
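The three handlers below all follow the general pattern that prototype
describes. A minimal C sketch of the pattern, for orientation only — the
function and the simplified frame walk are assumptions, while the real handlers
read the exact stack layouts their respective prologues create:

    #include <windows.h>

    static EXCEPTION_DISPOSITION se_handler_sketch(EXCEPTION_RECORD *rec,
                                                   ULONG64 frame,
                                                   CONTEXT *context,
                                                   DISPATCHER_CONTEXT *disp)
    {
        /* HandlerData[] holds image-relative prologue/epilogue labels. */
        const DWORD *hd = (const DWORD *)disp->HandlerData;
        ULONG64 pc = context->Rip, base = disp->ImageBase;

        if (pc >= base + hd[0] && pc < base + hd[1]) {
            /* RIP is in the function body, so the frame is live: feed the
             * callee-saved registers back to the unwinder (simplified;
             * offsets differ per function). */
            ULONG64 *sp = (ULONG64 *)context->Rsp;
            context->R15 = sp[0]; context->R14 = sp[1];
            context->R13 = sp[2]; context->R12 = sp[3];
            context->Rbp = sp[4]; context->Rbx = sp[5];
            context->Rsp = (ULONG64)(sp + 6);
        }
        (void)rec; (void)frame;
        return ExceptionContinueSearch;   /* hand back to RtlVirtualUnwind */
    }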
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern __imp_RtlVirtualUnwind
-.type block_se_handler,\@abi-omnipotent
-.align 16
-block_se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
-
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lin_block_prologue
-
- mov 152($context),%rax # pull context->Rsp
-
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lin_block_prologue
-
- mov 24(%rax),%rax # pull saved real stack pointer
- lea 48(%rax),%rax # adjust...
-
- mov -8(%rax),%rbx
- mov -16(%rax),%rbp
- mov -24(%rax),%r12
- mov -32(%rax),%r13
- mov -40(%rax),%r14
- mov -48(%rax),%r15
- mov %rbx,144($context) # restore context->Rbx
- mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore context->R12
- mov %r13,224($context) # restore context->R13
- mov %r14,232($context) # restore context->R14
- mov %r15,240($context) # restore context->R15
-
-.Lin_block_prologue:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
-
- jmp .Lcommon_seh_exit
-.size block_se_handler,.-block_se_handler
-
-.type key_se_handler,\@abi-omnipotent
-.align 16
-key_se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
-
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lin_key_prologue
-
- mov 152($context),%rax # pull context->Rsp
-
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lin_key_prologue
-
- lea 56(%rax),%rax
-
- mov -8(%rax),%rbx
- mov -16(%rax),%rbp
- mov -24(%rax),%r12
- mov -32(%rax),%r13
- mov -40(%rax),%r14
- mov -48(%rax),%r15
- mov %rbx,144($context) # restore context->Rbx
- mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore context->R12
- mov %r13,224($context) # restore context->R13
- mov %r14,232($context) # restore context->R14
- mov %r15,240($context) # restore context->R15
-
-.Lin_key_prologue:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
-
- jmp .Lcommon_seh_exit
-.size key_se_handler,.-key_se_handler
-
-.type cbc_se_handler,\@abi-omnipotent
-.align 16
-cbc_se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- lea .Lcbc_prologue(%rip),%r10
- cmp %r10,%rbx # context->Rip<.Lcbc_prologue
- jb .Lin_cbc_prologue
-
- lea .Lcbc_fast_body(%rip),%r10
- cmp %r10,%rbx # context->Rip<.Lcbc_fast_body
- jb .Lin_cbc_frame_setup
-
- lea .Lcbc_slow_prologue(%rip),%r10
- cmp %r10,%rbx # context->Rip<.Lcbc_slow_prologue
- jb .Lin_cbc_body
-
- lea .Lcbc_slow_body(%rip),%r10
- cmp %r10,%rbx # context->Rip<.Lcbc_slow_body
- jb .Lin_cbc_frame_setup
-
-.Lin_cbc_body:
- mov 152($context),%rax # pull context->Rsp
-
- lea .Lcbc_epilogue(%rip),%r10
- cmp %r10,%rbx # context->Rip>=.Lcbc_epilogue
- jae .Lin_cbc_prologue
-
- lea 8(%rax),%rax
-
- lea .Lcbc_popfq(%rip),%r10
- cmp %r10,%rbx # context->Rip>=.Lcbc_popfq
- jae .Lin_cbc_prologue
-
- mov `16-8`(%rax),%rax # biased $_rsp
- lea 56(%rax),%rax
-
-.Lin_cbc_frame_setup:
- mov -16(%rax),%rbx
- mov -24(%rax),%rbp
- mov -32(%rax),%r12
- mov -40(%rax),%r13
- mov -48(%rax),%r14
- mov -56(%rax),%r15
- mov %rbx,144($context) # restore context->Rbx
- mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore context->R12
- mov %r13,224($context) # restore context->R13
- mov %r14,232($context) # restore context->R14
- mov %r15,240($context) # restore context->R15
-
-.Lin_cbc_prologue:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
-
-.Lcommon_seh_exit:
-
- mov 40($disp),%rdi # disp->ContextRecord
- mov $context,%rsi # context
- mov \$`1232/8`,%ecx # sizeof(CONTEXT)
- .long 0xa548f3fc # cld; rep movsq
-
- mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
- mov 8(%rsi),%rdx # arg2, disp->ImageBase
- mov 0(%rsi),%r8 # arg3, disp->ControlPc
- mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
- mov 40(%rsi),%r10 # disp->ContextRecord
- lea 56(%rsi),%r11 # &disp->HandlerData
- lea 24(%rsi),%r12 # &disp->EstablisherFrame
- mov %r10,32(%rsp) # arg5
- mov %r11,40(%rsp) # arg6
- mov %r12,48(%rsp) # arg7
- mov %rcx,56(%rsp) # arg8, (NULL)
- call *__imp_RtlVirtualUnwind(%rip)
-
- mov \$1,%eax # ExceptionContinueSearch
- add \$64,%rsp
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %rdi
- pop %rsi
- ret
-.size cbc_se_handler,.-cbc_se_handler
-
-.section .pdata
-.align 4
- .rva .LSEH_begin_AES_encrypt
- .rva .LSEH_end_AES_encrypt
- .rva .LSEH_info_AES_encrypt
-
- .rva .LSEH_begin_AES_decrypt
- .rva .LSEH_end_AES_decrypt
- .rva .LSEH_info_AES_decrypt
-
- .rva .LSEH_begin_private_AES_set_encrypt_key
- .rva .LSEH_end_private_AES_set_encrypt_key
- .rva .LSEH_info_private_AES_set_encrypt_key
-
- .rva .LSEH_begin_private_AES_set_decrypt_key
- .rva .LSEH_end_private_AES_set_decrypt_key
- .rva .LSEH_info_private_AES_set_decrypt_key
-
- .rva .LSEH_begin_AES_cbc_encrypt
- .rva .LSEH_end_AES_cbc_encrypt
- .rva .LSEH_info_AES_cbc_encrypt
-
-.section .xdata
-.align 8
-.LSEH_info_AES_encrypt:
- .byte 9,0,0,0
- .rva block_se_handler
- .rva .Lenc_prologue,.Lenc_epilogue # HandlerData[]
-.LSEH_info_AES_decrypt:
- .byte 9,0,0,0
- .rva block_se_handler
- .rva .Ldec_prologue,.Ldec_epilogue # HandlerData[]
-.LSEH_info_private_AES_set_encrypt_key:
- .byte 9,0,0,0
- .rva key_se_handler
- .rva .Lenc_key_prologue,.Lenc_key_epilogue # HandlerData[]
-.LSEH_info_private_AES_set_decrypt_key:
- .byte 9,0,0,0
- .rva key_se_handler
- .rva .Ldec_key_prologue,.Ldec_key_epilogue # HandlerData[]
-.LSEH_info_AES_cbc_encrypt:
- .byte 9,0,0,0
- .rva cbc_se_handler
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-print $code;
-
-close STDOUT;
diff --git a/app/openssl/crypto/aes/asm/aesni-sha1-x86_64.S b/app/openssl/crypto/aes/asm/aesni-sha1-x86_64.S
deleted file mode 100644
index 32fd600b..00000000
--- a/app/openssl/crypto/aes/asm/aesni-sha1-x86_64.S
+++ /dev/null
@@ -1,1396 +0,0 @@
-.text
-
-
-.globl aesni_cbc_sha1_enc
-.type aesni_cbc_sha1_enc,@function
-.align 16
-aesni_cbc_sha1_enc:
-
- movl OPENSSL_ia32cap_P+0(%rip),%r10d
- movl OPENSSL_ia32cap_P+4(%rip),%r11d
- jmp aesni_cbc_sha1_enc_ssse3
- .byte 0xf3,0xc3
-.size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
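Throughout this file, instructions that older assemblers could not encode are
emitted as raw bytes: .byte 0xf3,0xc3 above is "rep ret", and the recurring
.byte 102,69,15,56,220,222 / .byte 102,69,15,56,221,223 / .byte 102,15,56,0,198
sequences below decode to aesenc %xmm14,%xmm11, aesenclast %xmm15,%xmm11 and
pshufb %xmm6,%xmm0 respectively.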
-.type aesni_cbc_sha1_enc_ssse3,@function
-.align 16
-aesni_cbc_sha1_enc_ssse3:
- movq 8(%rsp),%r10
-
-
- pushq %rbx
- pushq %rbp
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
- leaq -104(%rsp),%rsp
-
-
- movq %rdi,%r12
- movq %rsi,%r13
- movq %rdx,%r14
- movq %rcx,%r15
- movdqu (%r8),%xmm11
- movq %r8,88(%rsp)
- shlq $6,%r14
- subq %r12,%r13
- movl 240(%r15),%r8d
- addq %r10,%r14
-
- leaq K_XX_XX(%rip),%r11
- movl 0(%r9),%eax
- movl 4(%r9),%ebx
- movl 8(%r9),%ecx
- movl 12(%r9),%edx
- movl %ebx,%esi
- movl 16(%r9),%ebp
-
- movdqa 64(%r11),%xmm6
- movdqa 0(%r11),%xmm9
- movdqu 0(%r10),%xmm0
- movdqu 16(%r10),%xmm1
- movdqu 32(%r10),%xmm2
- movdqu 48(%r10),%xmm3
-.byte 102,15,56,0,198
- addq $64,%r10
-.byte 102,15,56,0,206
-.byte 102,15,56,0,214
-.byte 102,15,56,0,222
- paddd %xmm9,%xmm0
- paddd %xmm9,%xmm1
- paddd %xmm9,%xmm2
- movdqa %xmm0,0(%rsp)
- psubd %xmm9,%xmm0
- movdqa %xmm1,16(%rsp)
- psubd %xmm9,%xmm1
- movdqa %xmm2,32(%rsp)
- psubd %xmm9,%xmm2
- movups (%r15),%xmm13
- movups 16(%r15),%xmm14
- jmp .Loop_ssse3
-.align 16
-.Loop_ssse3:
- movdqa %xmm1,%xmm4
- addl 0(%rsp),%ebp
- movups 0(%r12),%xmm12
- xorps %xmm13,%xmm12
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
- xorl %edx,%ecx
- movdqa %xmm3,%xmm8
-.byte 102,15,58,15,224,8
- movl %eax,%edi
- roll $5,%eax
- paddd %xmm3,%xmm9
- andl %ecx,%esi
- xorl %edx,%ecx
- psrldq $4,%xmm8
- xorl %edx,%esi
- addl %eax,%ebp
- pxor %xmm0,%xmm4
- rorl $2,%ebx
- addl %esi,%ebp
- pxor %xmm2,%xmm8
- addl 4(%rsp),%edx
- xorl %ecx,%ebx
- movl %ebp,%esi
- roll $5,%ebp
- pxor %xmm8,%xmm4
- andl %ebx,%edi
- xorl %ecx,%ebx
- movdqa %xmm9,48(%rsp)
- xorl %ecx,%edi
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
- addl %ebp,%edx
- movdqa %xmm4,%xmm10
- movdqa %xmm4,%xmm8
- rorl $7,%eax
- addl %edi,%edx
- addl 8(%rsp),%ecx
- xorl %ebx,%eax
- pslldq $12,%xmm10
- paddd %xmm4,%xmm4
- movl %edx,%edi
- roll $5,%edx
- andl %eax,%esi
- xorl %ebx,%eax
- psrld $31,%xmm8
- xorl %ebx,%esi
- addl %edx,%ecx
- movdqa %xmm10,%xmm9
- rorl $7,%ebp
- addl %esi,%ecx
- psrld $30,%xmm10
- por %xmm8,%xmm4
- addl 12(%rsp),%ebx
- xorl %eax,%ebp
- movl %ecx,%esi
- roll $5,%ecx
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
- pslld $2,%xmm9
- pxor %xmm10,%xmm4
- andl %ebp,%edi
- xorl %eax,%ebp
- movdqa 0(%r11),%xmm10
- xorl %eax,%edi
- addl %ecx,%ebx
- pxor %xmm9,%xmm4
- rorl $7,%edx
- addl %edi,%ebx
- movdqa %xmm2,%xmm5
- addl 16(%rsp),%eax
- xorl %ebp,%edx
- movdqa %xmm4,%xmm9
-.byte 102,15,58,15,233,8
- movl %ebx,%edi
- roll $5,%ebx
- paddd %xmm4,%xmm10
- andl %edx,%esi
- xorl %ebp,%edx
- psrldq $4,%xmm9
- xorl %ebp,%esi
- addl %ebx,%eax
- pxor %xmm1,%xmm5
- rorl $7,%ecx
- addl %esi,%eax
- pxor %xmm3,%xmm9
- addl 20(%rsp),%ebp
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
- xorl %edx,%ecx
- movl %eax,%esi
- roll $5,%eax
- pxor %xmm9,%xmm5
- andl %ecx,%edi
- xorl %edx,%ecx
- movdqa %xmm10,0(%rsp)
- xorl %edx,%edi
- addl %eax,%ebp
- movdqa %xmm5,%xmm8
- movdqa %xmm5,%xmm9
- rorl $7,%ebx
- addl %edi,%ebp
- addl 24(%rsp),%edx
- xorl %ecx,%ebx
- pslldq $12,%xmm8
- paddd %xmm5,%xmm5
- movl %ebp,%edi
- roll $5,%ebp
- andl %ebx,%esi
- xorl %ecx,%ebx
- psrld $31,%xmm9
- xorl %ecx,%esi
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
- addl %ebp,%edx
- movdqa %xmm8,%xmm10
- rorl $7,%eax
- addl %esi,%edx
- psrld $30,%xmm8
- por %xmm9,%xmm5
- addl 28(%rsp),%ecx
- xorl %ebx,%eax
- movl %edx,%esi
- roll $5,%edx
- pslld $2,%xmm10
- pxor %xmm8,%xmm5
- andl %eax,%edi
- xorl %ebx,%eax
- movdqa 16(%r11),%xmm8
- xorl %ebx,%edi
- addl %edx,%ecx
- pxor %xmm10,%xmm5
- rorl $7,%ebp
- addl %edi,%ecx
- movdqa %xmm3,%xmm6
- addl 32(%rsp),%ebx
- xorl %eax,%ebp
- movdqa %xmm5,%xmm10
-.byte 102,15,58,15,242,8
- movl %ecx,%edi
- roll $5,%ecx
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
- paddd %xmm5,%xmm8
- andl %ebp,%esi
- xorl %eax,%ebp
- psrldq $4,%xmm10
- xorl %eax,%esi
- addl %ecx,%ebx
- pxor %xmm2,%xmm6
- rorl $7,%edx
- addl %esi,%ebx
- pxor %xmm4,%xmm10
- addl 36(%rsp),%eax
- xorl %ebp,%edx
- movl %ebx,%esi
- roll $5,%ebx
- pxor %xmm10,%xmm6
- andl %edx,%edi
- xorl %ebp,%edx
- movdqa %xmm8,16(%rsp)
- xorl %ebp,%edi
- addl %ebx,%eax
- movdqa %xmm6,%xmm9
- movdqa %xmm6,%xmm10
- rorl $7,%ecx
- addl %edi,%eax
- addl 40(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
- xorl %edx,%ecx
- pslldq $12,%xmm9
- paddd %xmm6,%xmm6
- movl %eax,%edi
- roll $5,%eax
- andl %ecx,%esi
- xorl %edx,%ecx
- psrld $31,%xmm10
- xorl %edx,%esi
- addl %eax,%ebp
- movdqa %xmm9,%xmm8
- rorl $7,%ebx
- addl %esi,%ebp
- psrld $30,%xmm9
- por %xmm10,%xmm6
- addl 44(%rsp),%edx
- xorl %ecx,%ebx
- movl %ebp,%esi
- roll $5,%ebp
- pslld $2,%xmm8
- pxor %xmm9,%xmm6
- andl %ebx,%edi
- xorl %ecx,%ebx
- movdqa 16(%r11),%xmm9
- xorl %ecx,%edi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %ebp,%edx
- pxor %xmm8,%xmm6
- rorl $7,%eax
- addl %edi,%edx
- movdqa %xmm4,%xmm7
- addl 48(%rsp),%ecx
- xorl %ebx,%eax
- movdqa %xmm6,%xmm8
-.byte 102,15,58,15,251,8
- movl %edx,%edi
- roll $5,%edx
- paddd %xmm6,%xmm9
- andl %eax,%esi
- xorl %ebx,%eax
- psrldq $4,%xmm8
- xorl %ebx,%esi
- addl %edx,%ecx
- pxor %xmm3,%xmm7
- rorl $7,%ebp
- addl %esi,%ecx
- pxor %xmm5,%xmm8
- addl 52(%rsp),%ebx
- xorl %eax,%ebp
- movl %ecx,%esi
- roll $5,%ecx
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
- pxor %xmm8,%xmm7
- andl %ebp,%edi
- xorl %eax,%ebp
- movdqa %xmm9,32(%rsp)
- xorl %eax,%edi
- addl %ecx,%ebx
- movdqa %xmm7,%xmm10
- movdqa %xmm7,%xmm8
- rorl $7,%edx
- addl %edi,%ebx
- addl 56(%rsp),%eax
- xorl %ebp,%edx
- pslldq $12,%xmm10
- paddd %xmm7,%xmm7
- movl %ebx,%edi
- roll $5,%ebx
- andl %edx,%esi
- xorl %ebp,%edx
- psrld $31,%xmm8
- xorl %ebp,%esi
- addl %ebx,%eax
- movdqa %xmm10,%xmm9
- rorl $7,%ecx
- addl %esi,%eax
- psrld $30,%xmm10
- por %xmm8,%xmm7
- addl 60(%rsp),%ebp
- cmpl $11,%r8d
- jb .Laesenclast1
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
- je .Laesenclast1
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
-.Laesenclast1:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- xorl %edx,%ecx
- movl %eax,%esi
- roll $5,%eax
- pslld $2,%xmm9
- pxor %xmm10,%xmm7
- andl %ecx,%edi
- xorl %edx,%ecx
- movdqa 16(%r11),%xmm10
- xorl %edx,%edi
- addl %eax,%ebp
- pxor %xmm9,%xmm7
- rorl $7,%ebx
- addl %edi,%ebp
- movdqa %xmm7,%xmm9
- addl 0(%rsp),%edx
- pxor %xmm4,%xmm0
-.byte 102,68,15,58,15,206,8
- xorl %ecx,%ebx
- movl %ebp,%edi
- roll $5,%ebp
- pxor %xmm1,%xmm0
- andl %ebx,%esi
- xorl %ecx,%ebx
- movdqa %xmm10,%xmm8
- paddd %xmm7,%xmm10
- xorl %ecx,%esi
- movups 16(%r12),%xmm12
- xorps %xmm13,%xmm12
- movups %xmm11,0(%r13,%r12,1)
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
- addl %ebp,%edx
- pxor %xmm9,%xmm0
- rorl $7,%eax
- addl %esi,%edx
- addl 4(%rsp),%ecx
- xorl %ebx,%eax
- movdqa %xmm0,%xmm9
- movdqa %xmm10,48(%rsp)
- movl %edx,%esi
- roll $5,%edx
- andl %eax,%edi
- xorl %ebx,%eax
- pslld $2,%xmm0
- xorl %ebx,%edi
- addl %edx,%ecx
- psrld $30,%xmm9
- rorl $7,%ebp
- addl %edi,%ecx
- addl 8(%rsp),%ebx
- xorl %eax,%ebp
- movl %ecx,%edi
- roll $5,%ecx
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
- por %xmm9,%xmm0
- andl %ebp,%esi
- xorl %eax,%ebp
- movdqa %xmm0,%xmm10
- xorl %eax,%esi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- addl 12(%rsp),%eax
- xorl %ebp,%edx
- movl %ebx,%esi
- roll $5,%ebx
- andl %edx,%edi
- xorl %ebp,%edx
- xorl %ebp,%edi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %edi,%eax
- addl 16(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
- pxor %xmm5,%xmm1
-.byte 102,68,15,58,15,215,8
- xorl %edx,%esi
- movl %eax,%edi
- roll $5,%eax
- pxor %xmm2,%xmm1
- xorl %ecx,%esi
- addl %eax,%ebp
- movdqa %xmm8,%xmm9
- paddd %xmm0,%xmm8
- rorl $7,%ebx
- addl %esi,%ebp
- pxor %xmm10,%xmm1
- addl 20(%rsp),%edx
- xorl %ecx,%edi
- movl %ebp,%esi
- roll $5,%ebp
- movdqa %xmm1,%xmm10
- movdqa %xmm8,0(%rsp)
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
- addl %edi,%edx
- pslld $2,%xmm1
- addl 24(%rsp),%ecx
- xorl %ebx,%esi
- psrld $30,%xmm10
- movl %edx,%edi
- roll $5,%edx
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %esi,%ecx
- por %xmm10,%xmm1
- addl 28(%rsp),%ebx
- xorl %eax,%edi
- movdqa %xmm1,%xmm8
- movl %ecx,%esi
- roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %edi,%ebx
- addl 32(%rsp),%eax
- pxor %xmm6,%xmm2
-.byte 102,68,15,58,15,192,8
- xorl %ebp,%esi
- movl %ebx,%edi
- roll $5,%ebx
- pxor %xmm3,%xmm2
- xorl %edx,%esi
- addl %ebx,%eax
- movdqa 32(%r11),%xmm10
- paddd %xmm1,%xmm9
- rorl $7,%ecx
- addl %esi,%eax
- pxor %xmm8,%xmm2
- addl 36(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
- xorl %edx,%edi
- movl %eax,%esi
- roll $5,%eax
- movdqa %xmm2,%xmm8
- movdqa %xmm9,16(%rsp)
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %edi,%ebp
- pslld $2,%xmm2
- addl 40(%rsp),%edx
- xorl %ecx,%esi
- psrld $30,%xmm8
- movl %ebp,%edi
- roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
- addl %esi,%edx
- por %xmm8,%xmm2
- addl 44(%rsp),%ecx
- xorl %ebx,%edi
- movdqa %xmm2,%xmm9
- movl %edx,%esi
- roll $5,%edx
- xorl %eax,%edi
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %edi,%ecx
- addl 48(%rsp),%ebx
- pxor %xmm7,%xmm3
-.byte 102,68,15,58,15,201,8
- xorl %eax,%esi
- movl %ecx,%edi
- roll $5,%ecx
- pxor %xmm4,%xmm3
- xorl %ebp,%esi
- addl %ecx,%ebx
- movdqa %xmm10,%xmm8
- paddd %xmm2,%xmm10
- rorl $7,%edx
- addl %esi,%ebx
- pxor %xmm9,%xmm3
- addl 52(%rsp),%eax
- xorl %ebp,%edi
- movl %ebx,%esi
- roll $5,%ebx
- movdqa %xmm3,%xmm9
- movdqa %xmm10,32(%rsp)
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %edi,%eax
- pslld $2,%xmm3
- addl 56(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
- xorl %edx,%esi
- psrld $30,%xmm9
- movl %eax,%edi
- roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %esi,%ebp
- por %xmm9,%xmm3
- addl 60(%rsp),%edx
- xorl %ecx,%edi
- movdqa %xmm3,%xmm10
- movl %ebp,%esi
- roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
- addl %edi,%edx
- addl 0(%rsp),%ecx
- pxor %xmm0,%xmm4
-.byte 102,68,15,58,15,210,8
- xorl %ebx,%esi
- movl %edx,%edi
- roll $5,%edx
- pxor %xmm5,%xmm4
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %edx,%ecx
- movdqa %xmm8,%xmm9
- paddd %xmm3,%xmm8
- rorl $7,%ebp
- addl %esi,%ecx
- pxor %xmm10,%xmm4
- addl 4(%rsp),%ebx
- xorl %eax,%edi
- movl %ecx,%esi
- roll $5,%ecx
- movdqa %xmm4,%xmm10
- movdqa %xmm8,48(%rsp)
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %edi,%ebx
- pslld $2,%xmm4
- addl 8(%rsp),%eax
- xorl %ebp,%esi
- psrld $30,%xmm10
- movl %ebx,%edi
- roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %esi,%eax
- por %xmm10,%xmm4
- addl 12(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
- xorl %edx,%edi
- movdqa %xmm4,%xmm8
- movl %eax,%esi
- roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %edi,%ebp
- addl 16(%rsp),%edx
- pxor %xmm1,%xmm5
-.byte 102,68,15,58,15,195,8
- xorl %ecx,%esi
- movl %ebp,%edi
- roll $5,%ebp
- pxor %xmm6,%xmm5
- xorl %ebx,%esi
- addl %ebp,%edx
- movdqa %xmm9,%xmm10
- paddd %xmm4,%xmm9
- rorl $7,%eax
- addl %esi,%edx
- pxor %xmm8,%xmm5
- addl 20(%rsp),%ecx
- xorl %ebx,%edi
- movl %edx,%esi
- roll $5,%edx
- movdqa %xmm5,%xmm8
- movdqa %xmm9,0(%rsp)
- xorl %eax,%edi
- cmpl $11,%r8d
- jb .Laesenclast2
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
- je .Laesenclast2
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
-.Laesenclast2:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %edi,%ecx
- pslld $2,%xmm5
- addl 24(%rsp),%ebx
- xorl %eax,%esi
- psrld $30,%xmm8
- movl %ecx,%edi
- roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- por %xmm8,%xmm5
- addl 28(%rsp),%eax
- xorl %ebp,%edi
- movdqa %xmm5,%xmm9
- movl %ebx,%esi
- roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %edi,%eax
- movl %ecx,%edi
- movups 32(%r12),%xmm12
- xorps %xmm13,%xmm12
- movups %xmm11,16(%r13,%r12,1)
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
- pxor %xmm2,%xmm6
-.byte 102,68,15,58,15,204,8
- xorl %edx,%ecx
- addl 32(%rsp),%ebp
- andl %edx,%edi
- pxor %xmm7,%xmm6
- andl %ecx,%esi
- rorl $7,%ebx
- movdqa %xmm10,%xmm8
- paddd %xmm5,%xmm10
- addl %edi,%ebp
- movl %eax,%edi
- pxor %xmm9,%xmm6
- roll $5,%eax
- addl %esi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movdqa %xmm6,%xmm9
- movdqa %xmm10,16(%rsp)
- movl %ebx,%esi
- xorl %ecx,%ebx
- addl 36(%rsp),%edx
- andl %ecx,%esi
- pslld $2,%xmm6
- andl %ebx,%edi
- rorl $7,%eax
- psrld $30,%xmm9
- addl %esi,%edx
- movl %ebp,%esi
- roll $5,%ebp
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
- addl %edi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- por %xmm9,%xmm6
- movl %eax,%edi
- xorl %ebx,%eax
- movdqa %xmm6,%xmm10
- addl 40(%rsp),%ecx
- andl %ebx,%edi
- andl %eax,%esi
- rorl $7,%ebp
- addl %edi,%ecx
- movl %edx,%edi
- roll $5,%edx
- addl %esi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movl %ebp,%esi
- xorl %eax,%ebp
- addl 44(%rsp),%ebx
- andl %eax,%esi
- andl %ebp,%edi
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
- rorl $7,%edx
- addl %esi,%ebx
- movl %ecx,%esi
- roll $5,%ecx
- addl %edi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movl %edx,%edi
- pxor %xmm3,%xmm7
-.byte 102,68,15,58,15,213,8
- xorl %ebp,%edx
- addl 48(%rsp),%eax
- andl %ebp,%edi
- pxor %xmm0,%xmm7
- andl %edx,%esi
- rorl $7,%ecx
- movdqa 48(%r11),%xmm9
- paddd %xmm6,%xmm8
- addl %edi,%eax
- movl %ebx,%edi
- pxor %xmm10,%xmm7
- roll $5,%ebx
- addl %esi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- movdqa %xmm7,%xmm10
- movdqa %xmm8,32(%rsp)
- movl %ecx,%esi
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
- xorl %edx,%ecx
- addl 52(%rsp),%ebp
- andl %edx,%esi
- pslld $2,%xmm7
- andl %ecx,%edi
- rorl $7,%ebx
- psrld $30,%xmm10
- addl %esi,%ebp
- movl %eax,%esi
- roll $5,%eax
- addl %edi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- por %xmm10,%xmm7
- movl %ebx,%edi
- xorl %ecx,%ebx
- movdqa %xmm7,%xmm8
- addl 56(%rsp),%edx
- andl %ecx,%edi
- andl %ebx,%esi
- rorl $7,%eax
- addl %edi,%edx
- movl %ebp,%edi
- roll $5,%ebp
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
- addl %esi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movl %eax,%esi
- xorl %ebx,%eax
- addl 60(%rsp),%ecx
- andl %ebx,%esi
- andl %eax,%edi
- rorl $7,%ebp
- addl %esi,%ecx
- movl %edx,%esi
- roll $5,%edx
- addl %edi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movl %ebp,%edi
- pxor %xmm4,%xmm0
-.byte 102,68,15,58,15,198,8
- xorl %eax,%ebp
- addl 0(%rsp),%ebx
- andl %eax,%edi
- pxor %xmm1,%xmm0
- andl %ebp,%esi
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
- rorl $7,%edx
- movdqa %xmm9,%xmm10
- paddd %xmm7,%xmm9
- addl %edi,%ebx
- movl %ecx,%edi
- pxor %xmm8,%xmm0
- roll $5,%ecx
- addl %esi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movdqa %xmm0,%xmm8
- movdqa %xmm9,48(%rsp)
- movl %edx,%esi
- xorl %ebp,%edx
- addl 4(%rsp),%eax
- andl %ebp,%esi
- pslld $2,%xmm0
- andl %edx,%edi
- rorl $7,%ecx
- psrld $30,%xmm8
- addl %esi,%eax
- movl %ebx,%esi
- roll $5,%ebx
- addl %edi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- por %xmm8,%xmm0
- movl %ecx,%edi
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
- xorl %edx,%ecx
- movdqa %xmm0,%xmm9
- addl 8(%rsp),%ebp
- andl %edx,%edi
- andl %ecx,%esi
- rorl $7,%ebx
- addl %edi,%ebp
- movl %eax,%edi
- roll $5,%eax
- addl %esi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movl %ebx,%esi
- xorl %ecx,%ebx
- addl 12(%rsp),%edx
- andl %ecx,%esi
- andl %ebx,%edi
- rorl $7,%eax
- addl %esi,%edx
- movl %ebp,%esi
- roll $5,%ebp
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %edi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movl %eax,%edi
- pxor %xmm5,%xmm1
-.byte 102,68,15,58,15,207,8
- xorl %ebx,%eax
- addl 16(%rsp),%ecx
- andl %ebx,%edi
- pxor %xmm2,%xmm1
- andl %eax,%esi
- rorl $7,%ebp
- movdqa %xmm10,%xmm8
- paddd %xmm0,%xmm10
- addl %edi,%ecx
- movl %edx,%edi
- pxor %xmm9,%xmm1
- roll $5,%edx
- addl %esi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- movdqa %xmm1,%xmm9
- movdqa %xmm10,0(%rsp)
- movl %ebp,%esi
- xorl %eax,%ebp
- addl 20(%rsp),%ebx
- andl %eax,%esi
- pslld $2,%xmm1
- andl %ebp,%edi
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
- rorl $7,%edx
- psrld $30,%xmm9
- addl %esi,%ebx
- movl %ecx,%esi
- roll $5,%ecx
- addl %edi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- por %xmm9,%xmm1
- movl %edx,%edi
- xorl %ebp,%edx
- movdqa %xmm1,%xmm10
- addl 24(%rsp),%eax
- andl %ebp,%edi
- andl %edx,%esi
- rorl $7,%ecx
- addl %edi,%eax
- movl %ebx,%edi
- roll $5,%ebx
- addl %esi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- movl %ecx,%esi
- cmpl $11,%r8d
- jb .Laesenclast3
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
- je .Laesenclast3
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
-.Laesenclast3:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- xorl %edx,%ecx
- addl 28(%rsp),%ebp
- andl %edx,%esi
- andl %ecx,%edi
- rorl $7,%ebx
- addl %esi,%ebp
- movl %eax,%esi
- roll $5,%eax
- addl %edi,%ebp
- xorl %edx,%ecx
- addl %eax,%ebp
- movl %ebx,%edi
- pxor %xmm6,%xmm2
-.byte 102,68,15,58,15,208,8
- xorl %ecx,%ebx
- addl 32(%rsp),%edx
- andl %ecx,%edi
- pxor %xmm3,%xmm2
- andl %ebx,%esi
- rorl $7,%eax
- movdqa %xmm8,%xmm9
- paddd %xmm1,%xmm8
- addl %edi,%edx
- movl %ebp,%edi
- pxor %xmm10,%xmm2
- roll $5,%ebp
- movups 48(%r12),%xmm12
- xorps %xmm13,%xmm12
- movups %xmm11,32(%r13,%r12,1)
- xorps %xmm12,%xmm11
-.byte 102,69,15,56,220,222
- movups 32(%r15),%xmm15
- addl %esi,%edx
- xorl %ecx,%ebx
- addl %ebp,%edx
- movdqa %xmm2,%xmm10
- movdqa %xmm8,16(%rsp)
- movl %eax,%esi
- xorl %ebx,%eax
- addl 36(%rsp),%ecx
- andl %ebx,%esi
- pslld $2,%xmm2
- andl %eax,%edi
- rorl $7,%ebp
- psrld $30,%xmm10
- addl %esi,%ecx
- movl %edx,%esi
- roll $5,%edx
- addl %edi,%ecx
- xorl %ebx,%eax
- addl %edx,%ecx
- por %xmm10,%xmm2
- movl %ebp,%edi
- xorl %eax,%ebp
- movdqa %xmm2,%xmm8
- addl 40(%rsp),%ebx
- andl %eax,%edi
- andl %ebp,%esi
-.byte 102,69,15,56,220,223
- movups 48(%r15),%xmm14
- rorl $7,%edx
- addl %edi,%ebx
- movl %ecx,%edi
- roll $5,%ecx
- addl %esi,%ebx
- xorl %eax,%ebp
- addl %ecx,%ebx
- movl %edx,%esi
- xorl %ebp,%edx
- addl 44(%rsp),%eax
- andl %ebp,%esi
- andl %edx,%edi
- rorl $7,%ecx
- addl %esi,%eax
- movl %ebx,%esi
- roll $5,%ebx
- addl %edi,%eax
- xorl %ebp,%edx
- addl %ebx,%eax
- addl 48(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 64(%r15),%xmm15
- pxor %xmm7,%xmm3
-.byte 102,68,15,58,15,193,8
- xorl %edx,%esi
- movl %eax,%edi
- roll $5,%eax
- pxor %xmm4,%xmm3
- xorl %ecx,%esi
- addl %eax,%ebp
- movdqa %xmm9,%xmm10
- paddd %xmm2,%xmm9
- rorl $7,%ebx
- addl %esi,%ebp
- pxor %xmm8,%xmm3
- addl 52(%rsp),%edx
- xorl %ecx,%edi
- movl %ebp,%esi
- roll $5,%ebp
- movdqa %xmm3,%xmm8
- movdqa %xmm9,32(%rsp)
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
- addl %edi,%edx
- pslld $2,%xmm3
- addl 56(%rsp),%ecx
- xorl %ebx,%esi
- psrld $30,%xmm8
- movl %edx,%edi
- roll $5,%edx
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 80(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %esi,%ecx
- por %xmm8,%xmm3
- addl 60(%rsp),%ebx
- xorl %eax,%edi
- movl %ecx,%esi
- roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %edi,%ebx
- addl 0(%rsp),%eax
- paddd %xmm3,%xmm10
- xorl %ebp,%esi
- movl %ebx,%edi
- roll $5,%ebx
- xorl %edx,%esi
- movdqa %xmm10,48(%rsp)
- addl %ebx,%eax
- rorl $7,%ecx
- addl %esi,%eax
- addl 4(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 96(%r15),%xmm15
- xorl %edx,%edi
- movl %eax,%esi
- roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %edi,%ebp
- addl 8(%rsp),%edx
- xorl %ecx,%esi
- movl %ebp,%edi
- roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
- addl %esi,%edx
- addl 12(%rsp),%ecx
- xorl %ebx,%edi
- movl %edx,%esi
- roll $5,%edx
- xorl %eax,%edi
-.byte 102,69,15,56,220,223
- movups 112(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %edi,%ecx
- cmpq %r14,%r10
- je .Ldone_ssse3
- movdqa 64(%r11),%xmm6
- movdqa 0(%r11),%xmm9
- movdqu 0(%r10),%xmm0
- movdqu 16(%r10),%xmm1
- movdqu 32(%r10),%xmm2
- movdqu 48(%r10),%xmm3
-.byte 102,15,56,0,198
- addq $64,%r10
- addl 16(%rsp),%ebx
- xorl %eax,%esi
-.byte 102,15,56,0,206
- movl %ecx,%edi
- roll $5,%ecx
- paddd %xmm9,%xmm0
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- movdqa %xmm0,0(%rsp)
- addl 20(%rsp),%eax
- xorl %ebp,%edi
- psubd %xmm9,%xmm0
- movl %ebx,%esi
- roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %edi,%eax
- addl 24(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
- xorl %edx,%esi
- movl %eax,%edi
- roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %esi,%ebp
- addl 28(%rsp),%edx
- xorl %ecx,%edi
- movl %ebp,%esi
- roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
- addl %edi,%edx
- addl 32(%rsp),%ecx
- xorl %ebx,%esi
-.byte 102,15,56,0,214
- movl %edx,%edi
- roll $5,%edx
- paddd %xmm9,%xmm1
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %esi,%ecx
- movdqa %xmm1,16(%rsp)
- addl 36(%rsp),%ebx
- xorl %eax,%edi
- psubd %xmm9,%xmm1
- movl %ecx,%esi
- roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %edi,%ebx
- addl 40(%rsp),%eax
- xorl %ebp,%esi
- movl %ebx,%edi
- roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %esi,%eax
- addl 44(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
- xorl %edx,%edi
- movl %eax,%esi
- roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %edi,%ebp
- addl 48(%rsp),%edx
- xorl %ecx,%esi
-.byte 102,15,56,0,222
- movl %ebp,%edi
- roll $5,%ebp
- paddd %xmm9,%xmm2
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
- addl %esi,%edx
- movdqa %xmm2,32(%rsp)
- addl 52(%rsp),%ecx
- xorl %ebx,%edi
- psubd %xmm9,%xmm2
- movl %edx,%esi
- roll $5,%edx
- xorl %eax,%edi
- cmpl $11,%r8d
- jb .Laesenclast4
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
- je .Laesenclast4
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
-.Laesenclast4:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %edi,%ecx
- addl 56(%rsp),%ebx
- xorl %eax,%esi
- movl %ecx,%edi
- roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- addl 60(%rsp),%eax
- xorl %ebp,%edi
- movl %ebx,%esi
- roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %edi,%eax
- movups %xmm11,48(%r13,%r12,1)
- leaq 64(%r12),%r12
-
- addl 0(%r9),%eax
- addl 4(%r9),%esi
- addl 8(%r9),%ecx
- addl 12(%r9),%edx
- movl %eax,0(%r9)
- addl 16(%r9),%ebp
- movl %esi,4(%r9)
- movl %esi,%ebx
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
- movl %ebp,16(%r9)
- jmp .Loop_ssse3
-
-.align 16
-.Ldone_ssse3:
- addl 16(%rsp),%ebx
- xorl %eax,%esi
- movl %ecx,%edi
- roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- addl 20(%rsp),%eax
- xorl %ebp,%edi
- movl %ebx,%esi
- roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %edi,%eax
- addl 24(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 128(%r15),%xmm15
- xorl %edx,%esi
- movl %eax,%edi
- roll $5,%eax
- xorl %ecx,%esi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %esi,%ebp
- addl 28(%rsp),%edx
- xorl %ecx,%edi
- movl %ebp,%esi
- roll $5,%ebp
- xorl %ebx,%edi
- addl %ebp,%edx
- rorl $7,%eax
- addl %edi,%edx
- addl 32(%rsp),%ecx
- xorl %ebx,%esi
- movl %edx,%edi
- roll $5,%edx
- xorl %eax,%esi
-.byte 102,69,15,56,220,223
- movups 144(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %esi,%ecx
- addl 36(%rsp),%ebx
- xorl %eax,%edi
- movl %ecx,%esi
- roll $5,%ecx
- xorl %ebp,%edi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %edi,%ebx
- addl 40(%rsp),%eax
- xorl %ebp,%esi
- movl %ebx,%edi
- roll $5,%ebx
- xorl %edx,%esi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %esi,%eax
- addl 44(%rsp),%ebp
-.byte 102,69,15,56,220,222
- movups 160(%r15),%xmm15
- xorl %edx,%edi
- movl %eax,%esi
- roll $5,%eax
- xorl %ecx,%edi
- addl %eax,%ebp
- rorl $7,%ebx
- addl %edi,%ebp
- addl 48(%rsp),%edx
- xorl %ecx,%esi
- movl %ebp,%edi
- roll $5,%ebp
- xorl %ebx,%esi
- addl %ebp,%edx
- rorl $7,%eax
- addl %esi,%edx
- addl 52(%rsp),%ecx
- xorl %ebx,%edi
- movl %edx,%esi
- roll $5,%edx
- xorl %eax,%edi
- cmpl $11,%r8d
- jb .Laesenclast5
- movups 176(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 192(%r15),%xmm15
-.byte 102,69,15,56,220,222
- je .Laesenclast5
- movups 208(%r15),%xmm14
-.byte 102,69,15,56,220,223
- movups 224(%r15),%xmm15
-.byte 102,69,15,56,220,222
-.Laesenclast5:
-.byte 102,69,15,56,221,223
- movups 16(%r15),%xmm14
- addl %edx,%ecx
- rorl $7,%ebp
- addl %edi,%ecx
- addl 56(%rsp),%ebx
- xorl %eax,%esi
- movl %ecx,%edi
- roll $5,%ecx
- xorl %ebp,%esi
- addl %ecx,%ebx
- rorl $7,%edx
- addl %esi,%ebx
- addl 60(%rsp),%eax
- xorl %ebp,%edi
- movl %ebx,%esi
- roll $5,%ebx
- xorl %edx,%edi
- addl %ebx,%eax
- rorl $7,%ecx
- addl %edi,%eax
- movups %xmm11,48(%r13,%r12,1)
- movq 88(%rsp),%r8
-
- addl 0(%r9),%eax
- addl 4(%r9),%esi
- addl 8(%r9),%ecx
- movl %eax,0(%r9)
- addl 12(%r9),%edx
- movl %esi,4(%r9)
- addl 16(%r9),%ebp
- movl %ecx,8(%r9)
- movl %edx,12(%r9)
- movl %ebp,16(%r9)
- movups %xmm11,(%r8)
- leaq 104(%rsp),%rsi
- movq 0(%rsi),%r15
- movq 8(%rsi),%r14
- movq 16(%rsi),%r13
- movq 24(%rsi),%r12
- movq 32(%rsi),%rbp
- movq 40(%rsi),%rbx
- leaq 48(%rsi),%rsp
-.Lepilogue_ssse3:
- .byte 0xf3,0xc3
-.size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
-.align 64
-K_XX_XX:
-.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999
-.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1
-.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc
-.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6
-.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
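For reference: the four repeated rows above are the standard SHA-1 stage
constants (floor(2^30*sqrt(2)), sqrt(3), sqrt(5) and sqrt(10) respectively),
and the final 0x00010203,... row is the per-dword byte-swap mask applied with
pshufb to load the big-endian message words.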
-
-.byte 65,69,83,78,73,45,67,66,67,43,83,72,65,49,32,115,116,105,116,99,104,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align 64
diff --git a/app/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl b/app/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl
deleted file mode 100644
index 3c8f6c19..00000000
--- a/app/openssl/crypto/aes/asm/aesni-sha1-x86_64.pl
+++ /dev/null
@@ -1,1250 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# June 2011
-#
-# This is an AESNI-CBC+SHA1 "stitch" implementation. The idea, as spelled
-# out in http://download.intel.com/design/intarch/papers/323686.pdf, is
-# that since AESNI-CBC encryption exhibits *very* low instruction-level
-# parallelism, interleaving it with another algorithm allows processor
-# resources to be utilized better, for better overall performance.
-# SHA1 instruction sequences(*) are taken from sha1-x86_64.pl and the
-# AESNI code is woven into them. Below are performance numbers in
-# cycles per processed byte (less is better) for standalone AESNI-CBC
-# encrypt, for the sum of the latter and standalone SHA1, and for the
-# "stitched" subroutine:
-#
-#              AES-128-CBC      +SHA1         stitch      gain
-# Westmere     3.77[+5.6]       9.37          6.65        +41%
-# Sandy Bridge 5.05[+5.2(6.3)]  10.25(11.35)  6.16(7.08)  +67%(+60%)
-#
-#              AES-192-CBC
-# Westmere     4.51             10.11         6.97        +45%
-# Sandy Bridge 6.05             11.25(12.35)  6.34(7.27)  +77%(+70%)
-#
-#              AES-256-CBC
-# Westmere     5.25             10.85         7.25        +50%
-# Sandy Bridge 7.05             12.25(13.35)  7.06(7.70)  +74%(+73%)
-#
-# (*) There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
-# background information. The numbers in parentheses above are SSSE3
-# results collected on an AVX-capable CPU, i.e. they apply on OSes that
-# don't support AVX.
-#
-# Needless to say, it makes no sense to implement a "stitched" *decrypt*
-# subroutine: *both* AESNI-CBC decrypt and SHA1 already fully utilize
-# the available parallelism, so stitching would not give any gain anyway.
-# Well, there might be some, e.g. because of better cache locality...
-# For reference, here are performance results for standalone AESNI-CBC
-# decrypt:
-#
-#              AES-128-CBC  AES-192-CBC  AES-256-CBC
-# Westmere        1.31         1.55         1.80
-# Sandy Bridge    0.93         1.06         1.22
-
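As a rough orientation before the perlasm itself, here is a schematic C-level
view of what the stitched routine computes. Every helper name below is
hypothetical, and the real code interleaves the two computations at instruction
granularity rather than calling them back to back:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Assumed helpers standing in for the AES-NI and SHA-1 round code. */
    void aes_encrypt_block(const void *key, uint8_t block[16]);
    void sha1_block(uint32_t state[5], const uint8_t block[64]);

    void cbc_sha1_stitch_sketch(const uint8_t *inp, uint8_t *out,
                                size_t blocks64, const void *key,
                                uint8_t iv[16], uint32_t sha_state[5],
                                const uint8_t *in0)
    {
        for (size_t i = 0; i < blocks64; i++) {
            /* one 64-byte SHA-1 block = four 16-byte AES-CBC blocks */
            for (int j = 0; j < 4; j++) {
                for (int k = 0; k < 16; k++)
                    iv[k] ^= inp[64*i + 16*j + k];   /* CBC chaining */
                aes_encrypt_block(key, iv);          /* low ILP alone... */
                memcpy(out + 64*i + 16*j, iv, 16);
            }
            sha1_block(sha_state, in0 + 64*i);       /* ...so SHA-1 work is
                                                        interleaved with it */
        }
    }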
-$flavour = shift;
-$output = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
- =~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
- $1>=2.19);
-$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
- `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
- $1>=2.09);
-$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
- `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
- $1>=10);
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-# void aesni_cbc_sha1_enc(const void *inp,
-# void *out,
-# size_t length,
-# const AES_KEY *key,
-# unsigned char *iv,
-# SHA_CTX *ctx,
-# const void *in0);
-
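-# (Note that the naming below is slightly crossed: the Perl variable
-# $in0 holds the first argument "inp" and feeds the AES-CBC lane, while
-# $inp holds the seventh argument "in0" and feeds the SHA1 lane, so the
-# encrypted stream and the hashed stream can be passed independently.)
-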
-$code.=<<___;
-.text
-.extern OPENSSL_ia32cap_P
-
-.globl aesni_cbc_sha1_enc
-.type aesni_cbc_sha1_enc,\@abi-omnipotent
-.align 16
-aesni_cbc_sha1_enc:
- # caller should check for SSSE3 and AES-NI bits
- mov OPENSSL_ia32cap_P+0(%rip),%r10d
- mov OPENSSL_ia32cap_P+4(%rip),%r11d
-___
-$code.=<<___ if ($avx);
- and \$`1<<28`,%r11d # mask AVX bit
- and \$`1<<30`,%r10d # mask "Intel CPU" bit
- or %r11d,%r10d
- cmp \$`1<<28|1<<30`,%r10d
- je aesni_cbc_sha1_enc_avx
-___
-$code.=<<___;
- jmp aesni_cbc_sha1_enc_ssse3
- ret
-.size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
-___
-
-my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
-
-my $Xi=4;
-my @X=map("%xmm$_",(4..7,0..3));
-my @Tx=map("%xmm$_",(8..10));
-my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
-my @T=("%esi","%edi");
-my $j=0; my $jj=0; my $r=0; my $sn=0;
-my $K_XX_XX="%r11";
-my ($iv,$in,$rndkey0)=map("%xmm$_",(11..13));
-my @rndkey=("%xmm14","%xmm15");
-
-sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
-{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
- my $arg = pop;
- $arg = "\$$arg" if ($arg*1 eq $arg);
- $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
-}
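-
-# The AUTOLOAD thunk above turns any call to an undefined sub into one
-# line of assembly, emitting the last Perl argument first and turning
-# bare numbers into immediates; for instance:
-#
-#	&rol ("%eax",5);	# appends "\trol\t\$5,%eax\n" to $code
-#	&mov ("%esi","%edi");	# appends "\tmov\t%edi,%esi\n" to $code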
-
-my $_rol=sub { &rol(@_) };
-my $_ror=sub { &ror(@_) };
-
-$code.=<<___;
-.type aesni_cbc_sha1_enc_ssse3,\@function,6
-.align 16
-aesni_cbc_sha1_enc_ssse3:
- mov `($win64?56:8)`(%rsp),$inp # load 7th argument
- #shr \$6,$len # debugging artefact
- #jz .Lepilogue_ssse3 # debugging artefact
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- lea `-104-($win64?10*16:0)`(%rsp),%rsp
- #mov $in0,$inp # debugging artefact
- #lea 64(%rsp),$ctx # debugging artefact
-___
-$code.=<<___ if ($win64);
- movaps %xmm6,96+0(%rsp)
- movaps %xmm7,96+16(%rsp)
- movaps %xmm8,96+32(%rsp)
- movaps %xmm9,96+48(%rsp)
- movaps %xmm10,96+64(%rsp)
- movaps %xmm11,96+80(%rsp)
- movaps %xmm12,96+96(%rsp)
- movaps %xmm13,96+112(%rsp)
- movaps %xmm14,96+128(%rsp)
- movaps %xmm15,96+144(%rsp)
-.Lprologue_ssse3:
-___
-$code.=<<___;
- mov $in0,%r12 # reassign arguments
- mov $out,%r13
- mov $len,%r14
- mov $key,%r15
- movdqu ($ivp),$iv # load IV
- mov $ivp,88(%rsp) # save $ivp
-___
-my ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
-my $rounds="${ivp}d";
-$code.=<<___;
- shl \$6,$len
- sub $in0,$out
- mov 240($key),$rounds
- add $inp,$len # end of input
-
- lea K_XX_XX(%rip),$K_XX_XX
- mov 0($ctx),$A # load context
- mov 4($ctx),$B
- mov 8($ctx),$C
- mov 12($ctx),$D
- mov $B,@T[0] # magic seed
- mov 16($ctx),$E
-
- movdqa 64($K_XX_XX),@X[2] # pbswap mask
- movdqa 0($K_XX_XX),@Tx[1] # K_00_19
- movdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
- movdqu 16($inp),@X[-3&7]
- movdqu 32($inp),@X[-2&7]
- movdqu 48($inp),@X[-1&7]
- pshufb @X[2],@X[-4&7] # byte swap
- add \$64,$inp
- pshufb @X[2],@X[-3&7]
- pshufb @X[2],@X[-2&7]
- pshufb @X[2],@X[-1&7]
- paddd @Tx[1],@X[-4&7] # add K_00_19
- paddd @Tx[1],@X[-3&7]
- paddd @Tx[1],@X[-2&7]
- movdqa @X[-4&7],0(%rsp) # X[]+K xfer to IALU
- psubd @Tx[1],@X[-4&7] # restore X[]
- movdqa @X[-3&7],16(%rsp)
- psubd @Tx[1],@X[-3&7]
- movdqa @X[-2&7],32(%rsp)
- psubd @Tx[1],@X[-2&7]
- movups ($key),$rndkey0 # $key[0]
- movups 16($key),$rndkey[0] # forward reference
- jmp .Loop_ssse3
-___
-
-my $aesenc=sub {
- use integer;
- my ($n,$k)=($r/10,$r%10);
- if ($k==0) {
- $code.=<<___;
- movups `16*$n`($in0),$in # load input
- xorps $rndkey0,$in
-___
- $code.=<<___ if ($n);
- movups $iv,`16*($n-1)`($out,$in0) # write output
-___
- $code.=<<___;
- xorps $in,$iv
- aesenc $rndkey[0],$iv
- movups `32+16*$k`($key),$rndkey[1]
-___
- } elsif ($k==9) {
- $sn++;
- $code.=<<___;
- cmp \$11,$rounds
- jb .Laesenclast$sn
- movups `32+16*($k+0)`($key),$rndkey[1]
- aesenc $rndkey[0],$iv
- movups `32+16*($k+1)`($key),$rndkey[0]
- aesenc $rndkey[1],$iv
- je .Laesenclast$sn
- movups `32+16*($k+2)`($key),$rndkey[1]
- aesenc $rndkey[0],$iv
- movups `32+16*($k+3)`($key),$rndkey[0]
- aesenc $rndkey[1],$iv
-.Laesenclast$sn:
- aesenclast $rndkey[0],$iv
- movups 16($key),$rndkey[1] # forward reference
-___
- } else {
- $code.=<<___;
- aesenc $rndkey[0],$iv
- movups `32+16*$k`($key),$rndkey[1]
-___
- }
- $r++; unshift(@rndkey,pop(@rndkey));
-};
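-
-# Each $aesenc() call above emits one step of the CBC lane: $n indexes
-# the 16-byte block within the 64-byte chunk and $k the step within the
-# block. $k==0 loads the next input block and folds it into the IV,
-# while $k==9 closes the block, the cmp/jb/je on $rounds selecting the
-# 10-, 12- or 14-round tail for AES-128/192/256 before aesenclast.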
-
-sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
- my ($a,$b,$c,$d,$e);
-
- &movdqa (@X[0],@X[-3&7]);
- eval(shift(@insns));
- eval(shift(@insns));
- &movdqa (@Tx[0],@X[-1&7]);
- &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
- eval(shift(@insns));
- eval(shift(@insns));
-
- &paddd (@Tx[1],@X[-1&7]);
- eval(shift(@insns));
- eval(shift(@insns));
- &psrldq (@Tx[0],4); # "X[-3]", 3 dwords
- eval(shift(@insns));
- eval(shift(@insns));
- &pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
- eval(shift(@insns));
- eval(shift(@insns));
-
- &pxor (@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
- eval(shift(@insns));
- eval(shift(@insns));
- &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
- eval(shift(@insns));
- eval(shift(@insns));
-
- &movdqa (@Tx[2],@X[0]);
- &movdqa (@Tx[0],@X[0]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &pslldq (@Tx[2],12); # "X[0]"<<96, extract one dword
- &paddd (@X[0],@X[0]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &psrld (@Tx[0],31);
- eval(shift(@insns));
- eval(shift(@insns));
- &movdqa (@Tx[1],@Tx[2]);
- eval(shift(@insns));
- eval(shift(@insns));
-
- &psrld (@Tx[2],30);
- &por (@X[0],@Tx[0]); # "X[0]"<<<=1
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &pslld (@Tx[1],2);
- &pxor (@X[0],@Tx[2]);
- eval(shift(@insns));
- eval(shift(@insns));
- &movdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
- eval(shift(@insns));
- eval(shift(@insns));
-
- &pxor (@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2
-
- foreach (@insns) { eval; } # remaining instructions [if any]
-
- $Xi++; push(@X,shift(@X)); # "rotate" X[]
- push(@Tx,shift(@Tx));
-}
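-
-# The sub above is the SSSE3 vectorization of the SHA1 message
-# schedule, W[i] = (W[i-3] ^ W[i-8] ^ W[i-14] ^ W[i-16]) <<< 1, four
-# elements per call. W[i-3] overlaps the quad being computed, so the
-# xors are done with that slot zeroed and the last element is patched
-# up afterwards, which is what the ">>96 ... <<<2" steps are for.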
-
-sub Xupdate_ssse3_32_79()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
- my ($a,$b,$c,$d,$e);
-
- &movdqa (@Tx[0],@X[-1&7]) if ($Xi==8);
- eval(shift(@insns)); # body_20_39
- &pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
- &palignr(@Tx[0],@X[-2&7],8); # compose "X[-6]"
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # rol
-
- &pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
- eval(shift(@insns));
- eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/);
- if ($Xi%5) {
- &movdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
- } else { # ... or load next one
- &movdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
- }
- &paddd (@Tx[1],@X[-1&7]);
- eval(shift(@insns)); # ror
- eval(shift(@insns));
-
- &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-6]"
- eval(shift(@insns)); # body_20_39
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # rol
-
- &movdqa (@Tx[0],@X[0]);
- &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # ror
- eval(shift(@insns));
-
- &pslld (@X[0],2);
- eval(shift(@insns)); # body_20_39
- eval(shift(@insns));
- &psrld (@Tx[0],30);
- eval(shift(@insns));
- eval(shift(@insns)); # rol
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # ror
- eval(shift(@insns));
-
- &por (@X[0],@Tx[0]); # "X[0]"<<<=2
- eval(shift(@insns)); # body_20_39
- eval(shift(@insns));
- &movdqa (@Tx[1],@X[0]) if ($Xi<19);
- eval(shift(@insns));
- eval(shift(@insns)); # rol
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # rol
- eval(shift(@insns));
-
- foreach (@insns) { eval; } # remaining instructions
-
- $Xi++; push(@X,shift(@X)); # "rotate" X[]
- push(@Tx,shift(@Tx));
-}
-
-sub Xuplast_ssse3_80()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
- my ($a,$b,$c,$d,$e);
-
- eval(shift(@insns));
- &paddd (@Tx[1],@X[-1&7]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
-
- foreach (@insns) { eval; } # remaining instructions
-
- &cmp ($inp,$len);
- &je (".Ldone_ssse3");
-
- unshift(@Tx,pop(@Tx));
-
- &movdqa (@X[2],"64($K_XX_XX)"); # pbswap mask
- &movdqa (@Tx[1],"0($K_XX_XX)"); # K_00_19
- &movdqu (@X[-4&7],"0($inp)"); # load input
- &movdqu (@X[-3&7],"16($inp)");
- &movdqu (@X[-2&7],"32($inp)");
- &movdqu (@X[-1&7],"48($inp)");
- &pshufb (@X[-4&7],@X[2]); # byte swap
- &add ($inp,64);
-
- $Xi=0;
-}
-
-sub Xloop_ssse3()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
- my ($a,$b,$c,$d,$e);
-
- eval(shift(@insns));
- eval(shift(@insns));
- &pshufb (@X[($Xi-3)&7],@X[2]);
- eval(shift(@insns));
- eval(shift(@insns));
- &paddd (@X[($Xi-4)&7],@Tx[1]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- &movdqa (eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]); # X[]+K xfer to IALU
- eval(shift(@insns));
- eval(shift(@insns));
- &psubd (@X[($Xi-4)&7],@Tx[1]);
-
- foreach (@insns) { eval; }
- $Xi++;
-}
-
-sub Xtail_ssse3()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
- my ($a,$b,$c,$d,$e);
-
- foreach (@insns) { eval; }
-}
-
-sub body_00_19 () {
- use integer;
- my ($k,$n);
- my @r=(
- '($a,$b,$c,$d,$e)=@V;'.
- '&add ($e,eval(4*($j&15))."(%rsp)");', # X[]+K xfer
- '&xor ($c,$d);',
- '&mov (@T[1],$a);', # $b in next round
- '&$_rol ($a,5);',
- '&and (@T[0],$c);', # ($b&($c^$d))
- '&xor ($c,$d);', # restore $c
- '&xor (@T[0],$d);',
- '&add ($e,$a);',
- '&$_ror ($b,$j?7:2);', # $b>>>2
- '&add ($e,@T[0]);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
- );
- $n = scalar(@r);
- $k = (($jj+1)*12/20)*20*$n/12; # 12 aesencs in these 20 rounds
- @r[$k%$n].='&$aesenc();' if ($jj==$k/$n);
- $jj++;
- return @r;
-}
-
-sub body_20_39 () {
- use integer;
- my ($k,$n);
- my @r=(
- '($a,$b,$c,$d,$e)=@V;'.
- '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer
- '&xor (@T[0],$d);', # ($b^$d)
- '&mov (@T[1],$a);', # $b in next round
- '&$_rol ($a,5);',
- '&xor (@T[0],$c);', # ($b^$d^$c)
- '&add ($e,$a);',
- '&$_ror ($b,7);', # $b>>>2
- '&add ($e,@T[0]);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
- );
- $n = scalar(@r);
- $k = (($jj+1)*8/20)*20*$n/8; # 8 aesencs in these 20 rounds
- @r[$k%$n].='&$aesenc();' if ($jj==$k/$n);
- $jj++;
- return @r;
-}
-
-sub body_40_59 () {
- use integer;
- my ($k,$n);
- my @r=(
- '($a,$b,$c,$d,$e)=@V;'.
- '&mov (@T[1],$c);',
- '&xor ($c,$d);',
- '&add ($e,eval(4*($j++&15))."(%rsp)");', # X[]+K xfer
- '&and (@T[1],$d);',
- '&and (@T[0],$c);', # ($b&($c^$d))
- '&$_ror ($b,7);', # $b>>>2
- '&add ($e,@T[1]);',
- '&mov (@T[1],$a);', # $b in next round
- '&$_rol ($a,5);',
- '&add ($e,@T[0]);',
- '&xor ($c,$d);', # restore $c
- '&add ($e,$a);' .'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
- );
- $n = scalar(@r);
- $k=(($jj+1)*12/20)*20*$n/12; # 12 aesencs in these 20 rounds
- @r[$k%$n].='&$aesenc();' if ($jj==$k/$n);
- $jj++;
- return @r;
-}
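-
-# The quotas hard-coded above spread the CBC lane evenly over one
-# 64-byte chunk: four 16-byte AES blocks at ten $aesenc steps each,
-# against 80 SHA1 rounds split into four groups of 20, roughly
-#
-#	my @quota = (12, 8, 12, 8);	# rounds 00_19/20_39/40_59/60_79
-#	my $total = 0; $total += $_ for @quota;
-#	$total == 4*10 or die;		# 40 aesenc steps over 80 rounds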
-$code.=<<___;
-.align 16
-.Loop_ssse3:
-___
- &Xupdate_ssse3_16_31(\&body_00_19);
- &Xupdate_ssse3_16_31(\&body_00_19);
- &Xupdate_ssse3_16_31(\&body_00_19);
- &Xupdate_ssse3_16_31(\&body_00_19);
- &Xupdate_ssse3_32_79(\&body_00_19);
- &Xupdate_ssse3_32_79(\&body_20_39);
- &Xupdate_ssse3_32_79(\&body_20_39);
- &Xupdate_ssse3_32_79(\&body_20_39);
- &Xupdate_ssse3_32_79(\&body_20_39);
- &Xupdate_ssse3_32_79(\&body_20_39);
- &Xupdate_ssse3_32_79(\&body_40_59);
- &Xupdate_ssse3_32_79(\&body_40_59);
- &Xupdate_ssse3_32_79(\&body_40_59);
- &Xupdate_ssse3_32_79(\&body_40_59);
- &Xupdate_ssse3_32_79(\&body_40_59);
- &Xupdate_ssse3_32_79(\&body_20_39);
- &Xuplast_ssse3_80(\&body_20_39); # can jump to "done"
-
- $saved_j=$j; @saved_V=@V;
- $saved_r=$r; @saved_rndkey=@rndkey;
-
- &Xloop_ssse3(\&body_20_39);
- &Xloop_ssse3(\&body_20_39);
- &Xloop_ssse3(\&body_20_39);
-
-$code.=<<___;
- movups $iv,48($out,$in0) # write output
- lea 64($in0),$in0
-
- add 0($ctx),$A # update context
- add 4($ctx),@T[0]
- add 8($ctx),$C
- add 12($ctx),$D
- mov $A,0($ctx)
- add 16($ctx),$E
- mov @T[0],4($ctx)
- mov @T[0],$B # magic seed
- mov $C,8($ctx)
- mov $D,12($ctx)
- mov $E,16($ctx)
- jmp .Loop_ssse3
-
-.align 16
-.Ldone_ssse3:
-___
- $jj=$j=$saved_j; @V=@saved_V;
- $r=$saved_r; @rndkey=@saved_rndkey;
-
- &Xtail_ssse3(\&body_20_39);
- &Xtail_ssse3(\&body_20_39);
- &Xtail_ssse3(\&body_20_39);
-
-$code.=<<___;
- movups $iv,48($out,$in0) # write output
- mov 88(%rsp),$ivp # restore $ivp
-
- add 0($ctx),$A # update context
- add 4($ctx),@T[0]
- add 8($ctx),$C
- mov $A,0($ctx)
- add 12($ctx),$D
- mov @T[0],4($ctx)
- add 16($ctx),$E
- mov $C,8($ctx)
- mov $D,12($ctx)
- mov $E,16($ctx)
- movups $iv,($ivp) # write IV
-___
-$code.=<<___ if ($win64);
- movaps 96+0(%rsp),%xmm6
- movaps 96+16(%rsp),%xmm7
- movaps 96+32(%rsp),%xmm8
- movaps 96+48(%rsp),%xmm9
- movaps 96+64(%rsp),%xmm10
- movaps 96+80(%rsp),%xmm11
- movaps 96+96(%rsp),%xmm12
- movaps 96+112(%rsp),%xmm13
- movaps 96+128(%rsp),%xmm14
- movaps 96+144(%rsp),%xmm15
-___
-$code.=<<___;
- lea `104+($win64?10*16:0)`(%rsp),%rsi
- mov 0(%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
-.Lepilogue_ssse3:
- ret
-.size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
-___
-
-$j=$jj=$r=$sn=0;
-
-if ($avx) {
-my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
-
-my $Xi=4;
-my @X=map("%xmm$_",(4..7,0..3));
-my @Tx=map("%xmm$_",(8..10));
-my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
-my @T=("%esi","%edi");
-
-my $_rol=sub { &shld(@_[0],@_) };
-my $_ror=sub { &shrd(@_[0],@_) };
-
-$code.=<<___;
-.type aesni_cbc_sha1_enc_avx,\@function,6
-.align 16
-aesni_cbc_sha1_enc_avx:
- mov `($win64?56:8)`(%rsp),$inp # load 7th argument
- #shr \$6,$len # debugging artefact
- #jz .Lepilogue_avx # debugging artefact
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- lea `-104-($win64?10*16:0)`(%rsp),%rsp
- #mov $in0,$inp # debugging artefact
- #lea 64(%rsp),$ctx # debugging artefact
-___
-$code.=<<___ if ($win64);
- movaps %xmm6,96+0(%rsp)
- movaps %xmm7,96+16(%rsp)
- movaps %xmm8,96+32(%rsp)
- movaps %xmm9,96+48(%rsp)
- movaps %xmm10,96+64(%rsp)
- movaps %xmm11,96+80(%rsp)
- movaps %xmm12,96+96(%rsp)
- movaps %xmm13,96+112(%rsp)
- movaps %xmm14,96+128(%rsp)
- movaps %xmm15,96+144(%rsp)
-.Lprologue_avx:
-___
-$code.=<<___;
- vzeroall
- mov $in0,%r12 # reassign arguments
- mov $out,%r13
- mov $len,%r14
- mov $key,%r15
- vmovdqu ($ivp),$iv # load IV
- mov $ivp,88(%rsp) # save $ivp
-___
-my ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
-my $rounds="${ivp}d";
-$code.=<<___;
- shl \$6,$len
- sub $in0,$out
- mov 240($key),$rounds
- add \$112,$key # size optimization
- add $inp,$len # end of input
-
- lea K_XX_XX(%rip),$K_XX_XX
- mov 0($ctx),$A # load context
- mov 4($ctx),$B
- mov 8($ctx),$C
- mov 12($ctx),$D
- mov $B,@T[0] # magic seed
- mov 16($ctx),$E
-
- vmovdqa 64($K_XX_XX),@X[2] # pbswap mask
- vmovdqa 0($K_XX_XX),@Tx[1] # K_00_19
- vmovdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
- vmovdqu 16($inp),@X[-3&7]
- vmovdqu 32($inp),@X[-2&7]
- vmovdqu 48($inp),@X[-1&7]
- vpshufb @X[2],@X[-4&7],@X[-4&7] # byte swap
- add \$64,$inp
- vpshufb @X[2],@X[-3&7],@X[-3&7]
- vpshufb @X[2],@X[-2&7],@X[-2&7]
- vpshufb @X[2],@X[-1&7],@X[-1&7]
- vpaddd @Tx[1],@X[-4&7],@X[0] # add K_00_19
- vpaddd @Tx[1],@X[-3&7],@X[1]
- vpaddd @Tx[1],@X[-2&7],@X[2]
- vmovdqa @X[0],0(%rsp) # X[]+K xfer to IALU
- vmovdqa @X[1],16(%rsp)
- vmovdqa @X[2],32(%rsp)
- vmovups -112($key),$rndkey0 # $key[0]
- vmovups 16-112($key),$rndkey[0] # forward reference
- jmp .Loop_avx
-___
-
-my $aesenc=sub {
- use integer;
- my ($n,$k)=($r/10,$r%10);
- if ($k==0) {
- $code.=<<___;
- vmovups `16*$n`($in0),$in # load input
- vxorps $rndkey0,$in,$in
-___
- $code.=<<___ if ($n);
- vmovups $iv,`16*($n-1)`($out,$in0) # write output
-___
- $code.=<<___;
- vxorps $in,$iv,$iv
- vaesenc $rndkey[0],$iv,$iv
- vmovups `32+16*$k-112`($key),$rndkey[1]
-___
- } elsif ($k==9) {
- $sn++;
- $code.=<<___;
- cmp \$11,$rounds
- jb .Lvaesenclast$sn
- vaesenc $rndkey[0],$iv,$iv
- vmovups `32+16*($k+0)-112`($key),$rndkey[1]
- vaesenc $rndkey[1],$iv,$iv
- vmovups `32+16*($k+1)-112`($key),$rndkey[0]
- je .Lvaesenclast$sn
- vaesenc $rndkey[0],$iv,$iv
- vmovups `32+16*($k+2)-112`($key),$rndkey[1]
- vaesenc $rndkey[1],$iv,$iv
- vmovups `32+16*($k+3)-112`($key),$rndkey[0]
-.Lvaesenclast$sn:
- vaesenclast $rndkey[0],$iv,$iv
- vmovups 16-112($key),$rndkey[1] # forward reference
-___
- } else {
- $code.=<<___;
- vaesenc $rndkey[0],$iv,$iv
- vmovups `32+16*$k-112`($key),$rndkey[1]
-___
- }
- $r++; unshift(@rndkey,pop(@rndkey));
-};
-
-sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
- my ($a,$b,$c,$d,$e);
-
- eval(shift(@insns));
- eval(shift(@insns));
- &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]"
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
- eval(shift(@insns));
- eval(shift(@insns));
- &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords
- eval(shift(@insns));
- eval(shift(@insns));
- &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpxor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
- eval(shift(@insns));
- eval(shift(@insns));
- &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpsrld (@Tx[0],@X[0],31);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpslldq(@Tx[2],@X[0],12); # "X[0]"<<96, extract one dword
- &vpaddd (@X[0],@X[0],@X[0]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpsrld (@Tx[1],@Tx[2],30);
- &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=1
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpslld (@Tx[2],@Tx[2],2);
- &vpxor (@X[0],@X[0],@Tx[1]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &vpxor (@X[0],@X[0],@Tx[2]); # "X[0]"^=("X[0]">>96)<<<2
- eval(shift(@insns));
- eval(shift(@insns));
- &vmovdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
- eval(shift(@insns));
- eval(shift(@insns));
-
-
- foreach (@insns) { eval; } # remaining instructions [if any]
-
- $Xi++; push(@X,shift(@X)); # "rotate" X[]
- push(@Tx,shift(@Tx));
-}
-
-sub Xupdate_avx_32_79()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
- my ($a,$b,$c,$d,$e);
-
- &vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8); # compose "X[-6]"
- &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
- eval(shift(@insns)); # body_20_39
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # rol
-
- &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
- eval(shift(@insns));
- eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/);
- if ($Xi%5) {
- &vmovdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
- } else { # ... or load next one
- &vmovdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
- }
- &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
- eval(shift(@insns)); # ror
- eval(shift(@insns));
-
- &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-6]"
- eval(shift(@insns)); # body_20_39
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # rol
-
- &vpsrld (@Tx[0],@X[0],30);
- &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # ror
- eval(shift(@insns));
-
- &vpslld (@X[0],@X[0],2);
- eval(shift(@insns)); # body_20_39
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # rol
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # ror
- eval(shift(@insns));
-
- &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=2
- eval(shift(@insns)); # body_20_39
- eval(shift(@insns));
- &vmovdqa (@Tx[1],@X[0]) if ($Xi<19);
- eval(shift(@insns));
- eval(shift(@insns)); # rol
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns)); # rol
- eval(shift(@insns));
-
- foreach (@insns) { eval; } # remaining instructions
-
- $Xi++; push(@X,shift(@X)); # "rotate" X[]
- push(@Tx,shift(@Tx));
-}
-
-sub Xuplast_avx_80()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
- my ($a,$b,$c,$d,$e);
-
- eval(shift(@insns));
- &vpaddd (@Tx[1],@Tx[1],@X[-1&7]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
-
- &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
-
- foreach (@insns) { eval; } # remaining instructions
-
- &cmp ($inp,$len);
- &je (".Ldone_avx");
-
- unshift(@Tx,pop(@Tx));
-
- &vmovdqa(@X[2],"64($K_XX_XX)"); # pbswap mask
- &vmovdqa(@Tx[1],"0($K_XX_XX)"); # K_00_19
- &vmovdqu(@X[-4&7],"0($inp)"); # load input
- &vmovdqu(@X[-3&7],"16($inp)");
- &vmovdqu(@X[-2&7],"32($inp)");
- &vmovdqu(@X[-1&7],"48($inp)");
- &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap
- &add ($inp,64);
-
- $Xi=0;
-}
-
-sub Xloop_avx()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
- my ($a,$b,$c,$d,$e);
-
- eval(shift(@insns));
- eval(shift(@insns));
- &vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
- eval(shift(@insns));
- eval(shift(@insns));
- &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],@Tx[1]);
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- eval(shift(@insns));
- &vmovdqa(eval(16*$Xi)."(%rsp)",@X[$Xi&7]); # X[]+K xfer to IALU
- eval(shift(@insns));
- eval(shift(@insns));
-
- foreach (@insns) { eval; }
- $Xi++;
-}
-
-sub Xtail_avx()
-{ use integer;
- my $body = shift;
- my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
- my ($a,$b,$c,$d,$e);
-
- foreach (@insns) { eval; }
-}
-
-$code.=<<___;
-.align 16
-.Loop_avx:
-___
- &Xupdate_avx_16_31(\&body_00_19);
- &Xupdate_avx_16_31(\&body_00_19);
- &Xupdate_avx_16_31(\&body_00_19);
- &Xupdate_avx_16_31(\&body_00_19);
- &Xupdate_avx_32_79(\&body_00_19);
- &Xupdate_avx_32_79(\&body_20_39);
- &Xupdate_avx_32_79(\&body_20_39);
- &Xupdate_avx_32_79(\&body_20_39);
- &Xupdate_avx_32_79(\&body_20_39);
- &Xupdate_avx_32_79(\&body_20_39);
- &Xupdate_avx_32_79(\&body_40_59);
- &Xupdate_avx_32_79(\&body_40_59);
- &Xupdate_avx_32_79(\&body_40_59);
- &Xupdate_avx_32_79(\&body_40_59);
- &Xupdate_avx_32_79(\&body_40_59);
- &Xupdate_avx_32_79(\&body_20_39);
- &Xuplast_avx_80(\&body_20_39); # can jump to "done"
-
- $saved_j=$j; @saved_V=@V;
- $saved_r=$r; @saved_rndkey=@rndkey;
-
- &Xloop_avx(\&body_20_39);
- &Xloop_avx(\&body_20_39);
- &Xloop_avx(\&body_20_39);
-
-$code.=<<___;
- vmovups $iv,48($out,$in0) # write output
- lea 64($in0),$in0
-
- add 0($ctx),$A # update context
- add 4($ctx),@T[0]
- add 8($ctx),$C
- add 12($ctx),$D
- mov $A,0($ctx)
- add 16($ctx),$E
- mov @T[0],4($ctx)
- mov @T[0],$B # magic seed
- mov $C,8($ctx)
- mov $D,12($ctx)
- mov $E,16($ctx)
- jmp .Loop_avx
-
-.align 16
-.Ldone_avx:
-___
- $jj=$j=$saved_j; @V=@saved_V;
- $r=$saved_r; @rndkey=@saved_rndkey;
-
- &Xtail_avx(\&body_20_39);
- &Xtail_avx(\&body_20_39);
- &Xtail_avx(\&body_20_39);
-
-$code.=<<___;
- vmovups $iv,48($out,$in0) # write output
- mov 88(%rsp),$ivp # restore $ivp
-
- add 0($ctx),$A # update context
- add 4($ctx),@T[0]
- add 8($ctx),$C
- mov $A,0($ctx)
- add 12($ctx),$D
- mov @T[0],4($ctx)
- add 16($ctx),$E
- mov $C,8($ctx)
- mov $D,12($ctx)
- mov $E,16($ctx)
- vmovups $iv,($ivp) # write IV
- vzeroall
-___
-$code.=<<___ if ($win64);
- movaps 96+0(%rsp),%xmm6
- movaps 96+16(%rsp),%xmm7
- movaps 96+32(%rsp),%xmm8
- movaps 96+48(%rsp),%xmm9
- movaps 96+64(%rsp),%xmm10
- movaps 96+80(%rsp),%xmm11
- movaps 96+96(%rsp),%xmm12
- movaps 96+112(%rsp),%xmm13
- movaps 96+128(%rsp),%xmm14
- movaps 96+144(%rsp),%xmm15
-___
-$code.=<<___;
- lea `104+($win64?10*16:0)`(%rsp),%rsi
- mov 0(%rsi),%r15
- mov 8(%rsi),%r14
- mov 16(%rsi),%r13
- mov 24(%rsi),%r12
- mov 32(%rsi),%rbp
- mov 40(%rsi),%rbx
- lea 48(%rsi),%rsp
-.Lepilogue_avx:
- ret
-.size aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
-___
-}
-$code.=<<___;
-.align 64
-K_XX_XX:
-.long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
-.long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
-.long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
-.long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
-.long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask
-
-.asciz "AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
-.align 64
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-# CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
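-# ssse3_handler below follows the usual perlasm SEH pattern:
-# HandlerData[] in .xdata carries the prologue and epilogue labels, and
-# the saved GPRs and XMM registers are restored only when the faulting
-# RIP lies between the two, i.e. when the frame is fully set up.
-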
-$code.=<<___;
-.extern __imp_RtlVirtualUnwind
-.type ssse3_handler,\@abi-omnipotent
-.align 16
-ssse3_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
-
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lcommon_seh_tail
-
- mov 152($context),%rax # pull context->Rsp
-
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lcommon_seh_tail
-
- lea 96(%rax),%rsi
- lea 512($context),%rdi # &context.Xmm6
- mov \$20,%ecx
- .long 0xa548f3fc # cld; rep movsq
- lea `104+10*16`(%rax),%rax # adjust stack pointer
-
- mov 0(%rax),%r15
- mov 8(%rax),%r14
- mov 16(%rax),%r13
- mov 24(%rax),%r12
- mov 32(%rax),%rbp
- mov 40(%rax),%rbx
- lea 48(%rax),%rax
- mov %rbx,144($context) # restore context->Rbx
- mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore context->R12
- mov %r13,224($context) # restore context->R13
- mov %r14,232($context) # restore context->R14
- mov %r15,240($context) # restore context->R15
-
-.Lcommon_seh_tail:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
-
- mov 40($disp),%rdi # disp->ContextRecord
- mov $context,%rsi # context
- mov \$154,%ecx # sizeof(CONTEXT)
- .long 0xa548f3fc # cld; rep movsq
-
- mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
- mov 8(%rsi),%rdx # arg2, disp->ImageBase
- mov 0(%rsi),%r8 # arg3, disp->ControlPc
- mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
- mov 40(%rsi),%r10 # disp->ContextRecord
- lea 56(%rsi),%r11 # &disp->HandlerData
- lea 24(%rsi),%r12 # &disp->EstablisherFrame
- mov %r10,32(%rsp) # arg5
- mov %r11,40(%rsp) # arg6
- mov %r12,48(%rsp) # arg7
- mov %rcx,56(%rsp) # arg8, (NULL)
- call *__imp_RtlVirtualUnwind(%rip)
-
- mov \$1,%eax # ExceptionContinueSearch
- add \$64,%rsp
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %rdi
- pop %rsi
- ret
-.size ssse3_handler,.-ssse3_handler
-
-.section .pdata
-.align 4
- .rva .LSEH_begin_aesni_cbc_sha1_enc_ssse3
- .rva .LSEH_end_aesni_cbc_sha1_enc_ssse3
- .rva .LSEH_info_aesni_cbc_sha1_enc_ssse3
-___
-$code.=<<___ if ($avx);
- .rva .LSEH_begin_aesni_cbc_sha1_enc_avx
- .rva .LSEH_end_aesni_cbc_sha1_enc_avx
- .rva .LSEH_info_aesni_cbc_sha1_enc_avx
-___
-$code.=<<___;
-.section .xdata
-.align 8
-.LSEH_info_aesni_cbc_sha1_enc_ssse3:
- .byte 9,0,0,0
- .rva ssse3_handler
- .rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
-___
-$code.=<<___ if ($avx);
-.LSEH_info_aesni_cbc_sha1_enc_avx:
- .byte 9,0,0,0
- .rva ssse3_handler
- .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
-___
-}
-
-####################################################################
-sub rex {
- local *opcode=shift;
- my ($dst,$src)=@_;
- my $rex=0;
-
- $rex|=0x04 if($dst>=8);
- $rex|=0x01 if($src>=8);
- push @opcode,$rex|0x40 if($rex);
-}
-
-sub aesni {
- my $line=shift;
- my @opcode=(0x66);
-
- if ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
- my %opcodelet = (
- "aesenc" => 0xdc, "aesenclast" => 0xdd
- );
- return undef if (!defined($opcodelet{$1}));
- rex(\@opcode,$3,$2);
- push @opcode,0x0f,0x38,$opcodelet{$1};
- push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
- return ".byte\t".join(',',@opcode);
- }
- return $line;
-}
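-
-# This encoder is what produces the raw byte runs for pre-AES-NI
-# assemblers; for instance aesni("aesenc\t%xmm1,%xmm2") returns
-# ".byte\t102,15,56,220,209", i.e. 0x66,0x0f,0x38,0xdc for aesenc plus
-# the ModR/M byte 0xd1, the exact pattern seen throughout aesni-x86.S
-# below.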
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
-
-print $code;
-close STDOUT;
diff --git a/app/openssl/crypto/aes/asm/aesni-x86.S b/app/openssl/crypto/aes/asm/aesni-x86.S
deleted file mode 100644
index 0766bb54..00000000
--- a/app/openssl/crypto/aes/asm/aesni-x86.S
+++ /dev/null
@@ -1,2143 +0,0 @@
-.file "crypto/aes/asm/aesni-x86.s"
-.text
-.globl aesni_encrypt
-.type aesni_encrypt,@function
-.align 16
-aesni_encrypt:
-.L_aesni_encrypt_begin:
- movl 4(%esp),%eax
- movl 12(%esp),%edx
- movups (%eax),%xmm2
- movl 240(%edx),%ecx
- movl 8(%esp),%eax
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L000enc1_loop_1:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L000enc1_loop_1
-.byte 102,15,56,221,209
- movups %xmm2,(%eax)
- ret
-.size aesni_encrypt,.-.L_aesni_encrypt_begin
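-/* The raw ".byte 102,15,56,220,209" runs in this file are hand-encoded
-   AES-NI instructions: 102,15,56 is the 0x66,0x0f,0x38 escape, then
-   220 for aesenc, 221 aesenclast, 222 aesdec, 223 aesdeclast; the
-   final byte is the ModR/M register selector.  They are emitted as raw
-   bytes so that assemblers without AES-NI support can still build this
-   file. */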
-.globl aesni_decrypt
-.type aesni_decrypt,@function
-.align 16
-aesni_decrypt:
-.L_aesni_decrypt_begin:
- movl 4(%esp),%eax
- movl 12(%esp),%edx
- movups (%eax),%xmm2
- movl 240(%edx),%ecx
- movl 8(%esp),%eax
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L001dec1_loop_2:
-.byte 102,15,56,222,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L001dec1_loop_2
-.byte 102,15,56,223,209
- movups %xmm2,(%eax)
- ret
-.size aesni_decrypt,.-.L_aesni_decrypt_begin
-.type _aesni_encrypt3,@function
-.align 16
-_aesni_encrypt3:
- movups (%edx),%xmm0
- shrl $1,%ecx
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
- pxor %xmm0,%xmm4
- movups (%edx),%xmm0
-.L002enc3_loop:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %ecx
-.byte 102,15,56,220,225
- movups 16(%edx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leal 32(%edx),%edx
-.byte 102,15,56,220,224
- movups (%edx),%xmm0
- jnz .L002enc3_loop
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
-.byte 102,15,56,220,225
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-.byte 102,15,56,221,224
- ret
-.size _aesni_encrypt3,.-_aesni_encrypt3
-.type _aesni_decrypt3,@function
-.align 16
-_aesni_decrypt3:
- movups (%edx),%xmm0
- shrl $1,%ecx
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
- pxor %xmm0,%xmm4
- movups (%edx),%xmm0
-.L003dec3_loop:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %ecx
-.byte 102,15,56,222,225
- movups 16(%edx),%xmm1
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leal 32(%edx),%edx
-.byte 102,15,56,222,224
- movups (%edx),%xmm0
- jnz .L003dec3_loop
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
-.byte 102,15,56,222,225
-.byte 102,15,56,223,208
-.byte 102,15,56,223,216
-.byte 102,15,56,223,224
- ret
-.size _aesni_decrypt3,.-_aesni_decrypt3
-.type _aesni_encrypt4,@function
-.align 16
-_aesni_encrypt4:
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- shrl $1,%ecx
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
- pxor %xmm0,%xmm4
- pxor %xmm0,%xmm5
- movups (%edx),%xmm0
-.L004enc4_loop:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %ecx
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
- movups 16(%edx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leal 32(%edx),%edx
-.byte 102,15,56,220,224
-.byte 102,15,56,220,232
- movups (%edx),%xmm0
- jnz .L004enc4_loop
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-.byte 102,15,56,221,224
-.byte 102,15,56,221,232
- ret
-.size _aesni_encrypt4,.-_aesni_encrypt4
-.type _aesni_decrypt4,@function
-.align 16
-_aesni_decrypt4:
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- shrl $1,%ecx
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
- pxor %xmm0,%xmm4
- pxor %xmm0,%xmm5
- movups (%edx),%xmm0
-.L005dec4_loop:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %ecx
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
- movups 16(%edx),%xmm1
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leal 32(%edx),%edx
-.byte 102,15,56,222,224
-.byte 102,15,56,222,232
- movups (%edx),%xmm0
- jnz .L005dec4_loop
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,223,208
-.byte 102,15,56,223,216
-.byte 102,15,56,223,224
-.byte 102,15,56,223,232
- ret
-.size _aesni_decrypt4,.-_aesni_decrypt4
-.type _aesni_encrypt6,@function
-.align 16
-_aesni_encrypt6:
- movups (%edx),%xmm0
- shrl $1,%ecx
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
- pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
- pxor %xmm0,%xmm5
- decl %ecx
-.byte 102,15,56,220,225
- pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
-.byte 102,15,56,220,241
- movups (%edx),%xmm0
-.byte 102,15,56,220,249
- jmp .L_aesni_encrypt6_enter
-.align 16
-.L006enc6_loop:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %ecx
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.align 16
-.L_aesni_encrypt6_enter:
- movups 16(%edx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leal 32(%edx),%edx
-.byte 102,15,56,220,224
-.byte 102,15,56,220,232
-.byte 102,15,56,220,240
-.byte 102,15,56,220,248
- movups (%edx),%xmm0
- jnz .L006enc6_loop
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-.byte 102,15,56,221,224
-.byte 102,15,56,221,232
-.byte 102,15,56,221,240
-.byte 102,15,56,221,248
- ret
-.size _aesni_encrypt6,.-_aesni_encrypt6
-.type _aesni_decrypt6,@function
-.align 16
-_aesni_decrypt6:
- movups (%edx),%xmm0
- shrl $1,%ecx
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
-.byte 102,15,56,222,209
- pxor %xmm0,%xmm4
-.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
- decl %ecx
-.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
-.byte 102,15,56,222,241
- movups (%edx),%xmm0
-.byte 102,15,56,222,249
- jmp .L_aesni_decrypt6_enter
-.align 16
-.L007dec6_loop:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %ecx
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-.align 16
-.L_aesni_decrypt6_enter:
- movups 16(%edx),%xmm1
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leal 32(%edx),%edx
-.byte 102,15,56,222,224
-.byte 102,15,56,222,232
-.byte 102,15,56,222,240
-.byte 102,15,56,222,248
- movups (%edx),%xmm0
- jnz .L007dec6_loop
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-.byte 102,15,56,223,208
-.byte 102,15,56,223,216
-.byte 102,15,56,223,224
-.byte 102,15,56,223,232
-.byte 102,15,56,223,240
-.byte 102,15,56,223,248
- ret
-.size _aesni_decrypt6,.-_aesni_decrypt6
-.globl aesni_ecb_encrypt
-.type aesni_ecb_encrypt,@function
-.align 16
-aesni_ecb_encrypt:
-.L_aesni_ecb_encrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl 24(%esp),%edi
- movl 28(%esp),%eax
- movl 32(%esp),%edx
- movl 36(%esp),%ebx
- andl $-16,%eax
- jz .L008ecb_ret
- movl 240(%edx),%ecx
- testl %ebx,%ebx
- jz .L009ecb_decrypt
- movl %edx,%ebp
- movl %ecx,%ebx
- cmpl $96,%eax
- jb .L010ecb_enc_tail
- movdqu (%esi),%xmm2
- movdqu 16(%esi),%xmm3
- movdqu 32(%esi),%xmm4
- movdqu 48(%esi),%xmm5
- movdqu 64(%esi),%xmm6
- movdqu 80(%esi),%xmm7
- leal 96(%esi),%esi
- subl $96,%eax
- jmp .L011ecb_enc_loop6_enter
-.align 16
-.L012ecb_enc_loop6:
- movups %xmm2,(%edi)
- movdqu (%esi),%xmm2
- movups %xmm3,16(%edi)
- movdqu 16(%esi),%xmm3
- movups %xmm4,32(%edi)
- movdqu 32(%esi),%xmm4
- movups %xmm5,48(%edi)
- movdqu 48(%esi),%xmm5
- movups %xmm6,64(%edi)
- movdqu 64(%esi),%xmm6
- movups %xmm7,80(%edi)
- leal 96(%edi),%edi
- movdqu 80(%esi),%xmm7
- leal 96(%esi),%esi
-.L011ecb_enc_loop6_enter:
- call _aesni_encrypt6
- movl %ebp,%edx
- movl %ebx,%ecx
- subl $96,%eax
- jnc .L012ecb_enc_loop6
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- movups %xmm6,64(%edi)
- movups %xmm7,80(%edi)
- leal 96(%edi),%edi
- addl $96,%eax
- jz .L008ecb_ret
-.L010ecb_enc_tail:
- movups (%esi),%xmm2
- cmpl $32,%eax
- jb .L013ecb_enc_one
- movups 16(%esi),%xmm3
- je .L014ecb_enc_two
- movups 32(%esi),%xmm4
- cmpl $64,%eax
- jb .L015ecb_enc_three
- movups 48(%esi),%xmm5
- je .L016ecb_enc_four
- movups 64(%esi),%xmm6
- xorps %xmm7,%xmm7
- call _aesni_encrypt6
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- movups %xmm6,64(%edi)
- jmp .L008ecb_ret
-.align 16
-.L013ecb_enc_one:
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L017enc1_loop_3:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L017enc1_loop_3
-.byte 102,15,56,221,209
- movups %xmm2,(%edi)
- jmp .L008ecb_ret
-.align 16
-.L014ecb_enc_two:
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- jmp .L008ecb_ret
-.align 16
-.L015ecb_enc_three:
- call _aesni_encrypt3
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- jmp .L008ecb_ret
-.align 16
-.L016ecb_enc_four:
- call _aesni_encrypt4
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- jmp .L008ecb_ret
-.align 16
-.L009ecb_decrypt:
- movl %edx,%ebp
- movl %ecx,%ebx
- cmpl $96,%eax
- jb .L018ecb_dec_tail
- movdqu (%esi),%xmm2
- movdqu 16(%esi),%xmm3
- movdqu 32(%esi),%xmm4
- movdqu 48(%esi),%xmm5
- movdqu 64(%esi),%xmm6
- movdqu 80(%esi),%xmm7
- leal 96(%esi),%esi
- subl $96,%eax
- jmp .L019ecb_dec_loop6_enter
-.align 16
-.L020ecb_dec_loop6:
- movups %xmm2,(%edi)
- movdqu (%esi),%xmm2
- movups %xmm3,16(%edi)
- movdqu 16(%esi),%xmm3
- movups %xmm4,32(%edi)
- movdqu 32(%esi),%xmm4
- movups %xmm5,48(%edi)
- movdqu 48(%esi),%xmm5
- movups %xmm6,64(%edi)
- movdqu 64(%esi),%xmm6
- movups %xmm7,80(%edi)
- leal 96(%edi),%edi
- movdqu 80(%esi),%xmm7
- leal 96(%esi),%esi
-.L019ecb_dec_loop6_enter:
- call _aesni_decrypt6
- movl %ebp,%edx
- movl %ebx,%ecx
- subl $96,%eax
- jnc .L020ecb_dec_loop6
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- movups %xmm6,64(%edi)
- movups %xmm7,80(%edi)
- leal 96(%edi),%edi
- addl $96,%eax
- jz .L008ecb_ret
-.L018ecb_dec_tail:
- movups (%esi),%xmm2
- cmpl $32,%eax
- jb .L021ecb_dec_one
- movups 16(%esi),%xmm3
- je .L022ecb_dec_two
- movups 32(%esi),%xmm4
- cmpl $64,%eax
- jb .L023ecb_dec_three
- movups 48(%esi),%xmm5
- je .L024ecb_dec_four
- movups 64(%esi),%xmm6
- xorps %xmm7,%xmm7
- call _aesni_decrypt6
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- movups %xmm6,64(%edi)
- jmp .L008ecb_ret
-.align 16
-.L021ecb_dec_one:
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L025dec1_loop_4:
-.byte 102,15,56,222,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L025dec1_loop_4
-.byte 102,15,56,223,209
- movups %xmm2,(%edi)
- jmp .L008ecb_ret
-.align 16
-.L022ecb_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- jmp .L008ecb_ret
-.align 16
-.L023ecb_dec_three:
- call _aesni_decrypt3
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- jmp .L008ecb_ret
-.align 16
-.L024ecb_dec_four:
- call _aesni_decrypt4
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
-.L008ecb_ret:
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size aesni_ecb_encrypt,.-.L_aesni_ecb_encrypt_begin
-.globl aesni_ccm64_encrypt_blocks
-.type aesni_ccm64_encrypt_blocks,@function
-.align 16
-aesni_ccm64_encrypt_blocks:
-.L_aesni_ccm64_encrypt_blocks_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl 24(%esp),%edi
- movl 28(%esp),%eax
- movl 32(%esp),%edx
- movl 36(%esp),%ebx
- movl 40(%esp),%ecx
- movl %esp,%ebp
- subl $60,%esp
- andl $-16,%esp
- movl %ebp,48(%esp)
- movdqu (%ebx),%xmm7
- movdqu (%ecx),%xmm3
- movl 240(%edx),%ecx
- movl $202182159,(%esp)
- movl $134810123,4(%esp)
- movl $67438087,8(%esp)
- movl $66051,12(%esp)
- movl $1,%ebx
- xorl %ebp,%ebp
- movl %ebx,16(%esp)
- movl %ebp,20(%esp)
- movl %ebp,24(%esp)
- movl %ebp,28(%esp)
- shrl $1,%ecx
- leal (%edx),%ebp
- movdqa (%esp),%xmm5
- movdqa %xmm7,%xmm2
- movl %ecx,%ebx
-.byte 102,15,56,0,253
-.L026ccm64_enc_outer:
- movups (%ebp),%xmm0
- movl %ebx,%ecx
- movups (%esi),%xmm6
- xorps %xmm0,%xmm2
- movups 16(%ebp),%xmm1
- xorps %xmm6,%xmm0
- leal 32(%ebp),%edx
- xorps %xmm0,%xmm3
- movups (%edx),%xmm0
-.L027ccm64_enc2_loop:
-.byte 102,15,56,220,209
- decl %ecx
-.byte 102,15,56,220,217
- movups 16(%edx),%xmm1
-.byte 102,15,56,220,208
- leal 32(%edx),%edx
-.byte 102,15,56,220,216
- movups (%edx),%xmm0
- jnz .L027ccm64_enc2_loop
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- paddq 16(%esp),%xmm7
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
- decl %eax
- leal 16(%esi),%esi
- xorps %xmm2,%xmm6
- movdqa %xmm7,%xmm2
- movups %xmm6,(%edi)
- leal 16(%edi),%edi
-.byte 102,15,56,0,213
- jnz .L026ccm64_enc_outer
- movl 48(%esp),%esp
- movl 40(%esp),%edi
- movups %xmm3,(%edi)
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size aesni_ccm64_encrypt_blocks,.-.L_aesni_ccm64_encrypt_blocks_begin
-.globl aesni_ccm64_decrypt_blocks
-.type aesni_ccm64_decrypt_blocks,@function
-.align 16
-aesni_ccm64_decrypt_blocks:
-.L_aesni_ccm64_decrypt_blocks_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl 24(%esp),%edi
- movl 28(%esp),%eax
- movl 32(%esp),%edx
- movl 36(%esp),%ebx
- movl 40(%esp),%ecx
- movl %esp,%ebp
- subl $60,%esp
- andl $-16,%esp
- movl %ebp,48(%esp)
- movdqu (%ebx),%xmm7
- movdqu (%ecx),%xmm3
- movl 240(%edx),%ecx
- movl $202182159,(%esp)
- movl $134810123,4(%esp)
- movl $67438087,8(%esp)
- movl $66051,12(%esp)
- movl $1,%ebx
- xorl %ebp,%ebp
- movl %ebx,16(%esp)
- movl %ebp,20(%esp)
- movl %ebp,24(%esp)
- movl %ebp,28(%esp)
- movdqa (%esp),%xmm5
- movdqa %xmm7,%xmm2
- movl %edx,%ebp
- movl %ecx,%ebx
-.byte 102,15,56,0,253
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L028enc1_loop_5:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L028enc1_loop_5
-.byte 102,15,56,221,209
- movups (%esi),%xmm6
- paddq 16(%esp),%xmm7
- leal 16(%esi),%esi
- jmp .L029ccm64_dec_outer
-.align 16
-.L029ccm64_dec_outer:
- xorps %xmm2,%xmm6
- movdqa %xmm7,%xmm2
- movl %ebx,%ecx
- movups %xmm6,(%edi)
- leal 16(%edi),%edi
-.byte 102,15,56,0,213
- subl $1,%eax
- jz .L030ccm64_dec_break
- movups (%ebp),%xmm0
- shrl $1,%ecx
- movups 16(%ebp),%xmm1
- xorps %xmm0,%xmm6
- leal 32(%ebp),%edx
- xorps %xmm0,%xmm2
- xorps %xmm6,%xmm3
- movups (%edx),%xmm0
-.L031ccm64_dec2_loop:
-.byte 102,15,56,220,209
- decl %ecx
-.byte 102,15,56,220,217
- movups 16(%edx),%xmm1
-.byte 102,15,56,220,208
- leal 32(%edx),%edx
-.byte 102,15,56,220,216
- movups (%edx),%xmm0
- jnz .L031ccm64_dec2_loop
- movups (%esi),%xmm6
- paddq 16(%esp),%xmm7
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- leal 16(%esi),%esi
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
- jmp .L029ccm64_dec_outer
-.align 16
-.L030ccm64_dec_break:
- movl %ebp,%edx
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- xorps %xmm0,%xmm6
- leal 32(%edx),%edx
- xorps %xmm6,%xmm3
-.L032enc1_loop_6:
-.byte 102,15,56,220,217
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L032enc1_loop_6
-.byte 102,15,56,221,217
- movl 48(%esp),%esp
- movl 40(%esp),%edi
- movups %xmm3,(%edi)
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size aesni_ccm64_decrypt_blocks,.-.L_aesni_ccm64_decrypt_blocks_begin
-.globl aesni_ctr32_encrypt_blocks
-.type aesni_ctr32_encrypt_blocks,@function
-.align 16
-aesni_ctr32_encrypt_blocks:
-.L_aesni_ctr32_encrypt_blocks_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl 24(%esp),%edi
- movl 28(%esp),%eax
- movl 32(%esp),%edx
- movl 36(%esp),%ebx
- movl %esp,%ebp
- subl $88,%esp
- andl $-16,%esp
- movl %ebp,80(%esp)
- cmpl $1,%eax
- je .L033ctr32_one_shortcut
- movdqu (%ebx),%xmm7
- movl $202182159,(%esp)
- movl $134810123,4(%esp)
- movl $67438087,8(%esp)
- movl $66051,12(%esp)
- movl $6,%ecx
- xorl %ebp,%ebp
- movl %ecx,16(%esp)
- movl %ecx,20(%esp)
- movl %ecx,24(%esp)
- movl %ebp,28(%esp)
-.byte 102,15,58,22,251,3
-.byte 102,15,58,34,253,3
- movl 240(%edx),%ecx
- bswap %ebx
- pxor %xmm1,%xmm1
- pxor %xmm0,%xmm0
- movdqa (%esp),%xmm2
-.byte 102,15,58,34,203,0
- leal 3(%ebx),%ebp
-.byte 102,15,58,34,197,0
- incl %ebx
-.byte 102,15,58,34,203,1
- incl %ebp
-.byte 102,15,58,34,197,1
- incl %ebx
-.byte 102,15,58,34,203,2
- incl %ebp
-.byte 102,15,58,34,197,2
- movdqa %xmm1,48(%esp)
-.byte 102,15,56,0,202
- movdqa %xmm0,64(%esp)
-.byte 102,15,56,0,194
- pshufd $192,%xmm1,%xmm2
- pshufd $128,%xmm1,%xmm3
- cmpl $6,%eax
- jb .L034ctr32_tail
- movdqa %xmm7,32(%esp)
- shrl $1,%ecx
- movl %edx,%ebp
- movl %ecx,%ebx
- subl $6,%eax
- jmp .L035ctr32_loop6
-.align 16
-.L035ctr32_loop6:
- pshufd $64,%xmm1,%xmm4
- movdqa 32(%esp),%xmm1
- pshufd $192,%xmm0,%xmm5
- por %xmm1,%xmm2
- pshufd $128,%xmm0,%xmm6
- por %xmm1,%xmm3
- pshufd $64,%xmm0,%xmm7
- por %xmm1,%xmm4
- por %xmm1,%xmm5
- por %xmm1,%xmm6
- por %xmm1,%xmm7
- movups (%ebp),%xmm0
- movups 16(%ebp),%xmm1
- leal 32(%ebp),%edx
- decl %ecx
- pxor %xmm0,%xmm2
- pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
- pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
- pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
- pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
-.byte 102,15,56,220,241
- movups (%edx),%xmm0
-.byte 102,15,56,220,249
- call .L_aesni_encrypt6_enter
- movups (%esi),%xmm1
- movups 16(%esi),%xmm0
- xorps %xmm1,%xmm2
- movups 32(%esi),%xmm1
- xorps %xmm0,%xmm3
- movups %xmm2,(%edi)
- movdqa 16(%esp),%xmm0
- xorps %xmm1,%xmm4
- movdqa 48(%esp),%xmm1
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- paddd %xmm0,%xmm1
- paddd 64(%esp),%xmm0
- movdqa (%esp),%xmm2
- movups 48(%esi),%xmm3
- movups 64(%esi),%xmm4
- xorps %xmm3,%xmm5
- movups 80(%esi),%xmm3
- leal 96(%esi),%esi
- movdqa %xmm1,48(%esp)
-.byte 102,15,56,0,202
- xorps %xmm4,%xmm6
- movups %xmm5,48(%edi)
- xorps %xmm3,%xmm7
- movdqa %xmm0,64(%esp)
-.byte 102,15,56,0,194
- movups %xmm6,64(%edi)
- pshufd $192,%xmm1,%xmm2
- movups %xmm7,80(%edi)
- leal 96(%edi),%edi
- movl %ebx,%ecx
- pshufd $128,%xmm1,%xmm3
- subl $6,%eax
- jnc .L035ctr32_loop6
- addl $6,%eax
- jz .L036ctr32_ret
- movl %ebp,%edx
- leal 1(,%ecx,2),%ecx
- movdqa 32(%esp),%xmm7
-.L034ctr32_tail:
- por %xmm7,%xmm2
- cmpl $2,%eax
- jb .L037ctr32_one
- pshufd $64,%xmm1,%xmm4
- por %xmm7,%xmm3
- je .L038ctr32_two
- pshufd $192,%xmm0,%xmm5
- por %xmm7,%xmm4
- cmpl $4,%eax
- jb .L039ctr32_three
- pshufd $128,%xmm0,%xmm6
- por %xmm7,%xmm5
- je .L040ctr32_four
- por %xmm7,%xmm6
- call _aesni_encrypt6
- movups (%esi),%xmm1
- movups 16(%esi),%xmm0
- xorps %xmm1,%xmm2
- movups 32(%esi),%xmm1
- xorps %xmm0,%xmm3
- movups 48(%esi),%xmm0
- xorps %xmm1,%xmm4
- movups 64(%esi),%xmm1
- xorps %xmm0,%xmm5
- movups %xmm2,(%edi)
- xorps %xmm1,%xmm6
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- movups %xmm6,64(%edi)
- jmp .L036ctr32_ret
-.align 16
-.L033ctr32_one_shortcut:
- movups (%ebx),%xmm2
- movl 240(%edx),%ecx
-.L037ctr32_one:
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L041enc1_loop_7:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L041enc1_loop_7
-.byte 102,15,56,221,209
- movups (%esi),%xmm6
- xorps %xmm2,%xmm6
- movups %xmm6,(%edi)
- jmp .L036ctr32_ret
-.align 16
-.L038ctr32_two:
- call _aesni_encrypt3
- movups (%esi),%xmm5
- movups 16(%esi),%xmm6
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- jmp .L036ctr32_ret
-.align 16
-.L039ctr32_three:
- call _aesni_encrypt3
- movups (%esi),%xmm5
- movups 16(%esi),%xmm6
- xorps %xmm5,%xmm2
- movups 32(%esi),%xmm7
- xorps %xmm6,%xmm3
- movups %xmm2,(%edi)
- xorps %xmm7,%xmm4
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- jmp .L036ctr32_ret
-.align 16
-.L040ctr32_four:
- call _aesni_encrypt4
- movups (%esi),%xmm6
- movups 16(%esi),%xmm7
- movups 32(%esi),%xmm1
- xorps %xmm6,%xmm2
- movups 48(%esi),%xmm0
- xorps %xmm7,%xmm3
- movups %xmm2,(%edi)
- xorps %xmm1,%xmm4
- movups %xmm3,16(%edi)
- xorps %xmm0,%xmm5
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
-.L036ctr32_ret:
- movl 80(%esp),%esp
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size aesni_ctr32_encrypt_blocks,.-.L_aesni_ctr32_encrypt_blocks_begin
-.globl aesni_xts_encrypt
-.type aesni_xts_encrypt,@function
-.align 16
-aesni_xts_encrypt:
-.L_aesni_xts_encrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 36(%esp),%edx
- movl 40(%esp),%esi
- movl 240(%edx),%ecx
- movups (%esi),%xmm2
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L042enc1_loop_8:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L042enc1_loop_8
-.byte 102,15,56,221,209
- movl 20(%esp),%esi
- movl 24(%esp),%edi
- movl 28(%esp),%eax
- movl 32(%esp),%edx
- movl %esp,%ebp
- subl $120,%esp
- movl 240(%edx),%ecx
- andl $-16,%esp
- movl $135,96(%esp)
- movl $0,100(%esp)
- movl $1,104(%esp)
- movl $0,108(%esp)
- movl %eax,112(%esp)
- movl %ebp,116(%esp)
- movdqa %xmm2,%xmm1
- pxor %xmm0,%xmm0
- movdqa 96(%esp),%xmm3
- pcmpgtd %xmm1,%xmm0
- andl $-16,%eax
- movl %edx,%ebp
- movl %ecx,%ebx
- subl $96,%eax
- jc .L043xts_enc_short
- shrl $1,%ecx
- movl %ecx,%ebx
- jmp .L044xts_enc_loop6
-.align 16
-.L044xts_enc_loop6:
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,16(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,32(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,48(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm7
- movdqa %xmm1,64(%esp)
- paddq %xmm1,%xmm1
- movups (%ebp),%xmm0
- pand %xmm3,%xmm7
- movups (%esi),%xmm2
- pxor %xmm1,%xmm7
- movdqu 16(%esi),%xmm3
- xorps %xmm0,%xmm2
- movdqu 32(%esi),%xmm4
- pxor %xmm0,%xmm3
- movdqu 48(%esi),%xmm5
- pxor %xmm0,%xmm4
- movdqu 64(%esi),%xmm6
- pxor %xmm0,%xmm5
- movdqu 80(%esi),%xmm1
- pxor %xmm0,%xmm6
- leal 96(%esi),%esi
- pxor (%esp),%xmm2
- movdqa %xmm7,80(%esp)
- pxor %xmm1,%xmm7
- movups 16(%ebp),%xmm1
- leal 32(%ebp),%edx
- pxor 16(%esp),%xmm3
-.byte 102,15,56,220,209
- pxor 32(%esp),%xmm4
-.byte 102,15,56,220,217
- pxor 48(%esp),%xmm5
- decl %ecx
-.byte 102,15,56,220,225
- pxor 64(%esp),%xmm6
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
-.byte 102,15,56,220,241
- movups (%edx),%xmm0
-.byte 102,15,56,220,249
- call .L_aesni_encrypt6_enter
- movdqa 80(%esp),%xmm1
- pxor %xmm0,%xmm0
- xorps (%esp),%xmm2
- pcmpgtd %xmm1,%xmm0
- xorps 16(%esp),%xmm3
- movups %xmm2,(%edi)
- xorps 32(%esp),%xmm4
- movups %xmm3,16(%edi)
- xorps 48(%esp),%xmm5
- movups %xmm4,32(%edi)
- xorps 64(%esp),%xmm6
- movups %xmm5,48(%edi)
- xorps %xmm1,%xmm7
- movups %xmm6,64(%edi)
- pshufd $19,%xmm0,%xmm2
- movups %xmm7,80(%edi)
- leal 96(%edi),%edi
- movdqa 96(%esp),%xmm3
- pxor %xmm0,%xmm0
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- movl %ebx,%ecx
- pxor %xmm2,%xmm1
- subl $96,%eax
- jnc .L044xts_enc_loop6
- leal 1(,%ecx,2),%ecx
- movl %ebp,%edx
- movl %ecx,%ebx
-.L043xts_enc_short:
- addl $96,%eax
- jz .L045xts_enc_done6x
- movdqa %xmm1,%xmm5
- cmpl $32,%eax
- jb .L046xts_enc_one
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- je .L047xts_enc_two
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,%xmm6
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- cmpl $64,%eax
- jb .L048xts_enc_three
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,%xmm7
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- movdqa %xmm5,(%esp)
- movdqa %xmm6,16(%esp)
- je .L049xts_enc_four
- movdqa %xmm7,32(%esp)
- pshufd $19,%xmm0,%xmm7
- movdqa %xmm1,48(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm7
- pxor %xmm1,%xmm7
- movdqu (%esi),%xmm2
- movdqu 16(%esi),%xmm3
- movdqu 32(%esi),%xmm4
- pxor (%esp),%xmm2
- movdqu 48(%esi),%xmm5
- pxor 16(%esp),%xmm3
- movdqu 64(%esi),%xmm6
- pxor 32(%esp),%xmm4
- leal 80(%esi),%esi
- pxor 48(%esp),%xmm5
- movdqa %xmm7,64(%esp)
- pxor %xmm7,%xmm6
- call _aesni_encrypt6
- movaps 64(%esp),%xmm1
- xorps (%esp),%xmm2
- xorps 16(%esp),%xmm3
- xorps 32(%esp),%xmm4
- movups %xmm2,(%edi)
- xorps 48(%esp),%xmm5
- movups %xmm3,16(%edi)
- xorps %xmm1,%xmm6
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- movups %xmm6,64(%edi)
- leal 80(%edi),%edi
- jmp .L050xts_enc_done
-.align 16
-.L046xts_enc_one:
- movups (%esi),%xmm2
- leal 16(%esi),%esi
- xorps %xmm5,%xmm2
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L051enc1_loop_9:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L051enc1_loop_9
-.byte 102,15,56,221,209
- xorps %xmm5,%xmm2
- movups %xmm2,(%edi)
- leal 16(%edi),%edi
- movdqa %xmm5,%xmm1
- jmp .L050xts_enc_done
-.align 16
-.L047xts_enc_two:
- movaps %xmm1,%xmm6
- movups (%esi),%xmm2
- movups 16(%esi),%xmm3
- leal 32(%esi),%esi
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- leal 32(%edi),%edi
- movdqa %xmm6,%xmm1
- jmp .L050xts_enc_done
-.align 16
-.L048xts_enc_three:
- movaps %xmm1,%xmm7
- movups (%esi),%xmm2
- movups 16(%esi),%xmm3
- movups 32(%esi),%xmm4
- leal 48(%esi),%esi
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- xorps %xmm7,%xmm4
- call _aesni_encrypt3
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- xorps %xmm7,%xmm4
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- leal 48(%edi),%edi
- movdqa %xmm7,%xmm1
- jmp .L050xts_enc_done
-.align 16
-.L049xts_enc_four:
- movaps %xmm1,%xmm6
- movups (%esi),%xmm2
- movups 16(%esi),%xmm3
- movups 32(%esi),%xmm4
- xorps (%esp),%xmm2
- movups 48(%esi),%xmm5
- leal 64(%esi),%esi
- xorps 16(%esp),%xmm3
- xorps %xmm7,%xmm4
- xorps %xmm6,%xmm5
- call _aesni_encrypt4
- xorps (%esp),%xmm2
- xorps 16(%esp),%xmm3
- xorps %xmm7,%xmm4
- movups %xmm2,(%edi)
- xorps %xmm6,%xmm5
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- leal 64(%edi),%edi
- movdqa %xmm6,%xmm1
- jmp .L050xts_enc_done
-.align 16
-.L045xts_enc_done6x:
- movl 112(%esp),%eax
- andl $15,%eax
- jz .L052xts_enc_ret
- movdqa %xmm1,%xmm5
- movl %eax,112(%esp)
- jmp .L053xts_enc_steal
-.align 16
-.L050xts_enc_done:
- movl 112(%esp),%eax
- pxor %xmm0,%xmm0
- andl $15,%eax
- jz .L052xts_enc_ret
- pcmpgtd %xmm1,%xmm0
- movl %eax,112(%esp)
- pshufd $19,%xmm0,%xmm5
- paddq %xmm1,%xmm1
- pand 96(%esp),%xmm5
- pxor %xmm1,%xmm5
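-# XTS ciphertext stealing (descriptive note): the loop below copies the
-# remaining input bytes over the head of the last full ciphertext block
-# while parking the displaced ciphertext bytes after it; the patched
-# block is then re-encrypted once more with the final tweak in %xmm5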
-.L053xts_enc_steal:
- movzbl (%esi),%ecx
- movzbl -16(%edi),%edx
- leal 1(%esi),%esi
- movb %cl,-16(%edi)
- movb %dl,(%edi)
- leal 1(%edi),%edi
- subl $1,%eax
- jnz .L053xts_enc_steal
- subl 112(%esp),%edi
- movl %ebp,%edx
- movl %ebx,%ecx
- movups -16(%edi),%xmm2
- xorps %xmm5,%xmm2
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L054enc1_loop_10:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L054enc1_loop_10
-.byte 102,15,56,221,209
- xorps %xmm5,%xmm2
- movups %xmm2,-16(%edi)
-.L052xts_enc_ret:
- movl 116(%esp),%esp
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size aesni_xts_encrypt,.-.L_aesni_xts_encrypt_begin
-.globl aesni_xts_decrypt
-.type aesni_xts_decrypt,@function
-.align 16
-aesni_xts_decrypt:
-.L_aesni_xts_decrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 36(%esp),%edx
- movl 40(%esp),%esi
- movl 240(%edx),%ecx
- movups (%esi),%xmm2
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L055enc1_loop_11:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L055enc1_loop_11
-.byte 102,15,56,221,209
- movl 20(%esp),%esi
- movl 24(%esp),%edi
- movl 28(%esp),%eax
- movl 32(%esp),%edx
- movl %esp,%ebp
- subl $120,%esp
- andl $-16,%esp
- xorl %ebx,%ebx
- testl $15,%eax
- setnz %bl
- shll $4,%ebx
- subl %ebx,%eax
- movl $135,96(%esp)
- movl $0,100(%esp)
- movl $1,104(%esp)
- movl $0,108(%esp)
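-# 96(%esp) now holds the tweak mask (0x87,0,1,0): 0x87 is the XTS
-# reduction term x^7+x^2+x+1, and the lone 1 propagates the carry
-# between the two 64-bit halves when the tweak is doubled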
- movl %eax,112(%esp)
- movl %ebp,116(%esp)
- movl 240(%edx),%ecx
- movl %edx,%ebp
- movl %ecx,%ebx
- movdqa %xmm2,%xmm1
- pxor %xmm0,%xmm0
- movdqa 96(%esp),%xmm3
- pcmpgtd %xmm1,%xmm0
- andl $-16,%eax
- subl $96,%eax
- jc .L056xts_dec_short
- shrl $1,%ecx
- movl %ecx,%ebx
- jmp .L057xts_dec_loop6
-.align 16
-.L057xts_dec_loop6:
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,16(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,32(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,48(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- pshufd $19,%xmm0,%xmm7
- movdqa %xmm1,64(%esp)
- paddq %xmm1,%xmm1
- movups (%ebp),%xmm0
- pand %xmm3,%xmm7
- movups (%esi),%xmm2
- pxor %xmm1,%xmm7
- movdqu 16(%esi),%xmm3
- xorps %xmm0,%xmm2
- movdqu 32(%esi),%xmm4
- pxor %xmm0,%xmm3
- movdqu 48(%esi),%xmm5
- pxor %xmm0,%xmm4
- movdqu 64(%esi),%xmm6
- pxor %xmm0,%xmm5
- movdqu 80(%esi),%xmm1
- pxor %xmm0,%xmm6
- leal 96(%esi),%esi
- pxor (%esp),%xmm2
- movdqa %xmm7,80(%esp)
- pxor %xmm1,%xmm7
- movups 16(%ebp),%xmm1
- leal 32(%ebp),%edx
- pxor 16(%esp),%xmm3
-.byte 102,15,56,222,209
- pxor 32(%esp),%xmm4
-.byte 102,15,56,222,217
- pxor 48(%esp),%xmm5
- decl %ecx
-.byte 102,15,56,222,225
- pxor 64(%esp),%xmm6
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
-.byte 102,15,56,222,241
- movups (%edx),%xmm0
-.byte 102,15,56,222,249
- call .L_aesni_decrypt6_enter
- movdqa 80(%esp),%xmm1
- pxor %xmm0,%xmm0
- xorps (%esp),%xmm2
- pcmpgtd %xmm1,%xmm0
- xorps 16(%esp),%xmm3
- movups %xmm2,(%edi)
- xorps 32(%esp),%xmm4
- movups %xmm3,16(%edi)
- xorps 48(%esp),%xmm5
- movups %xmm4,32(%edi)
- xorps 64(%esp),%xmm6
- movups %xmm5,48(%edi)
- xorps %xmm1,%xmm7
- movups %xmm6,64(%edi)
- pshufd $19,%xmm0,%xmm2
- movups %xmm7,80(%edi)
- leal 96(%edi),%edi
- movdqa 96(%esp),%xmm3
- pxor %xmm0,%xmm0
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- movl %ebx,%ecx
- pxor %xmm2,%xmm1
- subl $96,%eax
- jnc .L057xts_dec_loop6
- leal 1(,%ecx,2),%ecx
- movl %ebp,%edx
- movl %ecx,%ebx
-.L056xts_dec_short:
- addl $96,%eax
- jz .L058xts_dec_done6x
- movdqa %xmm1,%xmm5
- cmpl $32,%eax
- jb .L059xts_dec_one
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- je .L060xts_dec_two
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,%xmm6
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- cmpl $64,%eax
- jb .L061xts_dec_three
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa %xmm1,%xmm7
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
- movdqa %xmm5,(%esp)
- movdqa %xmm6,16(%esp)
- je .L062xts_dec_four
- movdqa %xmm7,32(%esp)
- pshufd $19,%xmm0,%xmm7
- movdqa %xmm1,48(%esp)
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm7
- pxor %xmm1,%xmm7
- movdqu (%esi),%xmm2
- movdqu 16(%esi),%xmm3
- movdqu 32(%esi),%xmm4
- pxor (%esp),%xmm2
- movdqu 48(%esi),%xmm5
- pxor 16(%esp),%xmm3
- movdqu 64(%esi),%xmm6
- pxor 32(%esp),%xmm4
- leal 80(%esi),%esi
- pxor 48(%esp),%xmm5
- movdqa %xmm7,64(%esp)
- pxor %xmm7,%xmm6
- call _aesni_decrypt6
- movaps 64(%esp),%xmm1
- xorps (%esp),%xmm2
- xorps 16(%esp),%xmm3
- xorps 32(%esp),%xmm4
- movups %xmm2,(%edi)
- xorps 48(%esp),%xmm5
- movups %xmm3,16(%edi)
- xorps %xmm1,%xmm6
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- movups %xmm6,64(%edi)
- leal 80(%edi),%edi
- jmp .L063xts_dec_done
-.align 16
-.L059xts_dec_one:
- movups (%esi),%xmm2
- leal 16(%esi),%esi
- xorps %xmm5,%xmm2
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L064dec1_loop_12:
-.byte 102,15,56,222,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L064dec1_loop_12
-.byte 102,15,56,223,209
- xorps %xmm5,%xmm2
- movups %xmm2,(%edi)
- leal 16(%edi),%edi
- movdqa %xmm5,%xmm1
- jmp .L063xts_dec_done
-.align 16
-.L060xts_dec_two:
- movaps %xmm1,%xmm6
- movups (%esi),%xmm2
- movups 16(%esi),%xmm3
- leal 32(%esi),%esi
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- call _aesni_decrypt3
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- leal 32(%edi),%edi
- movdqa %xmm6,%xmm1
- jmp .L063xts_dec_done
-.align 16
-.L061xts_dec_three:
- movaps %xmm1,%xmm7
- movups (%esi),%xmm2
- movups 16(%esi),%xmm3
- movups 32(%esi),%xmm4
- leal 48(%esi),%esi
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- xorps %xmm7,%xmm4
- call _aesni_decrypt3
- xorps %xmm5,%xmm2
- xorps %xmm6,%xmm3
- xorps %xmm7,%xmm4
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- leal 48(%edi),%edi
- movdqa %xmm7,%xmm1
- jmp .L063xts_dec_done
-.align 16
-.L062xts_dec_four:
- movaps %xmm1,%xmm6
- movups (%esi),%xmm2
- movups 16(%esi),%xmm3
- movups 32(%esi),%xmm4
- xorps (%esp),%xmm2
- movups 48(%esi),%xmm5
- leal 64(%esi),%esi
- xorps 16(%esp),%xmm3
- xorps %xmm7,%xmm4
- xorps %xmm6,%xmm5
- call _aesni_decrypt4
- xorps (%esp),%xmm2
- xorps 16(%esp),%xmm3
- xorps %xmm7,%xmm4
- movups %xmm2,(%edi)
- xorps %xmm6,%xmm5
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- leal 64(%edi),%edi
- movdqa %xmm6,%xmm1
- jmp .L063xts_dec_done
-.align 16
-.L058xts_dec_done6x:
- movl 112(%esp),%eax
- andl $15,%eax
- jz .L065xts_dec_ret
- movl %eax,112(%esp)
- jmp .L066xts_dec_only_one_more
-.align 16
-.L063xts_dec_done:
- movl 112(%esp),%eax
- pxor %xmm0,%xmm0
- andl $15,%eax
- jz .L065xts_dec_ret
- pcmpgtd %xmm1,%xmm0
- movl %eax,112(%esp)
- pshufd $19,%xmm0,%xmm2
- pxor %xmm0,%xmm0
- movdqa 96(%esp),%xmm3
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm2
- pcmpgtd %xmm1,%xmm0
- pxor %xmm2,%xmm1
-.L066xts_dec_only_one_more:
- pshufd $19,%xmm0,%xmm5
- movdqa %xmm1,%xmm6
- paddq %xmm1,%xmm1
- pand %xmm3,%xmm5
- pxor %xmm1,%xmm5
- movl %ebp,%edx
- movl %ebx,%ecx
- movups (%esi),%xmm2
- xorps %xmm5,%xmm2
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L067dec1_loop_13:
-.byte 102,15,56,222,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L067dec1_loop_13
-.byte 102,15,56,223,209
- xorps %xmm5,%xmm2
- movups %xmm2,(%edi)
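-# XTS ciphertext stealing, decrypt side: swap the leftover input bytes
-# with the head of the plaintext block just produced, then decrypt the
-# patched block with the penultimate tweak kept in %xmm6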
-.L068xts_dec_steal:
- movzbl 16(%esi),%ecx
- movzbl (%edi),%edx
- leal 1(%esi),%esi
- movb %cl,(%edi)
- movb %dl,16(%edi)
- leal 1(%edi),%edi
- subl $1,%eax
- jnz .L068xts_dec_steal
- subl 112(%esp),%edi
- movl %ebp,%edx
- movl %ebx,%ecx
- movups (%edi),%xmm2
- xorps %xmm6,%xmm2
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L069dec1_loop_14:
-.byte 102,15,56,222,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L069dec1_loop_14
-.byte 102,15,56,223,209
- xorps %xmm6,%xmm2
- movups %xmm2,(%edi)
-.L065xts_dec_ret:
- movl 116(%esp),%esp
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size aesni_xts_decrypt,.-.L_aesni_xts_decrypt_begin
-.globl aesni_cbc_encrypt
-.type aesni_cbc_encrypt,@function
-.align 16
-aesni_cbc_encrypt:
-.L_aesni_cbc_encrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl %esp,%ebx
- movl 24(%esp),%edi
- subl $24,%ebx
- movl 28(%esp),%eax
- andl $-16,%ebx
- movl 32(%esp),%edx
- movl 36(%esp),%ebp
- testl %eax,%eax
- jz .L070cbc_abort
- cmpl $0,40(%esp)
- xchgl %esp,%ebx
- movups (%ebp),%xmm7
- movl 240(%edx),%ecx
- movl %edx,%ebp
- movl %ebx,16(%esp)
- movl %ecx,%ebx
- je .L071cbc_decrypt
- movaps %xmm7,%xmm2
- cmpl $16,%eax
- jb .L072cbc_enc_tail
- subl $16,%eax
- jmp .L073cbc_enc_loop
-.align 16
-.L073cbc_enc_loop:
- movups (%esi),%xmm7
- leal 16(%esi),%esi
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- xorps %xmm0,%xmm7
- leal 32(%edx),%edx
- xorps %xmm7,%xmm2
-.L074enc1_loop_15:
-.byte 102,15,56,220,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L074enc1_loop_15
-.byte 102,15,56,221,209
- movl %ebx,%ecx
- movl %ebp,%edx
- movups %xmm2,(%edi)
- leal 16(%edi),%edi
- subl $16,%eax
- jnc .L073cbc_enc_loop
- addl $16,%eax
- jnz .L072cbc_enc_tail
- movaps %xmm2,%xmm7
- jmp .L075cbc_ret
-.L072cbc_enc_tail:
- movl %eax,%ecx
-.long 2767451785
- movl $16,%ecx
- subl %eax,%ecx
- xorl %eax,%eax
-.long 2868115081
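-# the two .long constants above decode to "mov %esi,%esi; rep movsb"
-# and "mov %esi,%esi; rep stosb": copy the partial plaintext block in,
-# then zero-pad it to a full 16 bytes before looping back to encrypt it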
- leal -16(%edi),%edi
- movl %ebx,%ecx
- movl %edi,%esi
- movl %ebp,%edx
- jmp .L073cbc_enc_loop
-.align 16
-.L071cbc_decrypt:
- cmpl $80,%eax
- jbe .L076cbc_dec_tail
- movaps %xmm7,(%esp)
- subl $80,%eax
- jmp .L077cbc_dec_loop6_enter
-.align 16
-.L078cbc_dec_loop6:
- movaps %xmm0,(%esp)
- movups %xmm7,(%edi)
- leal 16(%edi),%edi
-.L077cbc_dec_loop6_enter:
- movdqu (%esi),%xmm2
- movdqu 16(%esi),%xmm3
- movdqu 32(%esi),%xmm4
- movdqu 48(%esi),%xmm5
- movdqu 64(%esi),%xmm6
- movdqu 80(%esi),%xmm7
- call _aesni_decrypt6
- movups (%esi),%xmm1
- movups 16(%esi),%xmm0
- xorps (%esp),%xmm2
- xorps %xmm1,%xmm3
- movups 32(%esi),%xmm1
- xorps %xmm0,%xmm4
- movups 48(%esi),%xmm0
- xorps %xmm1,%xmm5
- movups 64(%esi),%xmm1
- xorps %xmm0,%xmm6
- movups 80(%esi),%xmm0
- xorps %xmm1,%xmm7
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- leal 96(%esi),%esi
- movups %xmm4,32(%edi)
- movl %ebx,%ecx
- movups %xmm5,48(%edi)
- movl %ebp,%edx
- movups %xmm6,64(%edi)
- leal 80(%edi),%edi
- subl $96,%eax
- ja .L078cbc_dec_loop6
- movaps %xmm7,%xmm2
- movaps %xmm0,%xmm7
- addl $80,%eax
- jle .L079cbc_dec_tail_collected
- movups %xmm2,(%edi)
- leal 16(%edi),%edi
-.L076cbc_dec_tail:
- movups (%esi),%xmm2
- movaps %xmm2,%xmm6
- cmpl $16,%eax
- jbe .L080cbc_dec_one
- movups 16(%esi),%xmm3
- movaps %xmm3,%xmm5
- cmpl $32,%eax
- jbe .L081cbc_dec_two
- movups 32(%esi),%xmm4
- cmpl $48,%eax
- jbe .L082cbc_dec_three
- movups 48(%esi),%xmm5
- cmpl $64,%eax
- jbe .L083cbc_dec_four
- movups 64(%esi),%xmm6
- movaps %xmm7,(%esp)
- movups (%esi),%xmm2
- xorps %xmm7,%xmm7
- call _aesni_decrypt6
- movups (%esi),%xmm1
- movups 16(%esi),%xmm0
- xorps (%esp),%xmm2
- xorps %xmm1,%xmm3
- movups 32(%esi),%xmm1
- xorps %xmm0,%xmm4
- movups 48(%esi),%xmm0
- xorps %xmm1,%xmm5
- movups 64(%esi),%xmm7
- xorps %xmm0,%xmm6
- movups %xmm2,(%edi)
- movups %xmm3,16(%edi)
- movups %xmm4,32(%edi)
- movups %xmm5,48(%edi)
- leal 64(%edi),%edi
- movaps %xmm6,%xmm2
- subl $80,%eax
- jmp .L079cbc_dec_tail_collected
-.align 16
-.L080cbc_dec_one:
- movups (%edx),%xmm0
- movups 16(%edx),%xmm1
- leal 32(%edx),%edx
- xorps %xmm0,%xmm2
-.L084dec1_loop_16:
-.byte 102,15,56,222,209
- decl %ecx
- movups (%edx),%xmm1
- leal 16(%edx),%edx
- jnz .L084dec1_loop_16
-.byte 102,15,56,223,209
- xorps %xmm7,%xmm2
- movaps %xmm6,%xmm7
- subl $16,%eax
- jmp .L079cbc_dec_tail_collected
-.align 16
-.L081cbc_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
- xorps %xmm7,%xmm2
- xorps %xmm6,%xmm3
- movups %xmm2,(%edi)
- movaps %xmm3,%xmm2
- leal 16(%edi),%edi
- movaps %xmm5,%xmm7
- subl $32,%eax
- jmp .L079cbc_dec_tail_collected
-.align 16
-.L082cbc_dec_three:
- call _aesni_decrypt3
- xorps %xmm7,%xmm2
- xorps %xmm6,%xmm3
- xorps %xmm5,%xmm4
- movups %xmm2,(%edi)
- movaps %xmm4,%xmm2
- movups %xmm3,16(%edi)
- leal 32(%edi),%edi
- movups 32(%esi),%xmm7
- subl $48,%eax
- jmp .L079cbc_dec_tail_collected
-.align 16
-.L083cbc_dec_four:
- call _aesni_decrypt4
- movups 16(%esi),%xmm1
- movups 32(%esi),%xmm0
- xorps %xmm7,%xmm2
- movups 48(%esi),%xmm7
- xorps %xmm6,%xmm3
- movups %xmm2,(%edi)
- xorps %xmm1,%xmm4
- movups %xmm3,16(%edi)
- xorps %xmm0,%xmm5
- movups %xmm4,32(%edi)
- leal 48(%edi),%edi
- movaps %xmm5,%xmm2
- subl $64,%eax
-.L079cbc_dec_tail_collected:
- andl $15,%eax
- jnz .L085cbc_dec_tail_partial
- movups %xmm2,(%edi)
- jmp .L075cbc_ret
-.align 16
-.L085cbc_dec_tail_partial:
- movaps %xmm2,(%esp)
- movl $16,%ecx
- movl %esp,%esi
- subl %eax,%ecx
-.long 2767451785
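-# .long 2767451785 = "mov %esi,%esi; rep movsb": copy the partial
-# final block from the stack scratch area to the output buffer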
-.L075cbc_ret:
- movl 16(%esp),%esp
- movl 36(%esp),%ebp
- movups %xmm7,(%ebp)
-.L070cbc_abort:
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size aesni_cbc_encrypt,.-.L_aesni_cbc_encrypt_begin
-.type _aesni_set_encrypt_key,@function
-.align 16
-_aesni_set_encrypt_key:
- testl %eax,%eax
- jz .L086bad_pointer
- testl %edx,%edx
- jz .L086bad_pointer
- movups (%eax),%xmm0
- xorps %xmm4,%xmm4
- leal 16(%edx),%edx
- cmpl $256,%ecx
- je .L08714rounds
- cmpl $192,%ecx
- je .L08812rounds
- cmpl $128,%ecx
- jne .L089bad_keybits
-.align 16
-.L09010rounds:
- movl $9,%ecx
- movups %xmm0,-16(%edx)
-.byte 102,15,58,223,200,1
- call .L091key_128_cold
-.byte 102,15,58,223,200,2
- call .L092key_128
-.byte 102,15,58,223,200,4
- call .L092key_128
-.byte 102,15,58,223,200,8
- call .L092key_128
-.byte 102,15,58,223,200,16
- call .L092key_128
-.byte 102,15,58,223,200,32
- call .L092key_128
-.byte 102,15,58,223,200,64
- call .L092key_128
-.byte 102,15,58,223,200,128
- call .L092key_128
-.byte 102,15,58,223,200,27
- call .L092key_128
-.byte 102,15,58,223,200,54
- call .L092key_128
- movups %xmm0,(%edx)
- movl %ecx,80(%edx)
- xorl %eax,%eax
- ret
-.align 16
-.L092key_128:
- movups %xmm0,(%edx)
- leal 16(%edx),%edx
-.L091key_128_cold:
- shufps $16,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $140,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $255,%xmm1,%xmm1
- xorps %xmm1,%xmm0
- ret
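-# the shufps/xorps ladder above is the classic AES-128 schedule step:
-# two shufps/xorps pairs build the running prefix-xor of the round-key
-# words, then shufps $255 broadcasts the rot/sub word delivered by
-# aeskeygenassist in %xmm1 so it can be folded into all four words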
-.align 16
-.L08812rounds:
- movq 16(%eax),%xmm2
- movl $11,%ecx
- movups %xmm0,-16(%edx)
-.byte 102,15,58,223,202,1
- call .L093key_192a_cold
-.byte 102,15,58,223,202,2
- call .L094key_192b
-.byte 102,15,58,223,202,4
- call .L095key_192a
-.byte 102,15,58,223,202,8
- call .L094key_192b
-.byte 102,15,58,223,202,16
- call .L095key_192a
-.byte 102,15,58,223,202,32
- call .L094key_192b
-.byte 102,15,58,223,202,64
- call .L095key_192a
-.byte 102,15,58,223,202,128
- call .L094key_192b
- movups %xmm0,(%edx)
- movl %ecx,48(%edx)
- xorl %eax,%eax
- ret
-.align 16
-.L095key_192a:
- movups %xmm0,(%edx)
- leal 16(%edx),%edx
-.align 16
-.L093key_192a_cold:
- movaps %xmm2,%xmm5
-.L096key_192b_warm:
- shufps $16,%xmm0,%xmm4
- movdqa %xmm2,%xmm3
- xorps %xmm4,%xmm0
- shufps $140,%xmm0,%xmm4
- pslldq $4,%xmm3
- xorps %xmm4,%xmm0
- pshufd $85,%xmm1,%xmm1
- pxor %xmm3,%xmm2
- pxor %xmm1,%xmm0
- pshufd $255,%xmm0,%xmm3
- pxor %xmm3,%xmm2
- ret
-.align 16
-.L094key_192b:
- movaps %xmm0,%xmm3
- shufps $68,%xmm0,%xmm5
- movups %xmm5,(%edx)
- shufps $78,%xmm2,%xmm3
- movups %xmm3,16(%edx)
- leal 32(%edx),%edx
- jmp .L096key_192b_warm
-.align 16
-.L08714rounds:
- movups 16(%eax),%xmm2
- movl $13,%ecx
- leal 16(%edx),%edx
- movups %xmm0,-32(%edx)
- movups %xmm2,-16(%edx)
-.byte 102,15,58,223,202,1
- call .L097key_256a_cold
-.byte 102,15,58,223,200,1
- call .L098key_256b
-.byte 102,15,58,223,202,2
- call .L099key_256a
-.byte 102,15,58,223,200,2
- call .L098key_256b
-.byte 102,15,58,223,202,4
- call .L099key_256a
-.byte 102,15,58,223,200,4
- call .L098key_256b
-.byte 102,15,58,223,202,8
- call .L099key_256a
-.byte 102,15,58,223,200,8
- call .L098key_256b
-.byte 102,15,58,223,202,16
- call .L099key_256a
-.byte 102,15,58,223,200,16
- call .L098key_256b
-.byte 102,15,58,223,202,32
- call .L099key_256a
-.byte 102,15,58,223,200,32
- call .L098key_256b
-.byte 102,15,58,223,202,64
- call .L099key_256a
- movups %xmm0,(%edx)
- movl %ecx,16(%edx)
- xorl %eax,%eax
- ret
-.align 16
-.L099key_256a:
- movups %xmm2,(%edx)
- leal 16(%edx),%edx
-.L097key_256a_cold:
- shufps $16,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $140,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $255,%xmm1,%xmm1
- xorps %xmm1,%xmm0
- ret
-.align 16
-.L098key_256b:
- movups %xmm0,(%edx)
- leal 16(%edx),%edx
- shufps $16,%xmm2,%xmm4
- xorps %xmm4,%xmm2
- shufps $140,%xmm2,%xmm4
- xorps %xmm4,%xmm2
- shufps $170,%xmm1,%xmm1
- xorps %xmm1,%xmm2
- ret
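-# error returns below follow AES_set_encrypt_key's convention:
-# -1 for a NULL pointer argument, -2 for an unsupported key size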
-.align 4
-.L086bad_pointer:
- movl $-1,%eax
- ret
-.align 4
-.L089bad_keybits:
- movl $-2,%eax
- ret
-.size _aesni_set_encrypt_key,.-_aesni_set_encrypt_key
-.globl aesni_set_encrypt_key
-.type aesni_set_encrypt_key,@function
-.align 16
-aesni_set_encrypt_key:
-.L_aesni_set_encrypt_key_begin:
- movl 4(%esp),%eax
- movl 8(%esp),%ecx
- movl 12(%esp),%edx
- call _aesni_set_encrypt_key
- ret
-.size aesni_set_encrypt_key,.-.L_aesni_set_encrypt_key_begin
-.globl aesni_set_decrypt_key
-.type aesni_set_decrypt_key,@function
-.align 16
-aesni_set_decrypt_key:
-.L_aesni_set_decrypt_key_begin:
- movl 4(%esp),%eax
- movl 8(%esp),%ecx
- movl 12(%esp),%edx
- call _aesni_set_encrypt_key
- movl 12(%esp),%edx
- shll $4,%ecx
- testl %eax,%eax
- jnz .L100dec_key_ret
- leal 16(%edx,%ecx,1),%eax
- movups (%edx),%xmm0
- movups (%eax),%xmm1
- movups %xmm0,(%eax)
- movups %xmm1,(%edx)
- leal 16(%edx),%edx
- leal -16(%eax),%eax
-.L101dec_key_inverse:
- movups (%edx),%xmm0
- movups (%eax),%xmm1
-.byte 102,15,56,219,192
-.byte 102,15,56,219,201
- leal 16(%edx),%edx
- leal -16(%eax),%eax
- movups %xmm0,16(%eax)
- movups %xmm1,-16(%edx)
- cmpl %edx,%eax
- ja .L101dec_key_inverse
- movups (%edx),%xmm0
-.byte 102,15,56,219,192
- movups %xmm0,(%edx)
- xorl %eax,%eax
-.L100dec_key_ret:
- ret
-.size aesni_set_decrypt_key,.-.L_aesni_set_decrypt_key_begin
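-# aesni_set_decrypt_key expands the encryption schedule, swaps the round
-# keys end for end, and runs aesimc (the 102,15,56,219 byte sequences)
-# over the inner ones - the inverse-cipher schedule that aesdec expects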
-.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69
-.byte 83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83
-.byte 32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115
-.byte 115,108,46,111,114,103,62,0
diff --git a/app/openssl/crypto/aes/asm/aesni-x86.pl b/app/openssl/crypto/aes/asm/aesni-x86.pl
deleted file mode 100644
index 3dc345b5..00000000
--- a/app/openssl/crypto/aes/asm/aesni-x86.pl
+++ /dev/null
@@ -1,2189 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# This module implements support for the Intel AES-NI extension. In
-# the OpenSSL context it's used with the Intel engine, but it can also
-# be used as a drop-in replacement for crypto/aes/asm/aes-586.pl [see
-# below for details].
-#
-# Performance.
-#
-# To start with, see the corresponding paragraph in aesni-x86_64.pl...
-# Instead of filling in a table similar to the one found there, I've
-# chosen to summarize *comparison* results for raw ECB, CTR and CBC
-# benchmarks. The simplified table below gives 32-bit performance
-# relative to 64-bit at every given point. Ratios vary across
-# encryption modes, hence the interval values.
-#
-# 16-byte 64-byte 256-byte 1-KB 8-KB
-# 53-67% 67-84% 91-94% 95-98% 97-99.5%
-#
-# Lower ratios for smaller block sizes are perfectly understandable,
-# because function call overhead is higher in 32-bit mode. For the
-# largest 8-KB blocks performance is virtually the same: 32-bit code
-# is less than 1% slower for ECB, CBC and CCM, and ~3% slower otherwise.
-
-# January 2011
-#
-# See aesni-x86_64.pl for details. Unlike the x86_64 version, this
-# module interleaves at most 6 aes[enc|dec] instructions, because there
-# are not enough registers for an 8x interleave [which should be
-# optimal for Sandy Bridge]. Actually, the performance results for the
-# 6x interleave factor presented in aesni-x86_64.pl (except for CTR)
-# are for this module.
-
-# April 2011
-#
-# Add aesni_xts_[en|de]crypt. Westmere spends 1.50 cycles processing
-# one byte out of 8KB with a 128-bit key; Sandy Bridge spends 1.09.
-
-$PREFIX="aesni"; # if $PREFIX is set to "AES", the script
- # generates drop-in replacement for
- # crypto/aes/asm/aes-586.pl:-)
-$inline=1; # inline _aesni_[en|de]crypt
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],$0);
-
-if ($PREFIX eq "aesni") { $movekey=*movups; }
-else { $movekey=*movups; }
-
-$len="eax";
-$rounds="ecx";
-$key="edx";
-$inp="esi";
-$out="edi";
-$rounds_="ebx"; # backup copy for $rounds
-$key_="ebp"; # backup copy for $key
-
-$rndkey0="xmm0";
-$rndkey1="xmm1";
-$inout0="xmm2";
-$inout1="xmm3";
-$inout2="xmm4";
-$inout3="xmm5"; $in1="xmm5";
-$inout4="xmm6"; $in0="xmm6";
-$inout5="xmm7"; $ivec="xmm7";
-
-# AESNI extension
-sub aeskeygenassist
-{ my($dst,$src,$imm)=@_;
- if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
- { &data_byte(0x66,0x0f,0x3a,0xdf,0xc0|($1<<3)|$2,$imm); }
-}
-sub aescommon
-{ my($opcodelet,$dst,$src)=@_;
- if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
- { &data_byte(0x66,0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);}
-}
-sub aesimc { aescommon(0xdb,@_); }
-sub aesenc { aescommon(0xdc,@_); }
-sub aesenclast { aescommon(0xdd,@_); }
-sub aesdec { aescommon(0xde,@_); }
-sub aesdeclast { aescommon(0xdf,@_); }
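-
-# These helpers emit AES-NI instructions as raw opcode bytes, so the
-# module assembles even with tool chains that predate the mnemonics.
-# For example &aesenc("xmm2","xmm1") produces
-#	.byte	0x66,0x0f,0x38,0xdc,0xd1	# aesenc	%xmm1,%xmm2
-# where 0xd1 = 0xc0|(2<<3)|1 encodes dst=xmm2, src=xmm1 in ModR/M.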
-
-# Inline version of internal aesni_[en|de]crypt1
-{ my $sn;
-sub aesni_inline_generate1
-{ my ($p,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
- $sn++;
-
- &$movekey ($rndkey0,&QWP(0,$key));
- &$movekey ($rndkey1,&QWP(16,$key));
- &xorps ($ivec,$rndkey0) if (defined($ivec));
- &lea ($key,&DWP(32,$key));
- &xorps ($inout,$ivec) if (defined($ivec));
- &xorps ($inout,$rndkey0) if (!defined($ivec));
- &set_label("${p}1_loop_$sn");
- eval"&aes${p} ($inout,$rndkey1)";
- &dec ($rounds);
- &$movekey ($rndkey1,&QWP(0,$key));
- &lea ($key,&DWP(16,$key));
- &jnz (&label("${p}1_loop_$sn"));
- eval"&aes${p}last ($inout,$rndkey1)";
-}}
-
-sub aesni_generate1 # fully unrolled loop
-{ my ($p,$inout)=@_; $inout=$inout0 if (!defined($inout));
-
- &function_begin_B("_aesni_${p}rypt1");
- &movups ($rndkey0,&QWP(0,$key));
- &$movekey ($rndkey1,&QWP(0x10,$key));
- &xorps ($inout,$rndkey0);
- &$movekey ($rndkey0,&QWP(0x20,$key));
- &lea ($key,&DWP(0x30,$key));
- &cmp ($rounds,11);
- &jb (&label("${p}128"));
- &lea ($key,&DWP(0x20,$key));
- &je (&label("${p}192"));
- &lea ($key,&DWP(0x20,$key));
- eval"&aes${p} ($inout,$rndkey1)";
- &$movekey ($rndkey1,&QWP(-0x40,$key));
- eval"&aes${p} ($inout,$rndkey0)";
- &$movekey ($rndkey0,&QWP(-0x30,$key));
- &set_label("${p}192");
- eval"&aes${p} ($inout,$rndkey1)";
- &$movekey ($rndkey1,&QWP(-0x20,$key));
- eval"&aes${p} ($inout,$rndkey0)";
- &$movekey ($rndkey0,&QWP(-0x10,$key));
- &set_label("${p}128");
- eval"&aes${p} ($inout,$rndkey1)";
- &$movekey ($rndkey1,&QWP(0,$key));
- eval"&aes${p} ($inout,$rndkey0)";
- &$movekey ($rndkey0,&QWP(0x10,$key));
- eval"&aes${p} ($inout,$rndkey1)";
- &$movekey ($rndkey1,&QWP(0x20,$key));
- eval"&aes${p} ($inout,$rndkey0)";
- &$movekey ($rndkey0,&QWP(0x30,$key));
- eval"&aes${p} ($inout,$rndkey1)";
- &$movekey ($rndkey1,&QWP(0x40,$key));
- eval"&aes${p} ($inout,$rndkey0)";
- &$movekey ($rndkey0,&QWP(0x50,$key));
- eval"&aes${p} ($inout,$rndkey1)";
- &$movekey ($rndkey1,&QWP(0x60,$key));
- eval"&aes${p} ($inout,$rndkey0)";
- &$movekey ($rndkey0,&QWP(0x70,$key));
- eval"&aes${p} ($inout,$rndkey1)";
- eval"&aes${p}last ($inout,$rndkey0)";
- &ret();
- &function_end_B("_aesni_${p}rypt1");
-}
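-
-# The ladder above serves all key sizes with a single unrolled body:
-# this module keeps 9, 11 or 13 in the key's rounds field (one less
-# than the nominal round count), so the cmp/jb/je entry points simply
-# skip the extra rounds that only 192- and 256-bit keys require.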
-
-# void $PREFIX_encrypt (const void *inp,void *out,const AES_KEY *key);
-&aesni_generate1("enc") if (!$inline);
-&function_begin_B("${PREFIX}_encrypt");
- &mov ("eax",&wparam(0));
- &mov ($key,&wparam(2));
- &movups ($inout0,&QWP(0,"eax"));
- &mov ($rounds,&DWP(240,$key));
- &mov ("eax",&wparam(1));
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
- &movups (&QWP(0,"eax"),$inout0);
- &ret ();
-&function_end_B("${PREFIX}_encrypt");
-
-# void $PREFIX_decrypt (const void *inp,void *out,const AES_KEY *key);
-&aesni_generate1("dec") if(!$inline);
-&function_begin_B("${PREFIX}_decrypt");
- &mov ("eax",&wparam(0));
- &mov ($key,&wparam(2));
- &movups ($inout0,&QWP(0,"eax"));
- &mov ($rounds,&DWP(240,$key));
- &mov ("eax",&wparam(1));
- if ($inline)
- { &aesni_inline_generate1("dec"); }
- else
- { &call ("_aesni_decrypt1"); }
- &movups (&QWP(0,"eax"),$inout0);
- &ret ();
-&function_end_B("${PREFIX}_decrypt");
-
-# _aesni_[en|de]cryptN are private interfaces, where N denotes the
-# interleave factor. Why were 3x subroutines originally used in loops?
-# Even though aes[enc|dec] latency was originally 6, it could be
-# scheduled only every *2nd* cycle. Thus a 3x interleave was the one
-# providing optimal utilization, i.e. the point where the subroutine's
-# throughput is virtually the same as that of a non-interleaved
-# subroutine [for up to 3 input blocks]. This is why it makes no sense
-# to implement a 2x subroutine. aes[enc|dec] latency in the next
-# processor generation is 8, but the instructions can be scheduled
-# every cycle. The optimal interleave for the new processor is
-# therefore 8x, but it's infeasible to accommodate it in the XMM
-# registers addressable in 32-bit mode, and therefore 6x is used
-# instead...
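-# (Back-of-the-envelope: latency 6 at one issue per two cycles keeps
-# 6/2 = 3 blocks in flight; latency 8 at one issue per cycle would
-# keep 8/1 = 8 - hence the 6x compromise.)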
-
-sub aesni_generate3
-{ my $p=shift;
-
- &function_begin_B("_aesni_${p}rypt3");
- &$movekey ($rndkey0,&QWP(0,$key));
- &shr ($rounds,1);
- &$movekey ($rndkey1,&QWP(16,$key));
- &lea ($key,&DWP(32,$key));
- &xorps ($inout0,$rndkey0);
- &pxor ($inout1,$rndkey0);
- &pxor ($inout2,$rndkey0);
- &$movekey ($rndkey0,&QWP(0,$key));
-
- &set_label("${p}3_loop");
- eval"&aes${p} ($inout0,$rndkey1)";
- eval"&aes${p} ($inout1,$rndkey1)";
- &dec ($rounds);
- eval"&aes${p} ($inout2,$rndkey1)";
- &$movekey ($rndkey1,&QWP(16,$key));
- eval"&aes${p} ($inout0,$rndkey0)";
- eval"&aes${p} ($inout1,$rndkey0)";
- &lea ($key,&DWP(32,$key));
- eval"&aes${p} ($inout2,$rndkey0)";
- &$movekey ($rndkey0,&QWP(0,$key));
- &jnz (&label("${p}3_loop"));
- eval"&aes${p} ($inout0,$rndkey1)";
- eval"&aes${p} ($inout1,$rndkey1)";
- eval"&aes${p} ($inout2,$rndkey1)";
- eval"&aes${p}last ($inout0,$rndkey0)";
- eval"&aes${p}last ($inout1,$rndkey0)";
- eval"&aes${p}last ($inout2,$rndkey0)";
- &ret();
- &function_end_B("_aesni_${p}rypt3");
-}
-
-# 4x interleave is implemented to improve small-block performance, most
-# notably [and naturally] the 4-block case, by ~30%. One can argue that
-# a 5x interleave should have been implemented as well, but the
-# improvement would be <20%, so it's not worth it...
-sub aesni_generate4
-{ my $p=shift;
-
- &function_begin_B("_aesni_${p}rypt4");
- &$movekey ($rndkey0,&QWP(0,$key));
- &$movekey ($rndkey1,&QWP(16,$key));
- &shr ($rounds,1);
- &lea ($key,&DWP(32,$key));
- &xorps ($inout0,$rndkey0);
- &pxor ($inout1,$rndkey0);
- &pxor ($inout2,$rndkey0);
- &pxor ($inout3,$rndkey0);
- &$movekey ($rndkey0,&QWP(0,$key));
-
- &set_label("${p}4_loop");
- eval"&aes${p} ($inout0,$rndkey1)";
- eval"&aes${p} ($inout1,$rndkey1)";
- &dec ($rounds);
- eval"&aes${p} ($inout2,$rndkey1)";
- eval"&aes${p} ($inout3,$rndkey1)";
- &$movekey ($rndkey1,&QWP(16,$key));
- eval"&aes${p} ($inout0,$rndkey0)";
- eval"&aes${p} ($inout1,$rndkey0)";
- &lea ($key,&DWP(32,$key));
- eval"&aes${p} ($inout2,$rndkey0)";
- eval"&aes${p} ($inout3,$rndkey0)";
- &$movekey ($rndkey0,&QWP(0,$key));
- &jnz (&label("${p}4_loop"));
-
- eval"&aes${p} ($inout0,$rndkey1)";
- eval"&aes${p} ($inout1,$rndkey1)";
- eval"&aes${p} ($inout2,$rndkey1)";
- eval"&aes${p} ($inout3,$rndkey1)";
- eval"&aes${p}last ($inout0,$rndkey0)";
- eval"&aes${p}last ($inout1,$rndkey0)";
- eval"&aes${p}last ($inout2,$rndkey0)";
- eval"&aes${p}last ($inout3,$rndkey0)";
- &ret();
- &function_end_B("_aesni_${p}rypt4");
-}
-
-sub aesni_generate6
-{ my $p=shift;
-
- &function_begin_B("_aesni_${p}rypt6");
- &static_label("_aesni_${p}rypt6_enter");
- &$movekey ($rndkey0,&QWP(0,$key));
- &shr ($rounds,1);
- &$movekey ($rndkey1,&QWP(16,$key));
- &lea ($key,&DWP(32,$key));
- &xorps ($inout0,$rndkey0);
- &pxor ($inout1,$rndkey0); # pxor does better here
- eval"&aes${p} ($inout0,$rndkey1)";
- &pxor ($inout2,$rndkey0);
- eval"&aes${p} ($inout1,$rndkey1)";
- &pxor ($inout3,$rndkey0);
- &dec ($rounds);
- eval"&aes${p} ($inout2,$rndkey1)";
- &pxor ($inout4,$rndkey0);
- eval"&aes${p} ($inout3,$rndkey1)";
- &pxor ($inout5,$rndkey0);
- eval"&aes${p} ($inout4,$rndkey1)";
- &$movekey ($rndkey0,&QWP(0,$key));
- eval"&aes${p} ($inout5,$rndkey1)";
- &jmp (&label("_aesni_${p}rypt6_enter"));
-
- &set_label("${p}6_loop",16);
- eval"&aes${p} ($inout0,$rndkey1)";
- eval"&aes${p} ($inout1,$rndkey1)";
- &dec ($rounds);
- eval"&aes${p} ($inout2,$rndkey1)";
- eval"&aes${p} ($inout3,$rndkey1)";
- eval"&aes${p} ($inout4,$rndkey1)";
- eval"&aes${p} ($inout5,$rndkey1)";
- &set_label("_aesni_${p}rypt6_enter",16);
- &$movekey ($rndkey1,&QWP(16,$key));
- eval"&aes${p} ($inout0,$rndkey0)";
- eval"&aes${p} ($inout1,$rndkey0)";
- &lea ($key,&DWP(32,$key));
- eval"&aes${p} ($inout2,$rndkey0)";
- eval"&aes${p} ($inout3,$rndkey0)";
- eval"&aes${p} ($inout4,$rndkey0)";
- eval"&aes${p} ($inout5,$rndkey0)";
- &$movekey ($rndkey0,&QWP(0,$key));
- &jnz (&label("${p}6_loop"));
-
- eval"&aes${p} ($inout0,$rndkey1)";
- eval"&aes${p} ($inout1,$rndkey1)";
- eval"&aes${p} ($inout2,$rndkey1)";
- eval"&aes${p} ($inout3,$rndkey1)";
- eval"&aes${p} ($inout4,$rndkey1)";
- eval"&aes${p} ($inout5,$rndkey1)";
- eval"&aes${p}last ($inout0,$rndkey0)";
- eval"&aes${p}last ($inout1,$rndkey0)";
- eval"&aes${p}last ($inout2,$rndkey0)";
- eval"&aes${p}last ($inout3,$rndkey0)";
- eval"&aes${p}last ($inout4,$rndkey0)";
- eval"&aes${p}last ($inout5,$rndkey0)";
- &ret();
- &function_end_B("_aesni_${p}rypt6");
-}
-&aesni_generate3("enc") if ($PREFIX eq "aesni");
-&aesni_generate3("dec");
-&aesni_generate4("enc") if ($PREFIX eq "aesni");
-&aesni_generate4("dec");
-&aesni_generate6("enc") if ($PREFIX eq "aesni");
-&aesni_generate6("dec");
-
-if ($PREFIX eq "aesni") {
-######################################################################
-# void aesni_ecb_encrypt (const void *in, void *out,
-# size_t length, const AES_KEY *key,
-# int enc);
-&function_begin("aesni_ecb_encrypt");
- &mov ($inp,&wparam(0));
- &mov ($out,&wparam(1));
- &mov ($len,&wparam(2));
- &mov ($key,&wparam(3));
- &mov ($rounds_,&wparam(4));
- &and ($len,-16);
- &jz (&label("ecb_ret"));
- &mov ($rounds,&DWP(240,$key));
- &test ($rounds_,$rounds_);
- &jz (&label("ecb_decrypt"));
-
- &mov ($key_,$key); # backup $key
- &mov ($rounds_,$rounds); # backup $rounds
- &cmp ($len,0x60);
- &jb (&label("ecb_enc_tail"));
-
- &movdqu ($inout0,&QWP(0,$inp));
- &movdqu ($inout1,&QWP(0x10,$inp));
- &movdqu ($inout2,&QWP(0x20,$inp));
- &movdqu ($inout3,&QWP(0x30,$inp));
- &movdqu ($inout4,&QWP(0x40,$inp));
- &movdqu ($inout5,&QWP(0x50,$inp));
- &lea ($inp,&DWP(0x60,$inp));
- &sub ($len,0x60);
- &jmp (&label("ecb_enc_loop6_enter"));
-
-&set_label("ecb_enc_loop6",16);
- &movups (&QWP(0,$out),$inout0);
- &movdqu ($inout0,&QWP(0,$inp));
- &movups (&QWP(0x10,$out),$inout1);
- &movdqu ($inout1,&QWP(0x10,$inp));
- &movups (&QWP(0x20,$out),$inout2);
- &movdqu ($inout2,&QWP(0x20,$inp));
- &movups (&QWP(0x30,$out),$inout3);
- &movdqu ($inout3,&QWP(0x30,$inp));
- &movups (&QWP(0x40,$out),$inout4);
- &movdqu ($inout4,&QWP(0x40,$inp));
- &movups (&QWP(0x50,$out),$inout5);
- &lea ($out,&DWP(0x60,$out));
- &movdqu ($inout5,&QWP(0x50,$inp));
- &lea ($inp,&DWP(0x60,$inp));
-&set_label("ecb_enc_loop6_enter");
-
- &call ("_aesni_encrypt6");
-
- &mov ($key,$key_); # restore $key
- &mov ($rounds,$rounds_); # restore $rounds
- &sub ($len,0x60);
- &jnc (&label("ecb_enc_loop6"));
-
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &movups (&QWP(0x40,$out),$inout4);
- &movups (&QWP(0x50,$out),$inout5);
- &lea ($out,&DWP(0x60,$out));
- &add ($len,0x60);
- &jz (&label("ecb_ret"));
-
-&set_label("ecb_enc_tail");
- &movups ($inout0,&QWP(0,$inp));
- &cmp ($len,0x20);
- &jb (&label("ecb_enc_one"));
- &movups ($inout1,&QWP(0x10,$inp));
- &je (&label("ecb_enc_two"));
- &movups ($inout2,&QWP(0x20,$inp));
- &cmp ($len,0x40);
- &jb (&label("ecb_enc_three"));
- &movups ($inout3,&QWP(0x30,$inp));
- &je (&label("ecb_enc_four"));
- &movups ($inout4,&QWP(0x40,$inp));
- &xorps ($inout5,$inout5);
- &call ("_aesni_encrypt6");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &movups (&QWP(0x40,$out),$inout4);
-	&jmp	(&label("ecb_ret"));
-
-&set_label("ecb_enc_one",16);
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
- &movups (&QWP(0,$out),$inout0);
- &jmp (&label("ecb_ret"));
-
-&set_label("ecb_enc_two",16);
- &xorps ($inout2,$inout2);
- &call ("_aesni_encrypt3");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &jmp (&label("ecb_ret"));
-
-&set_label("ecb_enc_three",16);
- &call ("_aesni_encrypt3");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &jmp (&label("ecb_ret"));
-
-&set_label("ecb_enc_four",16);
- &call ("_aesni_encrypt4");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &jmp (&label("ecb_ret"));
-######################################################################
-&set_label("ecb_decrypt",16);
- &mov ($key_,$key); # backup $key
- &mov ($rounds_,$rounds); # backup $rounds
- &cmp ($len,0x60);
- &jb (&label("ecb_dec_tail"));
-
- &movdqu ($inout0,&QWP(0,$inp));
- &movdqu ($inout1,&QWP(0x10,$inp));
- &movdqu ($inout2,&QWP(0x20,$inp));
- &movdqu ($inout3,&QWP(0x30,$inp));
- &movdqu ($inout4,&QWP(0x40,$inp));
- &movdqu ($inout5,&QWP(0x50,$inp));
- &lea ($inp,&DWP(0x60,$inp));
- &sub ($len,0x60);
- &jmp (&label("ecb_dec_loop6_enter"));
-
-&set_label("ecb_dec_loop6",16);
- &movups (&QWP(0,$out),$inout0);
- &movdqu ($inout0,&QWP(0,$inp));
- &movups (&QWP(0x10,$out),$inout1);
- &movdqu ($inout1,&QWP(0x10,$inp));
- &movups (&QWP(0x20,$out),$inout2);
- &movdqu ($inout2,&QWP(0x20,$inp));
- &movups (&QWP(0x30,$out),$inout3);
- &movdqu ($inout3,&QWP(0x30,$inp));
- &movups (&QWP(0x40,$out),$inout4);
- &movdqu ($inout4,&QWP(0x40,$inp));
- &movups (&QWP(0x50,$out),$inout5);
- &lea ($out,&DWP(0x60,$out));
- &movdqu ($inout5,&QWP(0x50,$inp));
- &lea ($inp,&DWP(0x60,$inp));
-&set_label("ecb_dec_loop6_enter");
-
- &call ("_aesni_decrypt6");
-
- &mov ($key,$key_); # restore $key
- &mov ($rounds,$rounds_); # restore $rounds
- &sub ($len,0x60);
- &jnc (&label("ecb_dec_loop6"));
-
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &movups (&QWP(0x40,$out),$inout4);
- &movups (&QWP(0x50,$out),$inout5);
- &lea ($out,&DWP(0x60,$out));
- &add ($len,0x60);
- &jz (&label("ecb_ret"));
-
-&set_label("ecb_dec_tail");
- &movups ($inout0,&QWP(0,$inp));
- &cmp ($len,0x20);
- &jb (&label("ecb_dec_one"));
- &movups ($inout1,&QWP(0x10,$inp));
- &je (&label("ecb_dec_two"));
- &movups ($inout2,&QWP(0x20,$inp));
- &cmp ($len,0x40);
- &jb (&label("ecb_dec_three"));
- &movups ($inout3,&QWP(0x30,$inp));
- &je (&label("ecb_dec_four"));
- &movups ($inout4,&QWP(0x40,$inp));
- &xorps ($inout5,$inout5);
- &call ("_aesni_decrypt6");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &movups (&QWP(0x40,$out),$inout4);
- &jmp (&label("ecb_ret"));
-
-&set_label("ecb_dec_one",16);
- if ($inline)
- { &aesni_inline_generate1("dec"); }
- else
- { &call ("_aesni_decrypt1"); }
- &movups (&QWP(0,$out),$inout0);
- &jmp (&label("ecb_ret"));
-
-&set_label("ecb_dec_two",16);
- &xorps ($inout2,$inout2);
- &call ("_aesni_decrypt3");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &jmp (&label("ecb_ret"));
-
-&set_label("ecb_dec_three",16);
- &call ("_aesni_decrypt3");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &jmp (&label("ecb_ret"));
-
-&set_label("ecb_dec_four",16);
- &call ("_aesni_decrypt4");
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
-
-&set_label("ecb_ret");
-&function_end("aesni_ecb_encrypt");
-
-######################################################################
-# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
-# size_t blocks, const AES_KEY *key,
-# const char *ivec,char *cmac);
-#
-# Handles only complete blocks, operates on a 64-bit counter and
-# does not update *ivec! Nor does it finalize the CMAC value
-# (see engine/eng_aesni.c for details)
-#
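-# Per block the interleaved loop effectively computes, as a rough
-# C-like sketch (illustrative, not normative):
-#
-#	cmac   = E_k(cmac ^ inp[i]);	/* CBC-MAC leg */
-#	out[i] = inp[i] ^ E_k(ctr);	/* CTR leg */
-#	ctr64++;			/* paddq on the 64-bit counter */
-#
-# with both E_k invocations sharing a single pass over the key schedule.
-#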
-{ my $cmac=$inout1;
-&function_begin("aesni_ccm64_encrypt_blocks");
- &mov ($inp,&wparam(0));
- &mov ($out,&wparam(1));
- &mov ($len,&wparam(2));
- &mov ($key,&wparam(3));
- &mov ($rounds_,&wparam(4));
- &mov ($rounds,&wparam(5));
- &mov ($key_,"esp");
- &sub ("esp",60);
- &and ("esp",-16); # align stack
- &mov (&DWP(48,"esp"),$key_);
-
- &movdqu ($ivec,&QWP(0,$rounds_)); # load ivec
- &movdqu ($cmac,&QWP(0,$rounds)); # load cmac
- &mov ($rounds,&DWP(240,$key));
-
- # compose byte-swap control mask for pshufb on stack
- &mov (&DWP(0,"esp"),0x0c0d0e0f);
- &mov (&DWP(4,"esp"),0x08090a0b);
- &mov (&DWP(8,"esp"),0x04050607);
- &mov (&DWP(12,"esp"),0x00010203);
-
- # compose counter increment vector on stack
- &mov ($rounds_,1);
- &xor ($key_,$key_);
- &mov (&DWP(16,"esp"),$rounds_);
- &mov (&DWP(20,"esp"),$key_);
- &mov (&DWP(24,"esp"),$key_);
- &mov (&DWP(28,"esp"),$key_);
-
- &shr ($rounds,1);
- &lea ($key_,&DWP(0,$key));
- &movdqa ($inout3,&QWP(0,"esp"));
- &movdqa ($inout0,$ivec);
- &mov ($rounds_,$rounds);
- &pshufb ($ivec,$inout3);
-
-&set_label("ccm64_enc_outer");
- &$movekey ($rndkey0,&QWP(0,$key_));
- &mov ($rounds,$rounds_);
- &movups ($in0,&QWP(0,$inp));
-
- &xorps ($inout0,$rndkey0);
- &$movekey ($rndkey1,&QWP(16,$key_));
- &xorps ($rndkey0,$in0);
- &lea ($key,&DWP(32,$key_));
- &xorps ($cmac,$rndkey0); # cmac^=inp
- &$movekey ($rndkey0,&QWP(0,$key));
-
-&set_label("ccm64_enc2_loop");
- &aesenc ($inout0,$rndkey1);
- &dec ($rounds);
- &aesenc ($cmac,$rndkey1);
- &$movekey ($rndkey1,&QWP(16,$key));
- &aesenc ($inout0,$rndkey0);
- &lea ($key,&DWP(32,$key));
- &aesenc ($cmac,$rndkey0);
- &$movekey ($rndkey0,&QWP(0,$key));
- &jnz (&label("ccm64_enc2_loop"));
- &aesenc ($inout0,$rndkey1);
- &aesenc ($cmac,$rndkey1);
- &paddq ($ivec,&QWP(16,"esp"));
- &aesenclast ($inout0,$rndkey0);
- &aesenclast ($cmac,$rndkey0);
-
- &dec ($len);
- &lea ($inp,&DWP(16,$inp));
- &xorps ($in0,$inout0); # inp^=E(ivec)
- &movdqa ($inout0,$ivec);
- &movups (&QWP(0,$out),$in0); # save output
- &lea ($out,&DWP(16,$out));
- &pshufb ($inout0,$inout3);
- &jnz (&label("ccm64_enc_outer"));
-
- &mov ("esp",&DWP(48,"esp"));
- &mov ($out,&wparam(5));
- &movups (&QWP(0,$out),$cmac);
-&function_end("aesni_ccm64_encrypt_blocks");
-
-&function_begin("aesni_ccm64_decrypt_blocks");
- &mov ($inp,&wparam(0));
- &mov ($out,&wparam(1));
- &mov ($len,&wparam(2));
- &mov ($key,&wparam(3));
- &mov ($rounds_,&wparam(4));
- &mov ($rounds,&wparam(5));
- &mov ($key_,"esp");
- &sub ("esp",60);
- &and ("esp",-16); # align stack
- &mov (&DWP(48,"esp"),$key_);
-
- &movdqu ($ivec,&QWP(0,$rounds_)); # load ivec
- &movdqu ($cmac,&QWP(0,$rounds)); # load cmac
- &mov ($rounds,&DWP(240,$key));
-
- # compose byte-swap control mask for pshufb on stack
- &mov (&DWP(0,"esp"),0x0c0d0e0f);
- &mov (&DWP(4,"esp"),0x08090a0b);
- &mov (&DWP(8,"esp"),0x04050607);
- &mov (&DWP(12,"esp"),0x00010203);
-
- # compose counter increment vector on stack
- &mov ($rounds_,1);
- &xor ($key_,$key_);
- &mov (&DWP(16,"esp"),$rounds_);
- &mov (&DWP(20,"esp"),$key_);
- &mov (&DWP(24,"esp"),$key_);
- &mov (&DWP(28,"esp"),$key_);
-
- &movdqa ($inout3,&QWP(0,"esp")); # bswap mask
- &movdqa ($inout0,$ivec);
-
- &mov ($key_,$key);
- &mov ($rounds_,$rounds);
-
- &pshufb ($ivec,$inout3);
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
- &movups ($in0,&QWP(0,$inp)); # load inp
- &paddq ($ivec,&QWP(16,"esp"));
- &lea ($inp,&QWP(16,$inp));
- &jmp (&label("ccm64_dec_outer"));
-
-&set_label("ccm64_dec_outer",16);
- &xorps ($in0,$inout0); # inp ^= E(ivec)
- &movdqa ($inout0,$ivec);
- &mov ($rounds,$rounds_);
- &movups (&QWP(0,$out),$in0); # save output
- &lea ($out,&DWP(16,$out));
- &pshufb ($inout0,$inout3);
-
- &sub ($len,1);
- &jz (&label("ccm64_dec_break"));
-
- &$movekey ($rndkey0,&QWP(0,$key_));
- &shr ($rounds,1);
- &$movekey ($rndkey1,&QWP(16,$key_));
- &xorps ($in0,$rndkey0);
- &lea ($key,&DWP(32,$key_));
- &xorps ($inout0,$rndkey0);
- &xorps ($cmac,$in0); # cmac^=out
- &$movekey ($rndkey0,&QWP(0,$key));
-
-&set_label("ccm64_dec2_loop");
- &aesenc ($inout0,$rndkey1);
- &dec ($rounds);
- &aesenc ($cmac,$rndkey1);
- &$movekey ($rndkey1,&QWP(16,$key));
- &aesenc ($inout0,$rndkey0);
- &lea ($key,&DWP(32,$key));
- &aesenc ($cmac,$rndkey0);
- &$movekey ($rndkey0,&QWP(0,$key));
- &jnz (&label("ccm64_dec2_loop"));
- &movups ($in0,&QWP(0,$inp)); # load inp
- &paddq ($ivec,&QWP(16,"esp"));
- &aesenc ($inout0,$rndkey1);
- &aesenc ($cmac,$rndkey1);
- &lea ($inp,&QWP(16,$inp));
- &aesenclast ($inout0,$rndkey0);
- &aesenclast ($cmac,$rndkey0);
- &jmp (&label("ccm64_dec_outer"));
-
-&set_label("ccm64_dec_break",16);
- &mov ($key,$key_);
- if ($inline)
- { &aesni_inline_generate1("enc",$cmac,$in0); }
- else
- { &call ("_aesni_encrypt1",$cmac); }
-
- &mov ("esp",&DWP(48,"esp"));
- &mov ($out,&wparam(5));
- &movups (&QWP(0,$out),$cmac);
-&function_end("aesni_ccm64_decrypt_blocks");
-}
-
-######################################################################
-# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
-# size_t blocks, const AES_KEY *key,
-# const char *ivec);
-#
-# Handles only complete blocks, operates on a 32-bit counter and
-# does not update *ivec! (see engine/eng_aesni.c for details)
-#
-# stack layout:
-# 0 pshufb mask
-# 16 vector addend: 0,6,6,6
-# 32 counter-less ivec
-# 48 1st triplet of counter vector
-# 64 2nd triplet of counter vector
-# 80 saved %esp
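-#
-# The 32-bit big-endian counter is pulled out of the ivec with pextrd,
-# maintained as two triplets of consecutive little-endian values,
-# byte-swapped back and or-ed into the counter-less ivec - six counter
-# blocks per iteration without any 128-bit big-endian arithmetic.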
-
-&function_begin("aesni_ctr32_encrypt_blocks");
- &mov ($inp,&wparam(0));
- &mov ($out,&wparam(1));
- &mov ($len,&wparam(2));
- &mov ($key,&wparam(3));
- &mov ($rounds_,&wparam(4));
- &mov ($key_,"esp");
- &sub ("esp",88);
- &and ("esp",-16); # align stack
- &mov (&DWP(80,"esp"),$key_);
-
- &cmp ($len,1);
- &je (&label("ctr32_one_shortcut"));
-
- &movdqu ($inout5,&QWP(0,$rounds_)); # load ivec
-
- # compose byte-swap control mask for pshufb on stack
- &mov (&DWP(0,"esp"),0x0c0d0e0f);
- &mov (&DWP(4,"esp"),0x08090a0b);
- &mov (&DWP(8,"esp"),0x04050607);
- &mov (&DWP(12,"esp"),0x00010203);
-
- # compose counter increment vector on stack
- &mov ($rounds,6);
- &xor ($key_,$key_);
- &mov (&DWP(16,"esp"),$rounds);
- &mov (&DWP(20,"esp"),$rounds);
- &mov (&DWP(24,"esp"),$rounds);
- &mov (&DWP(28,"esp"),$key_);
-
- &pextrd ($rounds_,$inout5,3); # pull 32-bit counter
- &pinsrd ($inout5,$key_,3); # wipe 32-bit counter
-
- &mov ($rounds,&DWP(240,$key)); # key->rounds
-
- # compose 2 vectors of 3x32-bit counters
- &bswap ($rounds_);
- &pxor ($rndkey1,$rndkey1);
- &pxor ($rndkey0,$rndkey0);
- &movdqa ($inout0,&QWP(0,"esp")); # load byte-swap mask
- &pinsrd ($rndkey1,$rounds_,0);
- &lea ($key_,&DWP(3,$rounds_));
- &pinsrd ($rndkey0,$key_,0);
- &inc ($rounds_);
- &pinsrd ($rndkey1,$rounds_,1);
- &inc ($key_);
- &pinsrd ($rndkey0,$key_,1);
- &inc ($rounds_);
- &pinsrd ($rndkey1,$rounds_,2);
- &inc ($key_);
- &pinsrd ($rndkey0,$key_,2);
- &movdqa (&QWP(48,"esp"),$rndkey1); # save 1st triplet
- &pshufb ($rndkey1,$inout0); # byte swap
- &movdqa (&QWP(64,"esp"),$rndkey0); # save 2nd triplet
- &pshufb ($rndkey0,$inout0); # byte swap
-
- &pshufd ($inout0,$rndkey1,3<<6); # place counter to upper dword
- &pshufd ($inout1,$rndkey1,2<<6);
- &cmp ($len,6);
- &jb (&label("ctr32_tail"));
- &movdqa (&QWP(32,"esp"),$inout5); # save counter-less ivec
- &shr ($rounds,1);
- &mov ($key_,$key); # backup $key
- &mov ($rounds_,$rounds); # backup $rounds
- &sub ($len,6);
- &jmp (&label("ctr32_loop6"));
-
-&set_label("ctr32_loop6",16);
- &pshufd ($inout2,$rndkey1,1<<6);
- &movdqa ($rndkey1,&QWP(32,"esp")); # pull counter-less ivec
- &pshufd ($inout3,$rndkey0,3<<6);
- &por ($inout0,$rndkey1); # merge counter-less ivec
- &pshufd ($inout4,$rndkey0,2<<6);
- &por ($inout1,$rndkey1);
- &pshufd ($inout5,$rndkey0,1<<6);
- &por ($inout2,$rndkey1);
- &por ($inout3,$rndkey1);
- &por ($inout4,$rndkey1);
- &por ($inout5,$rndkey1);
-
- # inlining _aesni_encrypt6's prologue gives ~4% improvement...
- &$movekey ($rndkey0,&QWP(0,$key_));
- &$movekey ($rndkey1,&QWP(16,$key_));
- &lea ($key,&DWP(32,$key_));
- &dec ($rounds);
- &pxor ($inout0,$rndkey0);
- &pxor ($inout1,$rndkey0);
- &aesenc ($inout0,$rndkey1);
- &pxor ($inout2,$rndkey0);
- &aesenc ($inout1,$rndkey1);
- &pxor ($inout3,$rndkey0);
- &aesenc ($inout2,$rndkey1);
- &pxor ($inout4,$rndkey0);
- &aesenc ($inout3,$rndkey1);
- &pxor ($inout5,$rndkey0);
- &aesenc ($inout4,$rndkey1);
- &$movekey ($rndkey0,&QWP(0,$key));
- &aesenc ($inout5,$rndkey1);
-
- &call (&label("_aesni_encrypt6_enter"));
-
- &movups ($rndkey1,&QWP(0,$inp));
- &movups ($rndkey0,&QWP(0x10,$inp));
- &xorps ($inout0,$rndkey1);
- &movups ($rndkey1,&QWP(0x20,$inp));
- &xorps ($inout1,$rndkey0);
- &movups (&QWP(0,$out),$inout0);
- &movdqa ($rndkey0,&QWP(16,"esp")); # load increment
- &xorps ($inout2,$rndkey1);
- &movdqa ($rndkey1,&QWP(48,"esp")); # load 1st triplet
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
-
- &paddd ($rndkey1,$rndkey0); # 1st triplet increment
- &paddd ($rndkey0,&QWP(64,"esp")); # 2nd triplet increment
- &movdqa ($inout0,&QWP(0,"esp")); # load byte swap mask
-
- &movups ($inout1,&QWP(0x30,$inp));
- &movups ($inout2,&QWP(0x40,$inp));
- &xorps ($inout3,$inout1);
- &movups ($inout1,&QWP(0x50,$inp));
- &lea ($inp,&DWP(0x60,$inp));
- &movdqa (&QWP(48,"esp"),$rndkey1); # save 1st triplet
- &pshufb ($rndkey1,$inout0); # byte swap
- &xorps ($inout4,$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &xorps ($inout5,$inout1);
- &movdqa (&QWP(64,"esp"),$rndkey0); # save 2nd triplet
- &pshufb ($rndkey0,$inout0); # byte swap
- &movups (&QWP(0x40,$out),$inout4);
- &pshufd ($inout0,$rndkey1,3<<6);
- &movups (&QWP(0x50,$out),$inout5);
- &lea ($out,&DWP(0x60,$out));
-
- &mov ($rounds,$rounds_);
- &pshufd ($inout1,$rndkey1,2<<6);
- &sub ($len,6);
- &jnc (&label("ctr32_loop6"));
-
- &add ($len,6);
- &jz (&label("ctr32_ret"));
- &mov ($key,$key_);
- &lea ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
- &movdqa ($inout5,&QWP(32,"esp")); # pull count-less ivec
-
-&set_label("ctr32_tail");
- &por ($inout0,$inout5);
- &cmp ($len,2);
- &jb (&label("ctr32_one"));
-
- &pshufd ($inout2,$rndkey1,1<<6);
- &por ($inout1,$inout5);
- &je (&label("ctr32_two"));
-
- &pshufd ($inout3,$rndkey0,3<<6);
- &por ($inout2,$inout5);
- &cmp ($len,4);
- &jb (&label("ctr32_three"));
-
- &pshufd ($inout4,$rndkey0,2<<6);
- &por ($inout3,$inout5);
- &je (&label("ctr32_four"));
-
- &por ($inout4,$inout5);
- &call ("_aesni_encrypt6");
- &movups ($rndkey1,&QWP(0,$inp));
- &movups ($rndkey0,&QWP(0x10,$inp));
- &xorps ($inout0,$rndkey1);
- &movups ($rndkey1,&QWP(0x20,$inp));
- &xorps ($inout1,$rndkey0);
- &movups ($rndkey0,&QWP(0x30,$inp));
- &xorps ($inout2,$rndkey1);
- &movups ($rndkey1,&QWP(0x40,$inp));
- &xorps ($inout3,$rndkey0);
- &movups (&QWP(0,$out),$inout0);
- &xorps ($inout4,$rndkey1);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &movups (&QWP(0x40,$out),$inout4);
- &jmp (&label("ctr32_ret"));
-
-&set_label("ctr32_one_shortcut",16);
- &movups ($inout0,&QWP(0,$rounds_)); # load ivec
- &mov ($rounds,&DWP(240,$key));
-
-&set_label("ctr32_one");
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
- &movups ($in0,&QWP(0,$inp));
- &xorps ($in0,$inout0);
- &movups (&QWP(0,$out),$in0);
- &jmp (&label("ctr32_ret"));
-
-&set_label("ctr32_two",16);
- &call ("_aesni_encrypt3");
- &movups ($inout3,&QWP(0,$inp));
- &movups ($inout4,&QWP(0x10,$inp));
- &xorps ($inout0,$inout3);
- &xorps ($inout1,$inout4);
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &jmp (&label("ctr32_ret"));
-
-&set_label("ctr32_three",16);
- &call ("_aesni_encrypt3");
- &movups ($inout3,&QWP(0,$inp));
- &movups ($inout4,&QWP(0x10,$inp));
- &xorps ($inout0,$inout3);
- &movups ($inout5,&QWP(0x20,$inp));
- &xorps ($inout1,$inout4);
- &movups (&QWP(0,$out),$inout0);
- &xorps ($inout2,$inout5);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &jmp (&label("ctr32_ret"));
-
-&set_label("ctr32_four",16);
- &call ("_aesni_encrypt4");
- &movups ($inout4,&QWP(0,$inp));
- &movups ($inout5,&QWP(0x10,$inp));
- &movups ($rndkey1,&QWP(0x20,$inp));
- &xorps ($inout0,$inout4);
- &movups ($rndkey0,&QWP(0x30,$inp));
- &xorps ($inout1,$inout5);
- &movups (&QWP(0,$out),$inout0);
- &xorps ($inout2,$rndkey1);
- &movups (&QWP(0x10,$out),$inout1);
- &xorps ($inout3,$rndkey0);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
-
-&set_label("ctr32_ret");
- &mov ("esp",&DWP(80,"esp"));
-&function_end("aesni_ctr32_encrypt_blocks");
-
-######################################################################
-# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
-#	const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
-{ my ($tweak,$twtmp,$twres,$twmask)=($rndkey1,$rndkey0,$inout0,$inout1);
-
-&function_begin("aesni_xts_encrypt");
- &mov ($key,&wparam(4)); # key2
- &mov ($inp,&wparam(5)); # clear-text tweak
-
- &mov ($rounds,&DWP(240,$key)); # key2->rounds
- &movups ($inout0,&QWP(0,$inp));
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
-
- &mov ($inp,&wparam(0));
- &mov ($out,&wparam(1));
- &mov ($len,&wparam(2));
- &mov ($key,&wparam(3)); # key1
-
- &mov ($key_,"esp");
- &sub ("esp",16*7+8);
- &mov ($rounds,&DWP(240,$key)); # key1->rounds
- &and ("esp",-16); # align stack
-
- &mov (&DWP(16*6+0,"esp"),0x87); # compose the magic constant
- &mov (&DWP(16*6+4,"esp"),0);
- &mov (&DWP(16*6+8,"esp"),1);
- &mov (&DWP(16*6+12,"esp"),0);
- &mov (&DWP(16*7+0,"esp"),$len); # save original $len
- &mov (&DWP(16*7+4,"esp"),$key_); # save original %esp
-
- &movdqa ($tweak,$inout0);
- &pxor ($twtmp,$twtmp);
- &movdqa ($twmask,&QWP(6*16,"esp")); # 0x0...010...87
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
-
- &and ($len,-16);
- &mov ($key_,$key); # backup $key
- &mov ($rounds_,$rounds); # backup $rounds
- &sub ($len,16*6);
- &jc (&label("xts_enc_short"));
-
- &shr ($rounds,1);
- &mov ($rounds_,$rounds);
- &jmp (&label("xts_enc_loop6"));
-
-&set_label("xts_enc_loop6",16);
- for ($i=0;$i<4;$i++) {
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &movdqa (&QWP(16*$i,"esp"),$tweak);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- }
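-	# Each pass above multiplies the tweak by x in GF(2^128) modulo
-	# x^128+x^7+x^2+x+1. In 128-bit terms (an illustrative sketch):
-	#	carry = tweak >> 127;
-	#	tweak = (tweak << 1) ^ (carry ? 0x87 : 0);
-	# pcmpgtd broadcasts the sign bits, pshufd 0x13 lines them up,
-	# and pand with $twmask = (0x87,0,1,0) yields the branchless
-	# carry-and-residue fix-up.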
- &pshufd ($inout5,$twtmp,0x13);
- &movdqa (&QWP(16*$i++,"esp"),$tweak);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &$movekey ($rndkey0,&QWP(0,$key_));
- &pand ($inout5,$twmask); # isolate carry and residue
- &movups ($inout0,&QWP(0,$inp)); # load input
- &pxor ($inout5,$tweak);
-
- # inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
- &movdqu ($inout1,&QWP(16*1,$inp));
- &xorps ($inout0,$rndkey0); # input^=rndkey[0]
- &movdqu ($inout2,&QWP(16*2,$inp));
- &pxor ($inout1,$rndkey0);
- &movdqu ($inout3,&QWP(16*3,$inp));
- &pxor ($inout2,$rndkey0);
- &movdqu ($inout4,&QWP(16*4,$inp));
- &pxor ($inout3,$rndkey0);
- &movdqu ($rndkey1,&QWP(16*5,$inp));
- &pxor ($inout4,$rndkey0);
- &lea ($inp,&DWP(16*6,$inp));
- &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
- &movdqa (&QWP(16*$i,"esp"),$inout5); # save last tweak
- &pxor ($inout5,$rndkey1);
-
- &$movekey ($rndkey1,&QWP(16,$key_));
- &lea ($key,&DWP(32,$key_));
- &pxor ($inout1,&QWP(16*1,"esp"));
- &aesenc ($inout0,$rndkey1);
- &pxor ($inout2,&QWP(16*2,"esp"));
- &aesenc ($inout1,$rndkey1);
- &pxor ($inout3,&QWP(16*3,"esp"));
- &dec ($rounds);
- &aesenc ($inout2,$rndkey1);
- &pxor ($inout4,&QWP(16*4,"esp"));
- &aesenc ($inout3,$rndkey1);
- &pxor ($inout5,$rndkey0);
- &aesenc ($inout4,$rndkey1);
- &$movekey ($rndkey0,&QWP(0,$key));
- &aesenc ($inout5,$rndkey1);
- &call (&label("_aesni_encrypt6_enter"));
-
- &movdqa ($tweak,&QWP(16*5,"esp")); # last tweak
- &pxor ($twtmp,$twtmp);
- &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
- &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
- &xorps ($inout1,&QWP(16*1,"esp"));
- &movups (&QWP(16*0,$out),$inout0); # write output
- &xorps ($inout2,&QWP(16*2,"esp"));
- &movups (&QWP(16*1,$out),$inout1);
- &xorps ($inout3,&QWP(16*3,"esp"));
- &movups (&QWP(16*2,$out),$inout2);
- &xorps ($inout4,&QWP(16*4,"esp"));
- &movups (&QWP(16*3,$out),$inout3);
- &xorps ($inout5,$tweak);
- &movups (&QWP(16*4,$out),$inout4);
- &pshufd ($twres,$twtmp,0x13);
- &movups (&QWP(16*5,$out),$inout5);
- &lea ($out,&DWP(16*6,$out));
- &movdqa ($twmask,&QWP(16*6,"esp")); # 0x0...010...87
-
- &pxor ($twtmp,$twtmp);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &mov ($rounds,$rounds_); # restore $rounds
- &pxor ($tweak,$twres);
-
- &sub ($len,16*6);
- &jnc (&label("xts_enc_loop6"));
-
- &lea ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
- &mov ($key,$key_); # restore $key
- &mov ($rounds_,$rounds);
-
-&set_label("xts_enc_short");
- &add ($len,16*6);
- &jz (&label("xts_enc_done6x"));
-
- &movdqa ($inout3,$tweak); # put aside previous tweak
- &cmp ($len,0x20);
- &jb (&label("xts_enc_one"));
-
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- &je (&label("xts_enc_two"));
-
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &movdqa ($inout4,$tweak); # put aside previous tweak
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- &cmp ($len,0x40);
- &jb (&label("xts_enc_three"));
-
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &movdqa ($inout5,$tweak); # put aside previous tweak
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- &movdqa (&QWP(16*0,"esp"),$inout3);
- &movdqa (&QWP(16*1,"esp"),$inout4);
- &je (&label("xts_enc_four"));
-
- &movdqa (&QWP(16*2,"esp"),$inout5);
- &pshufd ($inout5,$twtmp,0x13);
- &movdqa (&QWP(16*3,"esp"),$tweak);
- &paddq ($tweak,$tweak); # &psllq($inout0,1);
- &pand ($inout5,$twmask); # isolate carry and residue
- &pxor ($inout5,$tweak);
-
- &movdqu ($inout0,&QWP(16*0,$inp)); # load input
- &movdqu ($inout1,&QWP(16*1,$inp));
- &movdqu ($inout2,&QWP(16*2,$inp));
- &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
- &movdqu ($inout3,&QWP(16*3,$inp));
- &pxor ($inout1,&QWP(16*1,"esp"));
- &movdqu ($inout4,&QWP(16*4,$inp));
- &pxor ($inout2,&QWP(16*2,"esp"));
- &lea ($inp,&DWP(16*5,$inp));
- &pxor ($inout3,&QWP(16*3,"esp"));
- &movdqa (&QWP(16*4,"esp"),$inout5); # save last tweak
- &pxor ($inout4,$inout5);
-
- &call ("_aesni_encrypt6");
-
- &movaps ($tweak,&QWP(16*4,"esp")); # last tweak
- &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
- &xorps ($inout1,&QWP(16*1,"esp"));
- &xorps ($inout2,&QWP(16*2,"esp"));
- &movups (&QWP(16*0,$out),$inout0); # write output
- &xorps ($inout3,&QWP(16*3,"esp"));
- &movups (&QWP(16*1,$out),$inout1);
- &xorps ($inout4,$tweak);
- &movups (&QWP(16*2,$out),$inout2);
- &movups (&QWP(16*3,$out),$inout3);
- &movups (&QWP(16*4,$out),$inout4);
- &lea ($out,&DWP(16*5,$out));
- &jmp (&label("xts_enc_done"));
-
-&set_label("xts_enc_one",16);
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &lea ($inp,&DWP(16*1,$inp));
- &xorps ($inout0,$inout3); # input^=tweak
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
- &xorps ($inout0,$inout3); # output^=tweak
- &movups (&QWP(16*0,$out),$inout0); # write output
- &lea ($out,&DWP(16*1,$out));
-
- &movdqa ($tweak,$inout3); # last tweak
- &jmp (&label("xts_enc_done"));
-
-&set_label("xts_enc_two",16);
- &movaps ($inout4,$tweak); # put aside last tweak
-
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &movups ($inout1,&QWP(16*1,$inp));
- &lea ($inp,&DWP(16*2,$inp));
- &xorps ($inout0,$inout3); # input^=tweak
- &xorps ($inout1,$inout4);
- &xorps ($inout2,$inout2);
-
- &call ("_aesni_encrypt3");
-
- &xorps ($inout0,$inout3); # output^=tweak
- &xorps ($inout1,$inout4);
- &movups (&QWP(16*0,$out),$inout0); # write output
- &movups (&QWP(16*1,$out),$inout1);
- &lea ($out,&DWP(16*2,$out));
-
- &movdqa ($tweak,$inout4); # last tweak
- &jmp (&label("xts_enc_done"));
-
-&set_label("xts_enc_three",16);
- &movaps ($inout5,$tweak); # put aside last tweak
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &movups ($inout1,&QWP(16*1,$inp));
- &movups ($inout2,&QWP(16*2,$inp));
- &lea ($inp,&DWP(16*3,$inp));
- &xorps ($inout0,$inout3); # input^=tweak
- &xorps ($inout1,$inout4);
- &xorps ($inout2,$inout5);
-
- &call ("_aesni_encrypt3");
-
- &xorps ($inout0,$inout3); # output^=tweak
- &xorps ($inout1,$inout4);
- &xorps ($inout2,$inout5);
- &movups (&QWP(16*0,$out),$inout0); # write output
- &movups (&QWP(16*1,$out),$inout1);
- &movups (&QWP(16*2,$out),$inout2);
- &lea ($out,&DWP(16*3,$out));
-
- &movdqa ($tweak,$inout5); # last tweak
- &jmp (&label("xts_enc_done"));
-
-&set_label("xts_enc_four",16);
- &movaps ($inout4,$tweak); # put aside last tweak
-
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &movups ($inout1,&QWP(16*1,$inp));
- &movups ($inout2,&QWP(16*2,$inp));
- &xorps ($inout0,&QWP(16*0,"esp")); # input^=tweak
- &movups ($inout3,&QWP(16*3,$inp));
- &lea ($inp,&DWP(16*4,$inp));
- &xorps ($inout1,&QWP(16*1,"esp"));
- &xorps ($inout2,$inout5);
- &xorps ($inout3,$inout4);
-
- &call ("_aesni_encrypt4");
-
- &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
- &xorps ($inout1,&QWP(16*1,"esp"));
- &xorps ($inout2,$inout5);
- &movups (&QWP(16*0,$out),$inout0); # write output
- &xorps ($inout3,$inout4);
- &movups (&QWP(16*1,$out),$inout1);
- &movups (&QWP(16*2,$out),$inout2);
- &movups (&QWP(16*3,$out),$inout3);
- &lea ($out,&DWP(16*4,$out));
-
- &movdqa ($tweak,$inout4); # last tweak
- &jmp (&label("xts_enc_done"));
-
-&set_label("xts_enc_done6x",16); # $tweak is pre-calculated
- &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
- &and ($len,15);
- &jz (&label("xts_enc_ret"));
- &movdqa ($inout3,$tweak);
- &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
- &jmp (&label("xts_enc_steal"));
-
-&set_label("xts_enc_done",16);
- &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
- &pxor ($twtmp,$twtmp);
- &and ($len,15);
- &jz (&label("xts_enc_ret"));
-
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
- &pshufd ($inout3,$twtmp,0x13);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($inout3,&QWP(16*6,"esp")); # isolate carry and residue
- &pxor ($inout3,$tweak);
-
-&set_label("xts_enc_steal");
- &movz ($rounds,&BP(0,$inp));
- &movz ($key,&BP(-16,$out));
- &lea ($inp,&DWP(1,$inp));
- &mov (&BP(-16,$out),&LB($rounds));
- &mov (&BP(0,$out),&LB($key));
- &lea ($out,&DWP(1,$out));
- &sub ($len,1);
- &jnz (&label("xts_enc_steal"));
-
- &sub ($out,&DWP(16*7+0,"esp")); # rewind $out
- &mov ($key,$key_); # restore $key
- &mov ($rounds,$rounds_); # restore $rounds
-
- &movups ($inout0,&QWP(-16,$out)); # load input
- &xorps ($inout0,$inout3); # input^=tweak
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
- &xorps ($inout0,$inout3); # output^=tweak
- &movups (&QWP(-16,$out),$inout0); # write output
-
-&set_label("xts_enc_ret");
- &mov ("esp",&DWP(16*7+4,"esp")); # restore %esp
-&function_end("aesni_xts_encrypt");
-
-&function_begin("aesni_xts_decrypt");
- &mov ($key,&wparam(4)); # key2
- &mov ($inp,&wparam(5)); # clear-text tweak
-
- &mov ($rounds,&DWP(240,$key)); # key2->rounds
- &movups ($inout0,&QWP(0,$inp));
- if ($inline)
- { &aesni_inline_generate1("enc"); }
- else
- { &call ("_aesni_encrypt1"); }
-
- &mov ($inp,&wparam(0));
- &mov ($out,&wparam(1));
- &mov ($len,&wparam(2));
- &mov ($key,&wparam(3)); # key1
-
- &mov ($key_,"esp");
- &sub ("esp",16*7+8);
- &and ("esp",-16); # align stack
-
- &xor ($rounds_,$rounds_); # if(len%16) len-=16;
- &test ($len,15);
- &setnz (&LB($rounds_));
- &shl ($rounds_,4);
- &sub ($len,$rounds_);
-
- &mov (&DWP(16*6+0,"esp"),0x87); # compose the magic constant
- &mov (&DWP(16*6+4,"esp"),0);
- &mov (&DWP(16*6+8,"esp"),1);
- &mov (&DWP(16*6+12,"esp"),0);
- &mov (&DWP(16*7+0,"esp"),$len); # save original $len
- &mov (&DWP(16*7+4,"esp"),$key_); # save original %esp
-
- &mov ($rounds,&DWP(240,$key)); # key1->rounds
- &mov ($key_,$key); # backup $key
- &mov ($rounds_,$rounds); # backup $rounds
-
- &movdqa ($tweak,$inout0);
- &pxor ($twtmp,$twtmp);
- &movdqa ($twmask,&QWP(6*16,"esp")); # 0x0...010...87
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
-
- &and ($len,-16);
- &sub ($len,16*6);
- &jc (&label("xts_dec_short"));
-
- &shr ($rounds,1);
- &mov ($rounds_,$rounds);
- &jmp (&label("xts_dec_loop6"));
-
-&set_label("xts_dec_loop6",16);
- for ($i=0;$i<4;$i++) {
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &movdqa (&QWP(16*$i,"esp"),$tweak);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- }
- &pshufd ($inout5,$twtmp,0x13);
- &movdqa (&QWP(16*$i++,"esp"),$tweak);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &$movekey ($rndkey0,&QWP(0,$key_));
- &pand ($inout5,$twmask); # isolate carry and residue
- &movups ($inout0,&QWP(0,$inp)); # load input
- &pxor ($inout5,$tweak);
-
- # inline _aesni_encrypt6 prologue and flip xor with tweak and key[0]
- &movdqu ($inout1,&QWP(16*1,$inp));
- &xorps ($inout0,$rndkey0); # input^=rndkey[0]
- &movdqu ($inout2,&QWP(16*2,$inp));
- &pxor ($inout1,$rndkey0);
- &movdqu ($inout3,&QWP(16*3,$inp));
- &pxor ($inout2,$rndkey0);
- &movdqu ($inout4,&QWP(16*4,$inp));
- &pxor ($inout3,$rndkey0);
- &movdqu ($rndkey1,&QWP(16*5,$inp));
- &pxor ($inout4,$rndkey0);
- &lea ($inp,&DWP(16*6,$inp));
- &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
- &movdqa (&QWP(16*$i,"esp"),$inout5); # save last tweak
- &pxor ($inout5,$rndkey1);
-
- &$movekey ($rndkey1,&QWP(16,$key_));
- &lea ($key,&DWP(32,$key_));
- &pxor ($inout1,&QWP(16*1,"esp"));
- &aesdec ($inout0,$rndkey1);
- &pxor ($inout2,&QWP(16*2,"esp"));
- &aesdec ($inout1,$rndkey1);
- &pxor ($inout3,&QWP(16*3,"esp"));
- &dec ($rounds);
- &aesdec ($inout2,$rndkey1);
- &pxor ($inout4,&QWP(16*4,"esp"));
- &aesdec ($inout3,$rndkey1);
- &pxor ($inout5,$rndkey0);
- &aesdec ($inout4,$rndkey1);
- &$movekey ($rndkey0,&QWP(0,$key));
- &aesdec ($inout5,$rndkey1);
- &call (&label("_aesni_decrypt6_enter"));
-
- &movdqa ($tweak,&QWP(16*5,"esp")); # last tweak
- &pxor ($twtmp,$twtmp);
- &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
- &pcmpgtd ($twtmp,$tweak); # broadcast upper bits
- &xorps ($inout1,&QWP(16*1,"esp"));
- &movups (&QWP(16*0,$out),$inout0); # write output
- &xorps ($inout2,&QWP(16*2,"esp"));
- &movups (&QWP(16*1,$out),$inout1);
- &xorps ($inout3,&QWP(16*3,"esp"));
- &movups (&QWP(16*2,$out),$inout2);
- &xorps ($inout4,&QWP(16*4,"esp"));
- &movups (&QWP(16*3,$out),$inout3);
- &xorps ($inout5,$tweak);
- &movups (&QWP(16*4,$out),$inout4);
- &pshufd ($twres,$twtmp,0x13);
- &movups (&QWP(16*5,$out),$inout5);
- &lea ($out,&DWP(16*6,$out));
- &movdqa ($twmask,&QWP(16*6,"esp")); # 0x0...010...87
-
- &pxor ($twtmp,$twtmp);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &mov ($rounds,$rounds_); # restore $rounds
- &pxor ($tweak,$twres);
-
- &sub ($len,16*6);
- &jnc (&label("xts_dec_loop6"));
-
- &lea ($rounds,&DWP(1,"",$rounds,2)); # restore $rounds
- &mov ($key,$key_); # restore $key
- &mov ($rounds_,$rounds);
-
-&set_label("xts_dec_short");
- &add ($len,16*6);
- &jz (&label("xts_dec_done6x"));
-
- &movdqa ($inout3,$tweak); # put aside previous tweak
- &cmp ($len,0x20);
- &jb (&label("xts_dec_one"));
-
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- &je (&label("xts_dec_two"));
-
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &movdqa ($inout4,$tweak); # put aside previous tweak
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- &cmp ($len,0x40);
- &jb (&label("xts_dec_three"));
-
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &movdqa ($inout5,$tweak); # put aside previous tweak
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
- &movdqa (&QWP(16*0,"esp"),$inout3);
- &movdqa (&QWP(16*1,"esp"),$inout4);
- &je (&label("xts_dec_four"));
-
- &movdqa (&QWP(16*2,"esp"),$inout5);
- &pshufd ($inout5,$twtmp,0x13);
- &movdqa (&QWP(16*3,"esp"),$tweak);
- &paddq ($tweak,$tweak); # &psllq($inout0,1);
- &pand ($inout5,$twmask); # isolate carry and residue
- &pxor ($inout5,$tweak);
-
- &movdqu ($inout0,&QWP(16*0,$inp)); # load input
- &movdqu ($inout1,&QWP(16*1,$inp));
- &movdqu ($inout2,&QWP(16*2,$inp));
- &pxor ($inout0,&QWP(16*0,"esp")); # input^=tweak
- &movdqu ($inout3,&QWP(16*3,$inp));
- &pxor ($inout1,&QWP(16*1,"esp"));
- &movdqu ($inout4,&QWP(16*4,$inp));
- &pxor ($inout2,&QWP(16*2,"esp"));
- &lea ($inp,&DWP(16*5,$inp));
- &pxor ($inout3,&QWP(16*3,"esp"));
- &movdqa (&QWP(16*4,"esp"),$inout5); # save last tweak
- &pxor ($inout4,$inout5);
-
- &call ("_aesni_decrypt6");
-
- &movaps ($tweak,&QWP(16*4,"esp")); # last tweak
- &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
- &xorps ($inout1,&QWP(16*1,"esp"));
- &xorps ($inout2,&QWP(16*2,"esp"));
- &movups (&QWP(16*0,$out),$inout0); # write output
- &xorps ($inout3,&QWP(16*3,"esp"));
- &movups (&QWP(16*1,$out),$inout1);
- &xorps ($inout4,$tweak);
- &movups (&QWP(16*2,$out),$inout2);
- &movups (&QWP(16*3,$out),$inout3);
- &movups (&QWP(16*4,$out),$inout4);
- &lea ($out,&DWP(16*5,$out));
- &jmp (&label("xts_dec_done"));
-
-&set_label("xts_dec_one",16);
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &lea ($inp,&DWP(16*1,$inp));
- &xorps ($inout0,$inout3); # input^=tweak
- if ($inline)
- { &aesni_inline_generate1("dec"); }
- else
- { &call ("_aesni_decrypt1"); }
- &xorps ($inout0,$inout3); # output^=tweak
- &movups (&QWP(16*0,$out),$inout0); # write output
- &lea ($out,&DWP(16*1,$out));
-
- &movdqa ($tweak,$inout3); # last tweak
- &jmp (&label("xts_dec_done"));
-
-&set_label("xts_dec_two",16);
- &movaps ($inout4,$tweak); # put aside last tweak
-
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &movups ($inout1,&QWP(16*1,$inp));
- &lea ($inp,&DWP(16*2,$inp));
- &xorps ($inout0,$inout3); # input^=tweak
- &xorps ($inout1,$inout4);
-
- &call ("_aesni_decrypt3");
-
- &xorps ($inout0,$inout3); # output^=tweak
- &xorps ($inout1,$inout4);
- &movups (&QWP(16*0,$out),$inout0); # write output
- &movups (&QWP(16*1,$out),$inout1);
- &lea ($out,&DWP(16*2,$out));
-
- &movdqa ($tweak,$inout4); # last tweak
- &jmp (&label("xts_dec_done"));
-
-&set_label("xts_dec_three",16);
- &movaps ($inout5,$tweak); # put aside last tweak
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &movups ($inout1,&QWP(16*1,$inp));
- &movups ($inout2,&QWP(16*2,$inp));
- &lea ($inp,&DWP(16*3,$inp));
- &xorps ($inout0,$inout3); # input^=tweak
- &xorps ($inout1,$inout4);
- &xorps ($inout2,$inout5);
-
- &call ("_aesni_decrypt3");
-
- &xorps ($inout0,$inout3); # output^=tweak
- &xorps ($inout1,$inout4);
- &xorps ($inout2,$inout5);
- &movups (&QWP(16*0,$out),$inout0); # write output
- &movups (&QWP(16*1,$out),$inout1);
- &movups (&QWP(16*2,$out),$inout2);
- &lea ($out,&DWP(16*3,$out));
-
- &movdqa ($tweak,$inout5); # last tweak
- &jmp (&label("xts_dec_done"));
-
-&set_label("xts_dec_four",16);
- &movaps ($inout4,$tweak); # put aside last tweak
-
- &movups ($inout0,&QWP(16*0,$inp)); # load input
- &movups ($inout1,&QWP(16*1,$inp));
- &movups ($inout2,&QWP(16*2,$inp));
- &xorps ($inout0,&QWP(16*0,"esp")); # input^=tweak
- &movups ($inout3,&QWP(16*3,$inp));
- &lea ($inp,&DWP(16*4,$inp));
- &xorps ($inout1,&QWP(16*1,"esp"));
- &xorps ($inout2,$inout5);
- &xorps ($inout3,$inout4);
-
- &call ("_aesni_decrypt4");
-
- &xorps ($inout0,&QWP(16*0,"esp")); # output^=tweak
- &xorps ($inout1,&QWP(16*1,"esp"));
- &xorps ($inout2,$inout5);
- &movups (&QWP(16*0,$out),$inout0); # write output
- &xorps ($inout3,$inout4);
- &movups (&QWP(16*1,$out),$inout1);
- &movups (&QWP(16*2,$out),$inout2);
- &movups (&QWP(16*3,$out),$inout3);
- &lea ($out,&DWP(16*4,$out));
-
- &movdqa ($tweak,$inout4); # last tweak
- &jmp (&label("xts_dec_done"));
-
-&set_label("xts_dec_done6x",16); # $tweak is pre-calculated
- &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
- &and ($len,15);
- &jz (&label("xts_dec_ret"));
- &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
- &jmp (&label("xts_dec_only_one_more"));
-
-&set_label("xts_dec_done",16);
- &mov ($len,&DWP(16*7+0,"esp")); # restore original $len
- &pxor ($twtmp,$twtmp);
- &and ($len,15);
- &jz (&label("xts_dec_ret"));
-
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &mov (&DWP(16*7+0,"esp"),$len); # save $len%16
- &pshufd ($twres,$twtmp,0x13);
- &pxor ($twtmp,$twtmp);
- &movdqa ($twmask,&QWP(16*6,"esp"));
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($twres,$twmask); # isolate carry and residue
- &pcmpgtd($twtmp,$tweak); # broadcast upper bits
- &pxor ($tweak,$twres);
-
-&set_label("xts_dec_only_one_more");
- &pshufd ($inout3,$twtmp,0x13);
- &movdqa ($inout4,$tweak); # put aside previous tweak
- &paddq ($tweak,$tweak); # &psllq($tweak,1);
- &pand ($inout3,$twmask); # isolate carry and residue
- &pxor ($inout3,$tweak);
-
- &mov ($key,$key_); # restore $key
- &mov ($rounds,$rounds_); # restore $rounds
-
- &movups ($inout0,&QWP(0,$inp)); # load input
- &xorps ($inout0,$inout3); # input^=tweak
- if ($inline)
- { &aesni_inline_generate1("dec"); }
- else
- { &call ("_aesni_decrypt1"); }
- &xorps ($inout0,$inout3); # output^=tweak
- &movups (&QWP(0,$out),$inout0); # write output
-
-&set_label("xts_dec_steal");
- &movz ($rounds,&BP(16,$inp));
- &movz ($key,&BP(0,$out));
- &lea ($inp,&DWP(1,$inp));
- &mov (&BP(0,$out),&LB($rounds));
- &mov (&BP(16,$out),&LB($key));
- &lea ($out,&DWP(1,$out));
- &sub ($len,1);
- &jnz (&label("xts_dec_steal"));
-
- &sub ($out,&DWP(16*7+0,"esp")); # rewind $out
- &mov ($key,$key_); # restore $key
- &mov ($rounds,$rounds_); # restore $rounds
-
- &movups ($inout0,&QWP(0,$out)); # load input
- &xorps ($inout0,$inout4); # input^=tweak
- if ($inline)
- { &aesni_inline_generate1("dec"); }
- else
- { &call ("_aesni_decrypt1"); }
- &xorps ($inout0,$inout4); # output^=tweak
- &movups (&QWP(0,$out),$inout0); # write output
-
-&set_label("xts_dec_ret");
- &mov ("esp",&DWP(16*7+4,"esp")); # restore %esp
-&function_end("aesni_xts_decrypt");
-}
-}
-
-######################################################################
-# void $PREFIX_cbc_encrypt (const void *inp, void *out,
-# size_t length, const AES_KEY *key,
-# unsigned char *ivp, const int enc);
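A hedged usage sketch for the prototype above, assuming $PREFIX expands to "aesni" and declaring the generated entry points by hand (they appear in no public header); enc selects encryption when nonzero, and *ivp is updated in place so chained calls continue the stream:

    #include <stddef.h>

    /* Minimal mirror of OpenSSL's AES_KEY layout (aes.h). */
    typedef struct { unsigned int rd_key[60]; int rounds; } AES_KEY;

    /* Assumed generated names; see the key-setup routines below. */
    extern int  aesni_set_encrypt_key(const unsigned char *userKey,
                                      int bits, AES_KEY *key);
    extern void aesni_cbc_encrypt(const void *inp, void *out,
                                  size_t length, const AES_KEY *key,
                                  unsigned char *ivp, const int enc);

    int cbc_encrypt_buf(const unsigned char k[16], unsigned char iv[16],
                        const void *in, void *out, size_t len)
    {
        AES_KEY ks;
        if (aesni_set_encrypt_key(k, 128, &ks) != 0)
            return -1;                  /* bad pointer or key bits */
        aesni_cbc_encrypt(in, out, len, &ks, iv, 1 /* encrypt */);
        return 0;                       /* iv now holds the last block */
    }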
-&function_begin("${PREFIX}_cbc_encrypt");
- &mov ($inp,&wparam(0));
- &mov ($rounds_,"esp");
- &mov ($out,&wparam(1));
- &sub ($rounds_,24);
- &mov ($len,&wparam(2));
- &and ($rounds_,-16);
- &mov ($key,&wparam(3));
- &mov ($key_,&wparam(4));
- &test ($len,$len);
- &jz (&label("cbc_abort"));
-
- &cmp (&wparam(5),0);
- &xchg ($rounds_,"esp"); # alloca
- &movups ($ivec,&QWP(0,$key_)); # load IV
- &mov ($rounds,&DWP(240,$key));
- &mov ($key_,$key); # backup $key
- &mov (&DWP(16,"esp"),$rounds_); # save original %esp
- &mov ($rounds_,$rounds); # backup $rounds
- &je (&label("cbc_decrypt"));
-
- &movaps ($inout0,$ivec);
- &cmp ($len,16);
- &jb (&label("cbc_enc_tail"));
- &sub ($len,16);
- &jmp (&label("cbc_enc_loop"));
-
-&set_label("cbc_enc_loop",16);
- &movups ($ivec,&QWP(0,$inp)); # input actually
- &lea ($inp,&DWP(16,$inp));
- if ($inline)
- { &aesni_inline_generate1("enc",$inout0,$ivec); }
- else
- { &xorps($inout0,$ivec); &call("_aesni_encrypt1"); }
- &mov ($rounds,$rounds_); # restore $rounds
- &mov ($key,$key_); # restore $key
- &movups (&QWP(0,$out),$inout0); # store output
- &lea ($out,&DWP(16,$out));
- &sub ($len,16);
- &jnc (&label("cbc_enc_loop"));
- &add ($len,16);
- &jnz (&label("cbc_enc_tail"));
- &movaps ($ivec,$inout0);
- &jmp (&label("cbc_ret"));
-
-&set_label("cbc_enc_tail");
- &mov ("ecx",$len); # zaps $rounds
- &data_word(0xA4F3F689); # rep movsb
- &mov ("ecx",16); # zero tail
- &sub ("ecx",$len);
- &xor ("eax","eax"); # zaps $len
- &data_word(0xAAF3F689); # rep stosb
- &lea ($out,&DWP(-16,$out)); # rewind $out by 1 block
- &mov ($rounds,$rounds_); # restore $rounds
- &mov ($inp,$out); # $inp and $out are the same
- &mov ($key,$key_); # restore $key
- &jmp (&label("cbc_enc_loop"));
-######################################################################
-&set_label("cbc_decrypt",16);
- &cmp ($len,0x50);
- &jbe (&label("cbc_dec_tail"));
- &movaps (&QWP(0,"esp"),$ivec); # save IV
- &sub ($len,0x50);
- &jmp (&label("cbc_dec_loop6_enter"));
-
-&set_label("cbc_dec_loop6",16);
- &movaps (&QWP(0,"esp"),$rndkey0); # save IV
- &movups (&QWP(0,$out),$inout5);
- &lea ($out,&DWP(0x10,$out));
-&set_label("cbc_dec_loop6_enter");
- &movdqu ($inout0,&QWP(0,$inp));
- &movdqu ($inout1,&QWP(0x10,$inp));
- &movdqu ($inout2,&QWP(0x20,$inp));
- &movdqu ($inout3,&QWP(0x30,$inp));
- &movdqu ($inout4,&QWP(0x40,$inp));
- &movdqu ($inout5,&QWP(0x50,$inp));
-
- &call ("_aesni_decrypt6");
-
- &movups ($rndkey1,&QWP(0,$inp));
- &movups ($rndkey0,&QWP(0x10,$inp));
- &xorps ($inout0,&QWP(0,"esp")); # ^=IV
- &xorps ($inout1,$rndkey1);
- &movups ($rndkey1,&QWP(0x20,$inp));
- &xorps ($inout2,$rndkey0);
- &movups ($rndkey0,&QWP(0x30,$inp));
- &xorps ($inout3,$rndkey1);
- &movups ($rndkey1,&QWP(0x40,$inp));
- &xorps ($inout4,$rndkey0);
- &movups ($rndkey0,&QWP(0x50,$inp)); # IV
- &xorps ($inout5,$rndkey1);
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &lea ($inp,&DWP(0x60,$inp));
- &movups (&QWP(0x20,$out),$inout2);
- &mov ($rounds,$rounds_); # restore $rounds
- &movups (&QWP(0x30,$out),$inout3);
- &mov ($key,$key_); # restore $key
- &movups (&QWP(0x40,$out),$inout4);
- &lea ($out,&DWP(0x50,$out));
- &sub ($len,0x60);
- &ja (&label("cbc_dec_loop6"));
-
- &movaps ($inout0,$inout5);
- &movaps ($ivec,$rndkey0);
- &add ($len,0x50);
- &jle (&label("cbc_dec_tail_collected"));
- &movups (&QWP(0,$out),$inout0);
- &lea ($out,&DWP(0x10,$out));
-&set_label("cbc_dec_tail");
- &movups ($inout0,&QWP(0,$inp));
- &movaps ($in0,$inout0);
- &cmp ($len,0x10);
- &jbe (&label("cbc_dec_one"));
-
- &movups ($inout1,&QWP(0x10,$inp));
- &movaps ($in1,$inout1);
- &cmp ($len,0x20);
- &jbe (&label("cbc_dec_two"));
-
- &movups ($inout2,&QWP(0x20,$inp));
- &cmp ($len,0x30);
- &jbe (&label("cbc_dec_three"));
-
- &movups ($inout3,&QWP(0x30,$inp));
- &cmp ($len,0x40);
- &jbe (&label("cbc_dec_four"));
-
- &movups ($inout4,&QWP(0x40,$inp));
- &movaps (&QWP(0,"esp"),$ivec); # save IV
- &movups ($inout0,&QWP(0,$inp));
- &xorps ($inout5,$inout5);
- &call ("_aesni_decrypt6");
- &movups ($rndkey1,&QWP(0,$inp));
- &movups ($rndkey0,&QWP(0x10,$inp));
- &xorps ($inout0,&QWP(0,"esp")); # ^= IV
- &xorps ($inout1,$rndkey1);
- &movups ($rndkey1,&QWP(0x20,$inp));
- &xorps ($inout2,$rndkey0);
- &movups ($rndkey0,&QWP(0x30,$inp));
- &xorps ($inout3,$rndkey1);
- &movups ($ivec,&QWP(0x40,$inp)); # IV
- &xorps ($inout4,$rndkey0);
- &movups (&QWP(0,$out),$inout0);
- &movups (&QWP(0x10,$out),$inout1);
- &movups (&QWP(0x20,$out),$inout2);
- &movups (&QWP(0x30,$out),$inout3);
- &lea ($out,&DWP(0x40,$out));
- &movaps ($inout0,$inout4);
- &sub ($len,0x50);
- &jmp (&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_one",16);
- if ($inline)
- { &aesni_inline_generate1("dec"); }
- else
- { &call ("_aesni_decrypt1"); }
- &xorps ($inout0,$ivec);
- &movaps ($ivec,$in0);
- &sub ($len,0x10);
- &jmp (&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_two",16);
- &xorps ($inout2,$inout2);
- &call ("_aesni_decrypt3");
- &xorps ($inout0,$ivec);
- &xorps ($inout1,$in0);
- &movups (&QWP(0,$out),$inout0);
- &movaps ($inout0,$inout1);
- &lea ($out,&DWP(0x10,$out));
- &movaps ($ivec,$in1);
- &sub ($len,0x20);
- &jmp (&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_three",16);
- &call ("_aesni_decrypt3");
- &xorps ($inout0,$ivec);
- &xorps ($inout1,$in0);
- &xorps ($inout2,$in1);
- &movups (&QWP(0,$out),$inout0);
- &movaps ($inout0,$inout2);
- &movups (&QWP(0x10,$out),$inout1);
- &lea ($out,&DWP(0x20,$out));
- &movups ($ivec,&QWP(0x20,$inp));
- &sub ($len,0x30);
- &jmp (&label("cbc_dec_tail_collected"));
-
-&set_label("cbc_dec_four",16);
- &call ("_aesni_decrypt4");
- &movups ($rndkey1,&QWP(0x10,$inp));
- &movups ($rndkey0,&QWP(0x20,$inp));
- &xorps ($inout0,$ivec);
- &movups ($ivec,&QWP(0x30,$inp));
- &xorps ($inout1,$in0);
- &movups (&QWP(0,$out),$inout0);
- &xorps ($inout2,$rndkey1);
- &movups (&QWP(0x10,$out),$inout1);
- &xorps ($inout3,$rndkey0);
- &movups (&QWP(0x20,$out),$inout2);
- &lea ($out,&DWP(0x30,$out));
- &movaps ($inout0,$inout3);
- &sub ($len,0x40);
-
-&set_label("cbc_dec_tail_collected");
- &and ($len,15);
- &jnz (&label("cbc_dec_tail_partial"));
- &movups (&QWP(0,$out),$inout0);
- &jmp (&label("cbc_ret"));
-
-&set_label("cbc_dec_tail_partial",16);
- &movaps (&QWP(0,"esp"),$inout0);
- &mov ("ecx",16);
- &mov ($inp,"esp");
- &sub ("ecx",$len);
- &data_word(0xA4F3F689); # rep movsb
-
-&set_label("cbc_ret");
- &mov ("esp",&DWP(16,"esp")); # pull original %esp
- &mov ($key_,&wparam(4));
- &movups (&QWP(0,$key_),$ivec); # output IV
-&set_label("cbc_abort");
-&function_end("${PREFIX}_cbc_encrypt");
-
-######################################################################
-# Mechanical port from aesni-x86_64.pl.
-#
-# _aesni_set_encrypt_key is private interface,
-# input:
-# "eax" const unsigned char *userKey
-# $rounds int bits
-# $key AES_KEY *key
-# output:
-# "eax" return code
-# $rounds rounds
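For orientation, the AES_KEY layout behind the $key argument (and behind the &DWP(240,$key) "rounds" loads earlier in this file) is OpenSSL's. A sketch that also records two non-obvious facts: the &DWP(80|48|16,$key) stores in the 128/192/256-bit paths below all land on byte offset 240, because $key has been advanced to the last round key, and the value stored there is rounds-1 (9, 11 or 13), as the set_decrypt_key comment further down notes:

    #include <stddef.h>
    #include <stdint.h>
    #include <assert.h>

    /* Mirror of OpenSSL's aes.h: up to 15 round keys of 4 words,
       followed by the round count. */
    #define AES_MAXNR 14
    struct aes_key_st {
        uint32_t rd_key[4 * (AES_MAXNR + 1)];   /* 240 bytes */
        int rounds;                             /* holds rounds-1 here */
    };

    int main(void)
    {
        /* base+160, base+192, base+224 are the last round keys for
           10/12/14 rounds; +80, +48, +16 all reach this same field. */
        assert(offsetof(struct aes_key_st, rounds) == 240);
        return 0;
    }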
-
-&function_begin_B("_aesni_set_encrypt_key");
- &test ("eax","eax");
- &jz (&label("bad_pointer"));
- &test ($key,$key);
- &jz (&label("bad_pointer"));
-
- &movups ("xmm0",&QWP(0,"eax")); # pull first 128 bits of *userKey
- &xorps ("xmm4","xmm4"); # low dword of xmm4 is assumed 0
- &lea ($key,&DWP(16,$key));
- &cmp ($rounds,256);
- &je (&label("14rounds"));
- &cmp ($rounds,192);
- &je (&label("12rounds"));
- &cmp ($rounds,128);
- &jne (&label("bad_keybits"));
-
-&set_label("10rounds",16);
- &mov ($rounds,9);
- &$movekey (&QWP(-16,$key),"xmm0"); # round 0
- &aeskeygenassist("xmm1","xmm0",0x01); # round 1
- &call (&label("key_128_cold"));
- &aeskeygenassist("xmm1","xmm0",0x2); # round 2
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x04); # round 3
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x08); # round 4
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x10); # round 5
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x20); # round 6
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x40); # round 7
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x80); # round 8
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x1b); # round 9
- &call (&label("key_128"));
- &aeskeygenassist("xmm1","xmm0",0x36); # round 10
- &call (&label("key_128"));
- &$movekey (&QWP(0,$key),"xmm0");
- &mov (&DWP(80,$key),$rounds);
- &xor ("eax","eax");
- &ret();
-
-&set_label("key_128",16);
- &$movekey (&QWP(0,$key),"xmm0");
- &lea ($key,&DWP(16,$key));
-&set_label("key_128_cold");
- &shufps ("xmm4","xmm0",0b00010000);
- &xorps ("xmm0","xmm4");
- &shufps ("xmm4","xmm0",0b10001100);
- &xorps ("xmm0","xmm4");
- &shufps ("xmm1","xmm1",0b11111111); # critical path
- &xorps ("xmm0","xmm1");
- &ret();
-
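Each aeskeygenassist/key_128 pair above performs one AES-128 expansion step; the shufps cascade in key_128_cold against the zeroed xmm4 accumulates the running XOR of the previous key's words. An equivalent step sketched with C intrinsics (pslldq in place of the shufps trick, for clarity; a sketch, not the generated code):

    #include <wmmintrin.h>

    /* key: previous round key; kga must be
       _mm_aeskeygenassist_si128(key, RCON) with the caller's round
       constant (0x01, 0x02, ..., 0x36, matching the calls above). */
    static __m128i expand_step_128(__m128i key, __m128i kga)
    {
        key = _mm_xor_si128(key, _mm_slli_si128(key, 4));  /* w ^= w<<32 */
        key = _mm_xor_si128(key, _mm_slli_si128(key, 8));  /* finish cascade */
        return _mm_xor_si128(key, _mm_shuffle_epi32(kga, 0xff));
    }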
-&set_label("12rounds",16);
- &movq ("xmm2",&QWP(16,"eax")); # remaining 1/3 of *userKey
- &mov ($rounds,11);
- &$movekey (&QWP(-16,$key),"xmm0"); # round 0
- &aeskeygenassist("xmm1","xmm2",0x01); # round 1,2
- &call (&label("key_192a_cold"));
- &aeskeygenassist("xmm1","xmm2",0x02); # round 2,3
- &call (&label("key_192b"));
- &aeskeygenassist("xmm1","xmm2",0x04); # round 4,5
- &call (&label("key_192a"));
- &aeskeygenassist("xmm1","xmm2",0x08); # round 5,6
- &call (&label("key_192b"));
- &aeskeygenassist("xmm1","xmm2",0x10); # round 7,8
- &call (&label("key_192a"));
- &aeskeygenassist("xmm1","xmm2",0x20); # round 8,9
- &call (&label("key_192b"));
- &aeskeygenassist("xmm1","xmm2",0x40); # round 10,11
- &call (&label("key_192a"));
- &aeskeygenassist("xmm1","xmm2",0x80); # round 11,12
- &call (&label("key_192b"));
- &$movekey (&QWP(0,$key),"xmm0");
- &mov (&DWP(48,$key),$rounds);
- &xor ("eax","eax");
- &ret();
-
-&set_label("key_192a",16);
- &$movekey (&QWP(0,$key),"xmm0");
- &lea ($key,&DWP(16,$key));
-&set_label("key_192a_cold",16);
- &movaps ("xmm5","xmm2");
-&set_label("key_192b_warm");
- &shufps ("xmm4","xmm0",0b00010000);
- &movdqa ("xmm3","xmm2");
- &xorps ("xmm0","xmm4");
- &shufps ("xmm4","xmm0",0b10001100);
- &pslldq ("xmm3",4);
- &xorps ("xmm0","xmm4");
- &pshufd ("xmm1","xmm1",0b01010101); # critical path
- &pxor ("xmm2","xmm3");
- &pxor ("xmm0","xmm1");
- &pshufd ("xmm3","xmm0",0b11111111);
- &pxor ("xmm2","xmm3");
- &ret();
-
-&set_label("key_192b",16);
- &movaps ("xmm3","xmm0");
- &shufps ("xmm5","xmm0",0b01000100);
- &$movekey (&QWP(0,$key),"xmm5");
- &shufps ("xmm3","xmm2",0b01001110);
- &$movekey (&QWP(16,$key),"xmm3");
- &lea ($key,&DWP(32,$key));
- &jmp (&label("key_192b_warm"));
-
-&set_label("14rounds",16);
- &movups ("xmm2",&QWP(16,"eax")); # remaining half of *userKey
- &mov ($rounds,13);
- &lea ($key,&DWP(16,$key));
- &$movekey (&QWP(-32,$key),"xmm0"); # round 0
- &$movekey (&QWP(-16,$key),"xmm2"); # round 1
- &aeskeygenassist("xmm1","xmm2",0x01); # round 2
- &call (&label("key_256a_cold"));
- &aeskeygenassist("xmm1","xmm0",0x01); # round 3
- &call (&label("key_256b"));
- &aeskeygenassist("xmm1","xmm2",0x02); # round 4
- &call (&label("key_256a"));
- &aeskeygenassist("xmm1","xmm0",0x02); # round 5
- &call (&label("key_256b"));
- &aeskeygenassist("xmm1","xmm2",0x04); # round 6
- &call (&label("key_256a"));
- &aeskeygenassist("xmm1","xmm0",0x04); # round 7
- &call (&label("key_256b"));
- &aeskeygenassist("xmm1","xmm2",0x08); # round 8
- &call (&label("key_256a"));
- &aeskeygenassist("xmm1","xmm0",0x08); # round 9
- &call (&label("key_256b"));
- &aeskeygenassist("xmm1","xmm2",0x10); # round 10
- &call (&label("key_256a"));
- &aeskeygenassist("xmm1","xmm0",0x10); # round 11
- &call (&label("key_256b"));
- &aeskeygenassist("xmm1","xmm2",0x20); # round 12
- &call (&label("key_256a"));
- &aeskeygenassist("xmm1","xmm0",0x20); # round 13
- &call (&label("key_256b"));
- &aeskeygenassist("xmm1","xmm2",0x40); # round 14
- &call (&label("key_256a"));
- &$movekey (&QWP(0,$key),"xmm0");
- &mov (&DWP(16,$key),$rounds);
- &xor ("eax","eax");
- &ret();
-
-&set_label("key_256a",16);
- &$movekey (&QWP(0,$key),"xmm2");
- &lea ($key,&DWP(16,$key));
-&set_label("key_256a_cold");
- &shufps ("xmm4","xmm0",0b00010000);
- &xorps ("xmm0","xmm4");
- &shufps ("xmm4","xmm0",0b10001100);
- &xorps ("xmm0","xmm4");
- &shufps ("xmm1","xmm1",0b11111111); # critical path
- &xorps ("xmm0","xmm1");
- &ret();
-
-&set_label("key_256b",16);
- &$movekey (&QWP(0,$key),"xmm0");
- &lea ($key,&DWP(16,$key));
-
- &shufps ("xmm4","xmm2",0b00010000);
- &xorps ("xmm2","xmm4");
- &shufps ("xmm4","xmm2",0b10001100);
- &xorps ("xmm2","xmm4");
- &shufps ("xmm1","xmm1",0b10101010); # critical path
- &xorps ("xmm2","xmm1");
- &ret();
-
-&set_label("bad_pointer",4);
- &mov ("eax",-1);
- &ret ();
-&set_label("bad_keybits",4);
- &mov ("eax",-2);
- &ret ();
-&function_end_B("_aesni_set_encrypt_key");
-
-# int $PREFIX_set_encrypt_key (const unsigned char *userKey, int bits,
-# AES_KEY *key)
-&function_begin_B("${PREFIX}_set_encrypt_key");
- &mov ("eax",&wparam(0));
- &mov ($rounds,&wparam(1));
- &mov ($key,&wparam(2));
- &call ("_aesni_set_encrypt_key");
- &ret ();
-&function_end_B("${PREFIX}_set_encrypt_key");
-
-# int $PREFIX_set_decrypt_key (const unsigned char *userKey, int bits,
-# AES_KEY *key)
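What the swap/aesimc loop below computes, sketched with intrinsics: the decrypt schedule is the encrypt schedule in reverse order, with InvMixColumns applied to every round key except the two endpoints (a sketch of the semantics only, not the in-place two-pointer walk used here):

    #include <wmmintrin.h>

    /* rk: round keys from the encrypt schedule; nkeys = rounds+1
       16-byte keys (11, 13 or 15). Reverse, then aesimc the middle. */
    static void invert_key_schedule(__m128i rk[], int nkeys)
    {
        for (int lo = 0, hi = nkeys - 1; lo < hi; lo++, hi--) {
            __m128i t = rk[lo];
            rk[lo] = rk[hi];
            rk[hi] = t;
        }
        for (int i = 1; i < nkeys - 1; i++)
            rk[i] = _mm_aesimc_si128(rk[i]);
    }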
-&function_begin_B("${PREFIX}_set_decrypt_key");
- &mov ("eax",&wparam(0));
- &mov ($rounds,&wparam(1));
- &mov ($key,&wparam(2));
- &call ("_aesni_set_encrypt_key");
- &mov ($key,&wparam(2));
- &shl ($rounds,4); # rounds-1 after _aesni_set_encrypt_key
- &test ("eax","eax");
- &jnz (&label("dec_key_ret"));
- &lea ("eax",&DWP(16,$key,$rounds)); # end of key schedule
-
- &$movekey ("xmm0",&QWP(0,$key)); # just swap
- &$movekey ("xmm1",&QWP(0,"eax"));
- &$movekey (&QWP(0,"eax"),"xmm0");
- &$movekey (&QWP(0,$key),"xmm1");
- &lea ($key,&DWP(16,$key));
- &lea ("eax",&DWP(-16,"eax"));
-
-&set_label("dec_key_inverse");
- &$movekey ("xmm0",&QWP(0,$key)); # swap and inverse
- &$movekey ("xmm1",&QWP(0,"eax"));
- &aesimc ("xmm0","xmm0");
- &aesimc ("xmm1","xmm1");
- &lea ($key,&DWP(16,$key));
- &lea ("eax",&DWP(-16,"eax"));
- &$movekey (&QWP(16,"eax"),"xmm0");
- &$movekey (&QWP(-16,$key),"xmm1");
- &cmp ("eax",$key);
- &ja (&label("dec_key_inverse"));
-
- &$movekey ("xmm0",&QWP(0,$key)); # inverse middle
- &aesimc ("xmm0","xmm0");
- &$movekey (&QWP(0,$key),"xmm0");
-
- &xor ("eax","eax"); # return success
-&set_label("dec_key_ret");
- &ret ();
-&function_end_B("${PREFIX}_set_decrypt_key");
-&asciz("AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>");
-
-&asm_finish();
diff --git a/app/openssl/crypto/aes/asm/aesni-x86_64.S b/app/openssl/crypto/aes/asm/aesni-x86_64.S
deleted file mode 100644
index 917c8323..00000000
--- a/app/openssl/crypto/aes/asm/aesni-x86_64.S
+++ /dev/null
@@ -1,2535 +0,0 @@
-.text
-.globl aesni_encrypt
-.type aesni_encrypt,@function
-.align 16
-aesni_encrypt:
- movups (%rdi),%xmm2
- movl 240(%rdx),%eax
- movups (%rdx),%xmm0
- movups 16(%rdx),%xmm1
- leaq 32(%rdx),%rdx
- xorps %xmm0,%xmm2
-.Loop_enc1_1:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rdx),%xmm1
- leaq 16(%rdx),%rdx
- jnz .Loop_enc1_1
-.byte 102,15,56,221,209
- movups %xmm2,(%rsi)
- .byte 0xf3,0xc3
-.size aesni_encrypt,.-aesni_encrypt
-
-.globl aesni_decrypt
-.type aesni_decrypt,@function
-.align 16
-aesni_decrypt:
- movups (%rdi),%xmm2
- movl 240(%rdx),%eax
- movups (%rdx),%xmm0
- movups 16(%rdx),%xmm1
- leaq 32(%rdx),%rdx
- xorps %xmm0,%xmm2
-.Loop_dec1_2:
-.byte 102,15,56,222,209
- decl %eax
- movups (%rdx),%xmm1
- leaq 16(%rdx),%rdx
- jnz .Loop_dec1_2
-.byte 102,15,56,223,209
- movups %xmm2,(%rsi)
- .byte 0xf3,0xc3
-.size aesni_decrypt, .-aesni_decrypt
-.type _aesni_encrypt3,@function
-.align 16
-_aesni_encrypt3:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- xorps %xmm0,%xmm3
- xorps %xmm0,%xmm4
- movups (%rcx),%xmm0
-
-.Lenc_loop3:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %eax
-.byte 102,15,56,220,225
- movups 16(%rcx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,224
- movups (%rcx),%xmm0
- jnz .Lenc_loop3
-
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
-.byte 102,15,56,220,225
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-.byte 102,15,56,221,224
- .byte 0xf3,0xc3
-.size _aesni_encrypt3,.-_aesni_encrypt3
-.type _aesni_decrypt3,@function
-.align 16
-_aesni_decrypt3:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- xorps %xmm0,%xmm3
- xorps %xmm0,%xmm4
- movups (%rcx),%xmm0
-
-.Ldec_loop3:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %eax
-.byte 102,15,56,222,225
- movups 16(%rcx),%xmm1
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,222,224
- movups (%rcx),%xmm0
- jnz .Ldec_loop3
-
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
-.byte 102,15,56,222,225
-.byte 102,15,56,223,208
-.byte 102,15,56,223,216
-.byte 102,15,56,223,224
- .byte 0xf3,0xc3
-.size _aesni_decrypt3,.-_aesni_decrypt3
-.type _aesni_encrypt4,@function
-.align 16
-_aesni_encrypt4:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- xorps %xmm0,%xmm3
- xorps %xmm0,%xmm4
- xorps %xmm0,%xmm5
- movups (%rcx),%xmm0
-
-.Lenc_loop4:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %eax
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
- movups 16(%rcx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,224
-.byte 102,15,56,220,232
- movups (%rcx),%xmm0
- jnz .Lenc_loop4
-
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-.byte 102,15,56,221,224
-.byte 102,15,56,221,232
- .byte 0xf3,0xc3
-.size _aesni_encrypt4,.-_aesni_encrypt4
-.type _aesni_decrypt4,@function
-.align 16
-_aesni_decrypt4:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- xorps %xmm0,%xmm3
- xorps %xmm0,%xmm4
- xorps %xmm0,%xmm5
- movups (%rcx),%xmm0
-
-.Ldec_loop4:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %eax
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
- movups 16(%rcx),%xmm1
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,222,224
-.byte 102,15,56,222,232
- movups (%rcx),%xmm0
- jnz .Ldec_loop4
-
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,223,208
-.byte 102,15,56,223,216
-.byte 102,15,56,223,224
-.byte 102,15,56,223,232
- .byte 0xf3,0xc3
-.size _aesni_decrypt4,.-_aesni_decrypt4
-.type _aesni_encrypt6,@function
-.align 16
-_aesni_encrypt6:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
- pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
- pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
- pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,220,241
- movups (%rcx),%xmm0
-.byte 102,15,56,220,249
- jmp .Lenc_loop6_enter
-.align 16
-.Lenc_loop6:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %eax
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.Lenc_loop6_enter:
- movups 16(%rcx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,224
-.byte 102,15,56,220,232
-.byte 102,15,56,220,240
-.byte 102,15,56,220,248
- movups (%rcx),%xmm0
- jnz .Lenc_loop6
-
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-.byte 102,15,56,221,224
-.byte 102,15,56,221,232
-.byte 102,15,56,221,240
-.byte 102,15,56,221,248
- .byte 0xf3,0xc3
-.size _aesni_encrypt6,.-_aesni_encrypt6
-.type _aesni_decrypt6,@function
-.align 16
-_aesni_decrypt6:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- pxor %xmm0,%xmm3
-.byte 102,15,56,222,209
- pxor %xmm0,%xmm4
-.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
-.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,222,241
- movups (%rcx),%xmm0
-.byte 102,15,56,222,249
- jmp .Ldec_loop6_enter
-.align 16
-.Ldec_loop6:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %eax
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-.Ldec_loop6_enter:
- movups 16(%rcx),%xmm1
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,222,224
-.byte 102,15,56,222,232
-.byte 102,15,56,222,240
-.byte 102,15,56,222,248
- movups (%rcx),%xmm0
- jnz .Ldec_loop6
-
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-.byte 102,15,56,223,208
-.byte 102,15,56,223,216
-.byte 102,15,56,223,224
-.byte 102,15,56,223,232
-.byte 102,15,56,223,240
-.byte 102,15,56,223,248
- .byte 0xf3,0xc3
-.size _aesni_decrypt6,.-_aesni_decrypt6
-.type _aesni_encrypt8,@function
-.align 16
-_aesni_encrypt8:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- xorps %xmm0,%xmm3
-.byte 102,15,56,220,209
- pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
- pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
- pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,220,241
- pxor %xmm0,%xmm8
-.byte 102,15,56,220,249
- pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
-.byte 102,68,15,56,220,193
-.byte 102,68,15,56,220,201
- movups 16(%rcx),%xmm1
- jmp .Lenc_loop8_enter
-.align 16
-.Lenc_loop8:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %eax
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.byte 102,68,15,56,220,193
-.byte 102,68,15,56,220,201
- movups 16(%rcx),%xmm1
-.Lenc_loop8_enter:
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,224
-.byte 102,15,56,220,232
-.byte 102,15,56,220,240
-.byte 102,15,56,220,248
-.byte 102,68,15,56,220,192
-.byte 102,68,15,56,220,200
- movups (%rcx),%xmm0
- jnz .Lenc_loop8
-
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.byte 102,68,15,56,220,193
-.byte 102,68,15,56,220,201
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-.byte 102,15,56,221,224
-.byte 102,15,56,221,232
-.byte 102,15,56,221,240
-.byte 102,15,56,221,248
-.byte 102,68,15,56,221,192
-.byte 102,68,15,56,221,200
- .byte 0xf3,0xc3
-.size _aesni_encrypt8,.-_aesni_encrypt8
-.type _aesni_decrypt8,@function
-.align 16
-_aesni_decrypt8:
- movups (%rcx),%xmm0
- shrl $1,%eax
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
- xorps %xmm0,%xmm3
-.byte 102,15,56,222,209
- pxor %xmm0,%xmm4
-.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
-.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,222,241
- pxor %xmm0,%xmm8
-.byte 102,15,56,222,249
- pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
-.byte 102,68,15,56,222,193
-.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
- jmp .Ldec_loop8_enter
-.align 16
-.Ldec_loop8:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %eax
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-.byte 102,68,15,56,222,193
-.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
-.Ldec_loop8_enter:
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,222,224
-.byte 102,15,56,222,232
-.byte 102,15,56,222,240
-.byte 102,15,56,222,248
-.byte 102,68,15,56,222,192
-.byte 102,68,15,56,222,200
- movups (%rcx),%xmm0
- jnz .Ldec_loop8
-
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-.byte 102,68,15,56,222,193
-.byte 102,68,15,56,222,201
-.byte 102,15,56,223,208
-.byte 102,15,56,223,216
-.byte 102,15,56,223,224
-.byte 102,15,56,223,232
-.byte 102,15,56,223,240
-.byte 102,15,56,223,248
-.byte 102,68,15,56,223,192
-.byte 102,68,15,56,223,200
- .byte 0xf3,0xc3
-.size _aesni_decrypt8,.-_aesni_decrypt8
-.globl aesni_ecb_encrypt
-.type aesni_ecb_encrypt,@function
-.align 16
-aesni_ecb_encrypt:
- andq $-16,%rdx
- jz .Lecb_ret
-
- movl 240(%rcx),%eax
- movups (%rcx),%xmm0
- movq %rcx,%r11
- movl %eax,%r10d
- testl %r8d,%r8d
- jz .Lecb_decrypt
-
- cmpq $128,%rdx
- jb .Lecb_enc_tail
-
- movdqu (%rdi),%xmm2
- movdqu 16(%rdi),%xmm3
- movdqu 32(%rdi),%xmm4
- movdqu 48(%rdi),%xmm5
- movdqu 64(%rdi),%xmm6
- movdqu 80(%rdi),%xmm7
- movdqu 96(%rdi),%xmm8
- movdqu 112(%rdi),%xmm9
- leaq 128(%rdi),%rdi
- subq $128,%rdx
- jmp .Lecb_enc_loop8_enter
-.align 16
-.Lecb_enc_loop8:
- movups %xmm2,(%rsi)
- movq %r11,%rcx
- movdqu (%rdi),%xmm2
- movl %r10d,%eax
- movups %xmm3,16(%rsi)
- movdqu 16(%rdi),%xmm3
- movups %xmm4,32(%rsi)
- movdqu 32(%rdi),%xmm4
- movups %xmm5,48(%rsi)
- movdqu 48(%rdi),%xmm5
- movups %xmm6,64(%rsi)
- movdqu 64(%rdi),%xmm6
- movups %xmm7,80(%rsi)
- movdqu 80(%rdi),%xmm7
- movups %xmm8,96(%rsi)
- movdqu 96(%rdi),%xmm8
- movups %xmm9,112(%rsi)
- leaq 128(%rsi),%rsi
- movdqu 112(%rdi),%xmm9
- leaq 128(%rdi),%rdi
-.Lecb_enc_loop8_enter:
-
- call _aesni_encrypt8
-
- subq $128,%rdx
- jnc .Lecb_enc_loop8
-
- movups %xmm2,(%rsi)
- movq %r11,%rcx
- movups %xmm3,16(%rsi)
- movl %r10d,%eax
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- movups %xmm8,96(%rsi)
- movups %xmm9,112(%rsi)
- leaq 128(%rsi),%rsi
- addq $128,%rdx
- jz .Lecb_ret
-
-.Lecb_enc_tail:
- movups (%rdi),%xmm2
- cmpq $32,%rdx
- jb .Lecb_enc_one
- movups 16(%rdi),%xmm3
- je .Lecb_enc_two
- movups 32(%rdi),%xmm4
- cmpq $64,%rdx
- jb .Lecb_enc_three
- movups 48(%rdi),%xmm5
- je .Lecb_enc_four
- movups 64(%rdi),%xmm6
- cmpq $96,%rdx
- jb .Lecb_enc_five
- movups 80(%rdi),%xmm7
- je .Lecb_enc_six
- movdqu 96(%rdi),%xmm8
- call _aesni_encrypt8
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- movups %xmm8,96(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_one:
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_enc1_3:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_enc1_3
-.byte 102,15,56,221,209
- movups %xmm2,(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_two:
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_three:
- call _aesni_encrypt3
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_four:
- call _aesni_encrypt4
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_five:
- xorps %xmm7,%xmm7
- call _aesni_encrypt6
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_six:
- call _aesni_encrypt6
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- jmp .Lecb_ret
-
-.align 16
-.Lecb_decrypt:
- cmpq $128,%rdx
- jb .Lecb_dec_tail
-
- movdqu (%rdi),%xmm2
- movdqu 16(%rdi),%xmm3
- movdqu 32(%rdi),%xmm4
- movdqu 48(%rdi),%xmm5
- movdqu 64(%rdi),%xmm6
- movdqu 80(%rdi),%xmm7
- movdqu 96(%rdi),%xmm8
- movdqu 112(%rdi),%xmm9
- leaq 128(%rdi),%rdi
- subq $128,%rdx
- jmp .Lecb_dec_loop8_enter
-.align 16
-.Lecb_dec_loop8:
- movups %xmm2,(%rsi)
- movq %r11,%rcx
- movdqu (%rdi),%xmm2
- movl %r10d,%eax
- movups %xmm3,16(%rsi)
- movdqu 16(%rdi),%xmm3
- movups %xmm4,32(%rsi)
- movdqu 32(%rdi),%xmm4
- movups %xmm5,48(%rsi)
- movdqu 48(%rdi),%xmm5
- movups %xmm6,64(%rsi)
- movdqu 64(%rdi),%xmm6
- movups %xmm7,80(%rsi)
- movdqu 80(%rdi),%xmm7
- movups %xmm8,96(%rsi)
- movdqu 96(%rdi),%xmm8
- movups %xmm9,112(%rsi)
- leaq 128(%rsi),%rsi
- movdqu 112(%rdi),%xmm9
- leaq 128(%rdi),%rdi
-.Lecb_dec_loop8_enter:
-
- call _aesni_decrypt8
-
- movups (%r11),%xmm0
- subq $128,%rdx
- jnc .Lecb_dec_loop8
-
- movups %xmm2,(%rsi)
- movq %r11,%rcx
- movups %xmm3,16(%rsi)
- movl %r10d,%eax
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- movups %xmm8,96(%rsi)
- movups %xmm9,112(%rsi)
- leaq 128(%rsi),%rsi
- addq $128,%rdx
- jz .Lecb_ret
-
-.Lecb_dec_tail:
- movups (%rdi),%xmm2
- cmpq $32,%rdx
- jb .Lecb_dec_one
- movups 16(%rdi),%xmm3
- je .Lecb_dec_two
- movups 32(%rdi),%xmm4
- cmpq $64,%rdx
- jb .Lecb_dec_three
- movups 48(%rdi),%xmm5
- je .Lecb_dec_four
- movups 64(%rdi),%xmm6
- cmpq $96,%rdx
- jb .Lecb_dec_five
- movups 80(%rdi),%xmm7
- je .Lecb_dec_six
- movups 96(%rdi),%xmm8
- movups (%rcx),%xmm0
- call _aesni_decrypt8
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- movups %xmm8,96(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_one:
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_dec1_4:
-.byte 102,15,56,222,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_dec1_4
-.byte 102,15,56,223,209
- movups %xmm2,(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_three:
- call _aesni_decrypt3
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_four:
- call _aesni_decrypt4
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_five:
- xorps %xmm7,%xmm7
- call _aesni_decrypt6
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_six:
- call _aesni_decrypt6
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
-
-.Lecb_ret:
- .byte 0xf3,0xc3
-.size aesni_ecb_encrypt,.-aesni_ecb_encrypt
-.globl aesni_ccm64_encrypt_blocks
-.type aesni_ccm64_encrypt_blocks,@function
-.align 16
-aesni_ccm64_encrypt_blocks:
- movl 240(%rcx),%eax
- movdqu (%r8),%xmm9
- movdqa .Lincrement64(%rip),%xmm6
- movdqa .Lbswap_mask(%rip),%xmm7
-
- shrl $1,%eax
- leaq 0(%rcx),%r11
- movdqu (%r9),%xmm3
- movdqa %xmm9,%xmm2
- movl %eax,%r10d
-.byte 102,68,15,56,0,207
- jmp .Lccm64_enc_outer
-.align 16
-.Lccm64_enc_outer:
- movups (%r11),%xmm0
- movl %r10d,%eax
- movups (%rdi),%xmm8
-
- xorps %xmm0,%xmm2
- movups 16(%r11),%xmm1
- xorps %xmm8,%xmm0
- leaq 32(%r11),%rcx
- xorps %xmm0,%xmm3
- movups (%rcx),%xmm0
-
-.Lccm64_enc2_loop:
-.byte 102,15,56,220,209
- decl %eax
-.byte 102,15,56,220,217
- movups 16(%rcx),%xmm1
-.byte 102,15,56,220,208
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,216
- movups 0(%rcx),%xmm0
- jnz .Lccm64_enc2_loop
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- paddq %xmm6,%xmm9
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
-
- decq %rdx
- leaq 16(%rdi),%rdi
- xorps %xmm2,%xmm8
- movdqa %xmm9,%xmm2
- movups %xmm8,(%rsi)
- leaq 16(%rsi),%rsi
-.byte 102,15,56,0,215
- jnz .Lccm64_enc_outer
-
- movups %xmm3,(%r9)
- .byte 0xf3,0xc3
-.size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
-.globl aesni_ccm64_decrypt_blocks
-.type aesni_ccm64_decrypt_blocks,@function
-.align 16
-aesni_ccm64_decrypt_blocks:
- movl 240(%rcx),%eax
- movups (%r8),%xmm9
- movdqu (%r9),%xmm3
- movdqa .Lincrement64(%rip),%xmm6
- movdqa .Lbswap_mask(%rip),%xmm7
-
- movaps %xmm9,%xmm2
- movl %eax,%r10d
- movq %rcx,%r11
-.byte 102,68,15,56,0,207
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_enc1_5:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_enc1_5
-.byte 102,15,56,221,209
- movups (%rdi),%xmm8
- paddq %xmm6,%xmm9
- leaq 16(%rdi),%rdi
- jmp .Lccm64_dec_outer
-.align 16
-.Lccm64_dec_outer:
- xorps %xmm2,%xmm8
- movdqa %xmm9,%xmm2
- movl %r10d,%eax
- movups %xmm8,(%rsi)
- leaq 16(%rsi),%rsi
-.byte 102,15,56,0,215
-
- subq $1,%rdx
- jz .Lccm64_dec_break
-
- movups (%r11),%xmm0
- shrl $1,%eax
- movups 16(%r11),%xmm1
- xorps %xmm0,%xmm8
- leaq 32(%r11),%rcx
- xorps %xmm0,%xmm2
- xorps %xmm8,%xmm3
- movups (%rcx),%xmm0
-
-.Lccm64_dec2_loop:
-.byte 102,15,56,220,209
- decl %eax
-.byte 102,15,56,220,217
- movups 16(%rcx),%xmm1
-.byte 102,15,56,220,208
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,216
- movups 0(%rcx),%xmm0
- jnz .Lccm64_dec2_loop
- movups (%rdi),%xmm8
- paddq %xmm6,%xmm9
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- leaq 16(%rdi),%rdi
-.byte 102,15,56,221,208
-.byte 102,15,56,221,216
- jmp .Lccm64_dec_outer
-
-.align 16
-.Lccm64_dec_break:
-
- movups (%r11),%xmm0
- movups 16(%r11),%xmm1
- xorps %xmm0,%xmm8
- leaq 32(%r11),%r11
- xorps %xmm8,%xmm3
-.Loop_enc1_6:
-.byte 102,15,56,220,217
- decl %eax
- movups (%r11),%xmm1
- leaq 16(%r11),%r11
- jnz .Loop_enc1_6
-.byte 102,15,56,221,217
- movups %xmm3,(%r9)
- .byte 0xf3,0xc3
-.size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
-.globl aesni_ctr32_encrypt_blocks
-.type aesni_ctr32_encrypt_blocks,@function
-.align 16
-aesni_ctr32_encrypt_blocks:
- cmpq $1,%rdx
- je .Lctr32_one_shortcut
-
- movdqu (%r8),%xmm14
- movdqa .Lbswap_mask(%rip),%xmm15
- xorl %eax,%eax
-.byte 102,69,15,58,22,242,3
-.byte 102,68,15,58,34,240,3
-
- movl 240(%rcx),%eax
- bswapl %r10d
- pxor %xmm12,%xmm12
- pxor %xmm13,%xmm13
-.byte 102,69,15,58,34,226,0
- leaq 3(%r10),%r11
-.byte 102,69,15,58,34,235,0
- incl %r10d
-.byte 102,69,15,58,34,226,1
- incq %r11
-.byte 102,69,15,58,34,235,1
- incl %r10d
-.byte 102,69,15,58,34,226,2
- incq %r11
-.byte 102,69,15,58,34,235,2
- movdqa %xmm12,-40(%rsp)
-.byte 102,69,15,56,0,231
- movdqa %xmm13,-24(%rsp)
-.byte 102,69,15,56,0,239
-
- pshufd $192,%xmm12,%xmm2
- pshufd $128,%xmm12,%xmm3
- pshufd $64,%xmm12,%xmm4
- cmpq $6,%rdx
- jb .Lctr32_tail
- shrl $1,%eax
- movq %rcx,%r11
- movl %eax,%r10d
- subq $6,%rdx
- jmp .Lctr32_loop6
-
-.align 16
-.Lctr32_loop6:
- pshufd $192,%xmm13,%xmm5
- por %xmm14,%xmm2
- movups (%r11),%xmm0
- pshufd $128,%xmm13,%xmm6
- por %xmm14,%xmm3
- movups 16(%r11),%xmm1
- pshufd $64,%xmm13,%xmm7
- por %xmm14,%xmm4
- por %xmm14,%xmm5
- xorps %xmm0,%xmm2
- por %xmm14,%xmm6
- por %xmm14,%xmm7
-
-
-
-
- pxor %xmm0,%xmm3
-.byte 102,15,56,220,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
-.byte 102,15,56,220,217
- movdqa .Lincrement32(%rip),%xmm13
- pxor %xmm0,%xmm5
-.byte 102,15,56,220,225
- movdqa -40(%rsp),%xmm12
- pxor %xmm0,%xmm6
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
- jmp .Lctr32_enc_loop6_enter
-.align 16
-.Lctr32_enc_loop6:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %eax
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.Lctr32_enc_loop6_enter:
- movups 16(%rcx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,224
-.byte 102,15,56,220,232
-.byte 102,15,56,220,240
-.byte 102,15,56,220,248
- movups (%rcx),%xmm0
- jnz .Lctr32_enc_loop6
-
-.byte 102,15,56,220,209
- paddd %xmm13,%xmm12
-.byte 102,15,56,220,217
- paddd -24(%rsp),%xmm13
-.byte 102,15,56,220,225
- movdqa %xmm12,-40(%rsp)
-.byte 102,15,56,220,233
- movdqa %xmm13,-24(%rsp)
-.byte 102,15,56,220,241
-.byte 102,69,15,56,0,231
-.byte 102,15,56,220,249
-.byte 102,69,15,56,0,239
-
-.byte 102,15,56,221,208
- movups (%rdi),%xmm8
-.byte 102,15,56,221,216
- movups 16(%rdi),%xmm9
-.byte 102,15,56,221,224
- movups 32(%rdi),%xmm10
-.byte 102,15,56,221,232
- movups 48(%rdi),%xmm11
-.byte 102,15,56,221,240
- movups 64(%rdi),%xmm1
-.byte 102,15,56,221,248
- movups 80(%rdi),%xmm0
- leaq 96(%rdi),%rdi
-
- xorps %xmm2,%xmm8
- pshufd $192,%xmm12,%xmm2
- xorps %xmm3,%xmm9
- pshufd $128,%xmm12,%xmm3
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- pshufd $64,%xmm12,%xmm4
- movups %xmm9,16(%rsi)
- xorps %xmm5,%xmm11
- movups %xmm10,32(%rsi)
- xorps %xmm6,%xmm1
- movups %xmm11,48(%rsi)
- xorps %xmm7,%xmm0
- movups %xmm1,64(%rsi)
- movups %xmm0,80(%rsi)
- leaq 96(%rsi),%rsi
- movl %r10d,%eax
- subq $6,%rdx
- jnc .Lctr32_loop6
-
- addq $6,%rdx
- jz .Lctr32_done
- movq %r11,%rcx
- leal 1(%rax,%rax,1),%eax
-
-.Lctr32_tail:
- por %xmm14,%xmm2
- movups (%rdi),%xmm8
- cmpq $2,%rdx
- jb .Lctr32_one
-
- por %xmm14,%xmm3
- movups 16(%rdi),%xmm9
- je .Lctr32_two
-
- pshufd $192,%xmm13,%xmm5
- por %xmm14,%xmm4
- movups 32(%rdi),%xmm10
- cmpq $4,%rdx
- jb .Lctr32_three
-
- pshufd $128,%xmm13,%xmm6
- por %xmm14,%xmm5
- movups 48(%rdi),%xmm11
- je .Lctr32_four
-
- por %xmm14,%xmm6
- xorps %xmm7,%xmm7
-
- call _aesni_encrypt6
-
- movups 64(%rdi),%xmm1
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- movups %xmm9,16(%rsi)
- xorps %xmm5,%xmm11
- movups %xmm10,32(%rsi)
- xorps %xmm6,%xmm1
- movups %xmm11,48(%rsi)
- movups %xmm1,64(%rsi)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_one_shortcut:
- movups (%r8),%xmm2
- movups (%rdi),%xmm8
- movl 240(%rcx),%eax
-.Lctr32_one:
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_enc1_7:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_enc1_7
-.byte 102,15,56,221,209
- xorps %xmm2,%xmm8
- movups %xmm8,(%rsi)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_two:
- xorps %xmm4,%xmm4
- call _aesni_encrypt3
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- movups %xmm9,16(%rsi)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_three:
- call _aesni_encrypt3
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- movups %xmm9,16(%rsi)
- movups %xmm10,32(%rsi)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_four:
- call _aesni_encrypt4
- xorps %xmm2,%xmm8
- xorps %xmm3,%xmm9
- movups %xmm8,(%rsi)
- xorps %xmm4,%xmm10
- movups %xmm9,16(%rsi)
- xorps %xmm5,%xmm11
- movups %xmm10,32(%rsi)
- movups %xmm11,48(%rsi)
-
-.Lctr32_done:
- .byte 0xf3,0xc3
-.size aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
-.globl aesni_xts_encrypt
-.type aesni_xts_encrypt,@function
-.align 16
-aesni_xts_encrypt:
- leaq -104(%rsp),%rsp
- movups (%r9),%xmm15
- movl 240(%r8),%eax
- movl 240(%rcx),%r10d
- movups (%r8),%xmm0
- movups 16(%r8),%xmm1
- leaq 32(%r8),%r8
- xorps %xmm0,%xmm15
-.Loop_enc1_8:
-.byte 102,68,15,56,220,249
- decl %eax
- movups (%r8),%xmm1
- leaq 16(%r8),%r8
- jnz .Loop_enc1_8
-.byte 102,68,15,56,221,249
- movq %rcx,%r11
- movl %r10d,%eax
- movq %rdx,%r9
- andq $-16,%rdx
-
- movdqa .Lxts_magic(%rip),%xmm8
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- subq $96,%rdx
- jc .Lxts_enc_short
-
- shrl $1,%eax
- subl $1,%eax
- movl %eax,%r10d
- jmp .Lxts_enc_grandloop
-
-.align 16
-.Lxts_enc_grandloop:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
- movdqu 0(%rdi),%xmm2
- pand %xmm8,%xmm9
- movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
- pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
- pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
- pxor %xmm12,%xmm4
- movdqu 80(%rdi),%xmm7
- leaq 96(%rdi),%rdi
- pxor %xmm13,%xmm5
- movups (%r11),%xmm0
- pxor %xmm14,%xmm6
- pxor %xmm15,%xmm7
-
-
-
- movups 16(%r11),%xmm1
- pxor %xmm0,%xmm2
- pxor %xmm0,%xmm3
- movdqa %xmm10,0(%rsp)
-.byte 102,15,56,220,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
- movdqa %xmm11,16(%rsp)
-.byte 102,15,56,220,217
- pxor %xmm0,%xmm5
- movdqa %xmm12,32(%rsp)
-.byte 102,15,56,220,225
- pxor %xmm0,%xmm6
- movdqa %xmm13,48(%rsp)
-.byte 102,15,56,220,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
- movdqa %xmm14,64(%rsp)
-.byte 102,15,56,220,241
- movdqa %xmm15,80(%rsp)
-.byte 102,15,56,220,249
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- jmp .Lxts_enc_loop6_enter
-
-.align 16
-.Lxts_enc_loop6:
-.byte 102,15,56,220,209
-.byte 102,15,56,220,217
- decl %eax
-.byte 102,15,56,220,225
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-.Lxts_enc_loop6_enter:
- movups 16(%rcx),%xmm1
-.byte 102,15,56,220,208
-.byte 102,15,56,220,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,220,224
-.byte 102,15,56,220,232
-.byte 102,15,56,220,240
-.byte 102,15,56,220,248
- movups (%rcx),%xmm0
- jnz .Lxts_enc_loop6
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- paddq %xmm15,%xmm15
-.byte 102,15,56,220,209
- pand %xmm8,%xmm9
-.byte 102,15,56,220,217
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,220,225
- pxor %xmm9,%xmm15
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
- movups 16(%rcx),%xmm1
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
-.byte 102,15,56,220,208
- pand %xmm8,%xmm9
-.byte 102,15,56,220,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,220,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,220,232
-.byte 102,15,56,220,240
-.byte 102,15,56,220,248
- movups 32(%rcx),%xmm0
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
-.byte 102,15,56,220,209
- pand %xmm8,%xmm9
-.byte 102,15,56,220,217
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,220,225
- pxor %xmm9,%xmm15
-.byte 102,15,56,220,233
-.byte 102,15,56,220,241
-.byte 102,15,56,220,249
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
- paddq %xmm15,%xmm15
-.byte 102,15,56,221,208
- pand %xmm8,%xmm9
-.byte 102,15,56,221,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,221,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,221,232
-.byte 102,15,56,221,240
-.byte 102,15,56,221,248
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
- paddq %xmm15,%xmm15
- xorps 0(%rsp),%xmm2
- pand %xmm8,%xmm9
- xorps 16(%rsp),%xmm3
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
-
- xorps 32(%rsp),%xmm4
- movups %xmm2,0(%rsi)
- xorps 48(%rsp),%xmm5
- movups %xmm3,16(%rsi)
- xorps 64(%rsp),%xmm6
- movups %xmm4,32(%rsi)
- xorps 80(%rsp),%xmm7
- movups %xmm5,48(%rsi)
- movl %r10d,%eax
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- leaq 96(%rsi),%rsi
- subq $96,%rdx
- jnc .Lxts_enc_grandloop
-
- leal 3(%rax,%rax,1),%eax
- movq %r11,%rcx
- movl %eax,%r10d
-
-.Lxts_enc_short:
- addq $96,%rdx
- jz .Lxts_enc_done
-
- cmpq $32,%rdx
- jb .Lxts_enc_one
- je .Lxts_enc_two
-
- cmpq $64,%rdx
- jb .Lxts_enc_three
- je .Lxts_enc_four
-
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
- movdqu (%rdi),%xmm2
- pand %xmm8,%xmm9
- movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
- pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
- pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
- leaq 80(%rdi),%rdi
- pxor %xmm12,%xmm4
- pxor %xmm13,%xmm5
- pxor %xmm14,%xmm6
-
- call _aesni_encrypt6
-
- xorps %xmm10,%xmm2
- movdqa %xmm15,%xmm10
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- movdqu %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movdqu %xmm3,16(%rsi)
- xorps %xmm14,%xmm6
- movdqu %xmm4,32(%rsi)
- movdqu %xmm5,48(%rsi)
- movdqu %xmm6,64(%rsi)
- leaq 80(%rsi),%rsi
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_one:
- movups (%rdi),%xmm2
- leaq 16(%rdi),%rdi
- xorps %xmm10,%xmm2
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_enc1_9:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_enc1_9
-.byte 102,15,56,221,209
- xorps %xmm10,%xmm2
- movdqa %xmm11,%xmm10
- movups %xmm2,(%rsi)
- leaq 16(%rsi),%rsi
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_two:
- movups (%rdi),%xmm2
- movups 16(%rdi),%xmm3
- leaq 32(%rdi),%rdi
- xorps %xmm10,%xmm2
- xorps %xmm11,%xmm3
-
- call _aesni_encrypt3
-
- xorps %xmm10,%xmm2
- movdqa %xmm12,%xmm10
- xorps %xmm11,%xmm3
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- leaq 32(%rsi),%rsi
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_three:
- movups (%rdi),%xmm2
- movups 16(%rdi),%xmm3
- movups 32(%rdi),%xmm4
- leaq 48(%rdi),%rdi
- xorps %xmm10,%xmm2
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
-
- call _aesni_encrypt3
-
- xorps %xmm10,%xmm2
- movdqa %xmm13,%xmm10
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- leaq 48(%rsi),%rsi
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_four:
- movups (%rdi),%xmm2
- movups 16(%rdi),%xmm3
- movups 32(%rdi),%xmm4
- xorps %xmm10,%xmm2
- movups 48(%rdi),%xmm5
- leaq 64(%rdi),%rdi
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- xorps %xmm13,%xmm5
-
- call _aesni_encrypt4
-
- xorps %xmm10,%xmm2
- movdqa %xmm15,%xmm10
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- leaq 64(%rsi),%rsi
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_done:
- andq $15,%r9
- jz .Lxts_enc_ret
- movq %r9,%rdx
-
-.Lxts_enc_steal:
- movzbl (%rdi),%eax
- movzbl -16(%rsi),%ecx
- leaq 1(%rdi),%rdi
- movb %al,-16(%rsi)
- movb %cl,0(%rsi)
- leaq 1(%rsi),%rsi
- subq $1,%rdx
- jnz .Lxts_enc_steal
-
- subq %r9,%rsi
- movq %r11,%rcx
- movl %r10d,%eax
-
- movups -16(%rsi),%xmm2
- xorps %xmm10,%xmm2
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_enc1_10:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_enc1_10
-.byte 102,15,56,221,209
- xorps %xmm10,%xmm2
- movups %xmm2,-16(%rsi)
-
-.Lxts_enc_ret:
- leaq 104(%rsp),%rsp
-.Lxts_enc_epilogue:
- .byte 0xf3,0xc3
-.size aesni_xts_encrypt,.-aesni_xts_encrypt
-.globl aesni_xts_decrypt
-.type aesni_xts_decrypt,@function
-.align 16
-aesni_xts_decrypt:
- leaq -104(%rsp),%rsp
- movups (%r9),%xmm15
- movl 240(%r8),%eax
- movl 240(%rcx),%r10d
- movups (%r8),%xmm0
- movups 16(%r8),%xmm1
- leaq 32(%r8),%r8
- xorps %xmm0,%xmm15
-.Loop_enc1_11:
-.byte 102,68,15,56,220,249
- decl %eax
- movups (%r8),%xmm1
- leaq 16(%r8),%r8
- jnz .Loop_enc1_11
-.byte 102,68,15,56,221,249
- xorl %eax,%eax
- testq $15,%rdx
- setnz %al
- shlq $4,%rax
- subq %rax,%rdx
-
- movq %rcx,%r11
- movl %r10d,%eax
- movq %rdx,%r9
- andq $-16,%rdx
-
- movdqa .Lxts_magic(%rip),%xmm8
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm9
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
- subq $96,%rdx
- jc .Lxts_dec_short
-
- shrl $1,%eax
- subl $1,%eax
- movl %eax,%r10d
- jmp .Lxts_dec_grandloop
-
-.align 16
-.Lxts_dec_grandloop:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
- movdqu 0(%rdi),%xmm2
- pand %xmm8,%xmm9
- movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
- pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
- pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
- pxor %xmm12,%xmm4
- movdqu 80(%rdi),%xmm7
- leaq 96(%rdi),%rdi
- pxor %xmm13,%xmm5
- movups (%r11),%xmm0
- pxor %xmm14,%xmm6
- pxor %xmm15,%xmm7
-
-
-
- movups 16(%r11),%xmm1
- pxor %xmm0,%xmm2
- pxor %xmm0,%xmm3
- movdqa %xmm10,0(%rsp)
-.byte 102,15,56,222,209
- leaq 32(%r11),%rcx
- pxor %xmm0,%xmm4
- movdqa %xmm11,16(%rsp)
-.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
- movdqa %xmm12,32(%rsp)
-.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
- movdqa %xmm13,48(%rsp)
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
- movups (%rcx),%xmm0
- decl %eax
- movdqa %xmm14,64(%rsp)
-.byte 102,15,56,222,241
- movdqa %xmm15,80(%rsp)
-.byte 102,15,56,222,249
- pxor %xmm14,%xmm14
- pcmpgtd %xmm15,%xmm14
- jmp .Lxts_dec_loop6_enter
-
-.align 16
-.Lxts_dec_loop6:
-.byte 102,15,56,222,209
-.byte 102,15,56,222,217
- decl %eax
-.byte 102,15,56,222,225
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-.Lxts_dec_loop6_enter:
- movups 16(%rcx),%xmm1
-.byte 102,15,56,222,208
-.byte 102,15,56,222,216
- leaq 32(%rcx),%rcx
-.byte 102,15,56,222,224
-.byte 102,15,56,222,232
-.byte 102,15,56,222,240
-.byte 102,15,56,222,248
- movups (%rcx),%xmm0
- jnz .Lxts_dec_loop6
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- paddq %xmm15,%xmm15
-.byte 102,15,56,222,209
- pand %xmm8,%xmm9
-.byte 102,15,56,222,217
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,222,225
- pxor %xmm9,%xmm15
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
- movups 16(%rcx),%xmm1
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
-.byte 102,15,56,222,208
- pand %xmm8,%xmm9
-.byte 102,15,56,222,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,222,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,222,232
-.byte 102,15,56,222,240
-.byte 102,15,56,222,248
- movups 32(%rcx),%xmm0
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm11
- paddq %xmm15,%xmm15
-.byte 102,15,56,222,209
- pand %xmm8,%xmm9
-.byte 102,15,56,222,217
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,222,225
- pxor %xmm9,%xmm15
-.byte 102,15,56,222,233
-.byte 102,15,56,222,241
-.byte 102,15,56,222,249
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm12
- paddq %xmm15,%xmm15
-.byte 102,15,56,223,208
- pand %xmm8,%xmm9
-.byte 102,15,56,223,216
- pcmpgtd %xmm15,%xmm14
-.byte 102,15,56,223,224
- pxor %xmm9,%xmm15
-.byte 102,15,56,223,232
-.byte 102,15,56,223,240
-.byte 102,15,56,223,248
-
- pshufd $19,%xmm14,%xmm9
- pxor %xmm14,%xmm14
- movdqa %xmm15,%xmm13
- paddq %xmm15,%xmm15
- xorps 0(%rsp),%xmm2
- pand %xmm8,%xmm9
- xorps 16(%rsp),%xmm3
- pcmpgtd %xmm15,%xmm14
- pxor %xmm9,%xmm15
-
- xorps 32(%rsp),%xmm4
- movups %xmm2,0(%rsi)
- xorps 48(%rsp),%xmm5
- movups %xmm3,16(%rsi)
- xorps 64(%rsp),%xmm6
- movups %xmm4,32(%rsi)
- xorps 80(%rsp),%xmm7
- movups %xmm5,48(%rsi)
- movl %r10d,%eax
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- leaq 96(%rsi),%rsi
- subq $96,%rdx
- jnc .Lxts_dec_grandloop
-
- leal 3(%rax,%rax,1),%eax
- movq %r11,%rcx
- movl %eax,%r10d
-
-.Lxts_dec_short:
- addq $96,%rdx
- jz .Lxts_dec_done
-
- cmpq $32,%rdx
- jb .Lxts_dec_one
- je .Lxts_dec_two
-
- cmpq $64,%rdx
- jb .Lxts_dec_three
- je .Lxts_dec_four
-
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
- movdqu (%rdi),%xmm2
- pand %xmm8,%xmm9
- movdqu 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movdqu 32(%rdi),%xmm4
- pxor %xmm10,%xmm2
- movdqu 48(%rdi),%xmm5
- pxor %xmm11,%xmm3
- movdqu 64(%rdi),%xmm6
- leaq 80(%rdi),%rdi
- pxor %xmm12,%xmm4
- pxor %xmm13,%xmm5
- pxor %xmm14,%xmm6
-
- call _aesni_decrypt6
-
- xorps %xmm10,%xmm2
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- movdqu %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movdqu %xmm3,16(%rsi)
- xorps %xmm14,%xmm6
- movdqu %xmm4,32(%rsi)
- pxor %xmm14,%xmm14
- movdqu %xmm5,48(%rsi)
- pcmpgtd %xmm15,%xmm14
- movdqu %xmm6,64(%rsi)
- leaq 80(%rsi),%rsi
- pshufd $19,%xmm14,%xmm11
- andq $15,%r9
- jz .Lxts_dec_ret
-
- movdqa %xmm15,%xmm10
- paddq %xmm15,%xmm15
- pand %xmm8,%xmm11
- pxor %xmm15,%xmm11
- jmp .Lxts_dec_done2
-
-.align 16
-.Lxts_dec_one:
- movups (%rdi),%xmm2
- leaq 16(%rdi),%rdi
- xorps %xmm10,%xmm2
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_dec1_12:
-.byte 102,15,56,222,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_dec1_12
-.byte 102,15,56,223,209
- xorps %xmm10,%xmm2
- movdqa %xmm11,%xmm10
- movups %xmm2,(%rsi)
- movdqa %xmm12,%xmm11
- leaq 16(%rsi),%rsi
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_two:
- movups (%rdi),%xmm2
- movups 16(%rdi),%xmm3
- leaq 32(%rdi),%rdi
- xorps %xmm10,%xmm2
- xorps %xmm11,%xmm3
-
- call _aesni_decrypt3
-
- xorps %xmm10,%xmm2
- movdqa %xmm12,%xmm10
- xorps %xmm11,%xmm3
- movdqa %xmm13,%xmm11
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- leaq 32(%rsi),%rsi
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_three:
- movups (%rdi),%xmm2
- movups 16(%rdi),%xmm3
- movups 32(%rdi),%xmm4
- leaq 48(%rdi),%rdi
- xorps %xmm10,%xmm2
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
-
- call _aesni_decrypt3
-
- xorps %xmm10,%xmm2
- movdqa %xmm13,%xmm10
- xorps %xmm11,%xmm3
- movdqa %xmm15,%xmm11
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- leaq 48(%rsi),%rsi
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_four:
- pshufd $19,%xmm14,%xmm9
- movdqa %xmm15,%xmm14
- paddq %xmm15,%xmm15
- movups (%rdi),%xmm2
- pand %xmm8,%xmm9
- movups 16(%rdi),%xmm3
- pxor %xmm9,%xmm15
-
- movups 32(%rdi),%xmm4
- xorps %xmm10,%xmm2
- movups 48(%rdi),%xmm5
- leaq 64(%rdi),%rdi
- xorps %xmm11,%xmm3
- xorps %xmm12,%xmm4
- xorps %xmm13,%xmm5
-
- call _aesni_decrypt4
-
- xorps %xmm10,%xmm2
- movdqa %xmm14,%xmm10
- xorps %xmm11,%xmm3
- movdqa %xmm15,%xmm11
- xorps %xmm12,%xmm4
- movups %xmm2,(%rsi)
- xorps %xmm13,%xmm5
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- leaq 64(%rsi),%rsi
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_done:
- andq $15,%r9
- jz .Lxts_dec_ret
-.Lxts_dec_done2:
- movq %r9,%rdx
- movq %r11,%rcx
- movl %r10d,%eax
-
- movups (%rdi),%xmm2
- xorps %xmm11,%xmm2
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_dec1_13:
-.byte 102,15,56,222,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_dec1_13
-.byte 102,15,56,223,209
- xorps %xmm11,%xmm2
- movups %xmm2,(%rsi)
-
-.Lxts_dec_steal:
- movzbl 16(%rdi),%eax
- movzbl (%rsi),%ecx
- leaq 1(%rdi),%rdi
- movb %al,(%rsi)
- movb %cl,16(%rsi)
- leaq 1(%rsi),%rsi
- subq $1,%rdx
- jnz .Lxts_dec_steal
-
- subq %r9,%rsi
- movq %r11,%rcx
- movl %r10d,%eax
-
- movups (%rsi),%xmm2
- xorps %xmm10,%xmm2
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_dec1_14:
-.byte 102,15,56,222,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_dec1_14
-.byte 102,15,56,223,209
- xorps %xmm10,%xmm2
- movups %xmm2,(%rsi)
-
-.Lxts_dec_ret:
- leaq 104(%rsp),%rsp
-.Lxts_dec_epilogue:
- .byte 0xf3,0xc3
-.size aesni_xts_decrypt,.-aesni_xts_decrypt
-.globl aesni_cbc_encrypt
-.type aesni_cbc_encrypt,@function
-.align 16
-aesni_cbc_encrypt:
- testq %rdx,%rdx
- jz .Lcbc_ret
-
- movl 240(%rcx),%r10d
- movq %rcx,%r11
- testl %r9d,%r9d
- jz .Lcbc_decrypt
-
- movups (%r8),%xmm2
- movl %r10d,%eax
- cmpq $16,%rdx
- jb .Lcbc_enc_tail
- subq $16,%rdx
- jmp .Lcbc_enc_loop
-.align 16
-.Lcbc_enc_loop:
- movups (%rdi),%xmm3
- leaq 16(%rdi),%rdi
-
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- xorps %xmm0,%xmm3
- leaq 32(%rcx),%rcx
- xorps %xmm3,%xmm2
-.Loop_enc1_15:
-.byte 102,15,56,220,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_enc1_15
-.byte 102,15,56,221,209
- movl %r10d,%eax
- movq %r11,%rcx
- movups %xmm2,0(%rsi)
- leaq 16(%rsi),%rsi
- subq $16,%rdx
- jnc .Lcbc_enc_loop
- addq $16,%rdx
- jnz .Lcbc_enc_tail
- movups %xmm2,(%r8)
- jmp .Lcbc_ret
-
-.Lcbc_enc_tail:
- movq %rdx,%rcx
- xchgq %rdi,%rsi
-.long 0x9066A4F3
- movl $16,%ecx
- subq %rdx,%rcx
- xorl %eax,%eax
-.long 0x9066AAF3
- leaq -16(%rdi),%rdi
- movl %r10d,%eax
- movq %rdi,%rsi
- movq %r11,%rcx
- xorq %rdx,%rdx
- jmp .Lcbc_enc_loop
-
-.align 16
-.Lcbc_decrypt:
- movups (%r8),%xmm9
- movl %r10d,%eax
- cmpq $112,%rdx
- jbe .Lcbc_dec_tail
- shrl $1,%r10d
- subq $112,%rdx
- movl %r10d,%eax
- movaps %xmm9,-24(%rsp)
- jmp .Lcbc_dec_loop8_enter
-.align 16
-.Lcbc_dec_loop8:
- movaps %xmm0,-24(%rsp)
- movups %xmm9,(%rsi)
- leaq 16(%rsi),%rsi
-.Lcbc_dec_loop8_enter:
- movups (%rcx),%xmm0
- movups (%rdi),%xmm2
- movups 16(%rdi),%xmm3
- movups 16(%rcx),%xmm1
-
- leaq 32(%rcx),%rcx
- movdqu 32(%rdi),%xmm4
- xorps %xmm0,%xmm2
- movdqu 48(%rdi),%xmm5
- xorps %xmm0,%xmm3
- movdqu 64(%rdi),%xmm6
-.byte 102,15,56,222,209
- pxor %xmm0,%xmm4
- movdqu 80(%rdi),%xmm7
-.byte 102,15,56,222,217
- pxor %xmm0,%xmm5
- movdqu 96(%rdi),%xmm8
-.byte 102,15,56,222,225
- pxor %xmm0,%xmm6
- movdqu 112(%rdi),%xmm9
-.byte 102,15,56,222,233
- pxor %xmm0,%xmm7
- decl %eax
-.byte 102,15,56,222,241
- pxor %xmm0,%xmm8
-.byte 102,15,56,222,249
- pxor %xmm0,%xmm9
- movups (%rcx),%xmm0
-.byte 102,68,15,56,222,193
-.byte 102,68,15,56,222,201
- movups 16(%rcx),%xmm1
-
- call .Ldec_loop8_enter
-
- movups (%rdi),%xmm1
- movups 16(%rdi),%xmm0
- xorps -24(%rsp),%xmm2
- xorps %xmm1,%xmm3
- movups 32(%rdi),%xmm1
- xorps %xmm0,%xmm4
- movups 48(%rdi),%xmm0
- xorps %xmm1,%xmm5
- movups 64(%rdi),%xmm1
- xorps %xmm0,%xmm6
- movups 80(%rdi),%xmm0
- xorps %xmm1,%xmm7
- movups 96(%rdi),%xmm1
- xorps %xmm0,%xmm8
- movups 112(%rdi),%xmm0
- xorps %xmm1,%xmm9
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movl %r10d,%eax
- movups %xmm6,64(%rsi)
- movq %r11,%rcx
- movups %xmm7,80(%rsi)
- leaq 128(%rdi),%rdi
- movups %xmm8,96(%rsi)
- leaq 112(%rsi),%rsi
- subq $128,%rdx
- ja .Lcbc_dec_loop8
-
- movaps %xmm9,%xmm2
- movaps %xmm0,%xmm9
- addq $112,%rdx
- jle .Lcbc_dec_tail_collected
- movups %xmm2,(%rsi)
- leal 1(%r10,%r10,1),%eax
- leaq 16(%rsi),%rsi
-.Lcbc_dec_tail:
- movups (%rdi),%xmm2
- movaps %xmm2,%xmm8
- cmpq $16,%rdx
- jbe .Lcbc_dec_one
-
- movups 16(%rdi),%xmm3
- movaps %xmm3,%xmm7
- cmpq $32,%rdx
- jbe .Lcbc_dec_two
-
- movups 32(%rdi),%xmm4
- movaps %xmm4,%xmm6
- cmpq $48,%rdx
- jbe .Lcbc_dec_three
-
- movups 48(%rdi),%xmm5
- cmpq $64,%rdx
- jbe .Lcbc_dec_four
-
- movups 64(%rdi),%xmm6
- cmpq $80,%rdx
- jbe .Lcbc_dec_five
-
- movups 80(%rdi),%xmm7
- cmpq $96,%rdx
- jbe .Lcbc_dec_six
-
- movups 96(%rdi),%xmm8
- movaps %xmm9,-24(%rsp)
- call _aesni_decrypt8
- movups (%rdi),%xmm1
- movups 16(%rdi),%xmm0
- xorps -24(%rsp),%xmm2
- xorps %xmm1,%xmm3
- movups 32(%rdi),%xmm1
- xorps %xmm0,%xmm4
- movups 48(%rdi),%xmm0
- xorps %xmm1,%xmm5
- movups 64(%rdi),%xmm1
- xorps %xmm0,%xmm6
- movups 80(%rdi),%xmm0
- xorps %xmm1,%xmm7
- movups 96(%rdi),%xmm9
- xorps %xmm0,%xmm8
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- movups %xmm7,80(%rsi)
- leaq 96(%rsi),%rsi
- movaps %xmm8,%xmm2
- subq $112,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_one:
- movups (%rcx),%xmm0
- movups 16(%rcx),%xmm1
- leaq 32(%rcx),%rcx
- xorps %xmm0,%xmm2
-.Loop_dec1_16:
-.byte 102,15,56,222,209
- decl %eax
- movups (%rcx),%xmm1
- leaq 16(%rcx),%rcx
- jnz .Loop_dec1_16
-.byte 102,15,56,223,209
- xorps %xmm9,%xmm2
- movaps %xmm8,%xmm9
- subq $16,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_two:
- xorps %xmm4,%xmm4
- call _aesni_decrypt3
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- movups %xmm2,(%rsi)
- movaps %xmm7,%xmm9
- movaps %xmm3,%xmm2
- leaq 16(%rsi),%rsi
- subq $32,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_three:
- call _aesni_decrypt3
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- movups %xmm2,(%rsi)
- xorps %xmm7,%xmm4
- movups %xmm3,16(%rsi)
- movaps %xmm6,%xmm9
- movaps %xmm4,%xmm2
- leaq 32(%rsi),%rsi
- subq $48,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_four:
- call _aesni_decrypt4
- xorps %xmm9,%xmm2
- movups 48(%rdi),%xmm9
- xorps %xmm8,%xmm3
- movups %xmm2,(%rsi)
- xorps %xmm7,%xmm4
- movups %xmm3,16(%rsi)
- xorps %xmm6,%xmm5
- movups %xmm4,32(%rsi)
- movaps %xmm5,%xmm2
- leaq 48(%rsi),%rsi
- subq $64,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_five:
- xorps %xmm7,%xmm7
- call _aesni_decrypt6
- movups 16(%rdi),%xmm1
- movups 32(%rdi),%xmm0
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- xorps %xmm1,%xmm4
- movups 48(%rdi),%xmm1
- xorps %xmm0,%xmm5
- movups 64(%rdi),%xmm9
- xorps %xmm1,%xmm6
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- leaq 64(%rsi),%rsi
- movaps %xmm6,%xmm2
- subq $80,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_six:
- call _aesni_decrypt6
- movups 16(%rdi),%xmm1
- movups 32(%rdi),%xmm0
- xorps %xmm9,%xmm2
- xorps %xmm8,%xmm3
- xorps %xmm1,%xmm4
- movups 48(%rdi),%xmm1
- xorps %xmm0,%xmm5
- movups 64(%rdi),%xmm0
- xorps %xmm1,%xmm6
- movups 80(%rdi),%xmm9
- xorps %xmm0,%xmm7
- movups %xmm2,(%rsi)
- movups %xmm3,16(%rsi)
- movups %xmm4,32(%rsi)
- movups %xmm5,48(%rsi)
- movups %xmm6,64(%rsi)
- leaq 80(%rsi),%rsi
- movaps %xmm7,%xmm2
- subq $96,%rdx
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_tail_collected:
- andq $15,%rdx
- movups %xmm9,(%r8)
- jnz .Lcbc_dec_tail_partial
- movups %xmm2,(%rsi)
- jmp .Lcbc_dec_ret
-.align 16
-.Lcbc_dec_tail_partial:
- movaps %xmm2,-24(%rsp)
- movq $16,%rcx
- movq %rsi,%rdi
- subq %rdx,%rcx
- leaq -24(%rsp),%rsi
-.long 0x9066A4F3
-
-.Lcbc_dec_ret:
-.Lcbc_ret:
- .byte 0xf3,0xc3
-.size aesni_cbc_encrypt,.-aesni_cbc_encrypt
-.globl aesni_set_decrypt_key
-.type aesni_set_decrypt_key,@function
-.align 16
-aesni_set_decrypt_key:
-.byte 0x48,0x83,0xEC,0x08
- call __aesni_set_encrypt_key
- shll $4,%esi
- testl %eax,%eax
- jnz .Ldec_key_ret
- leaq 16(%rdx,%rsi,1),%rdi
-
- movups (%rdx),%xmm0
- movups (%rdi),%xmm1
- movups %xmm0,(%rdi)
- movups %xmm1,(%rdx)
- leaq 16(%rdx),%rdx
- leaq -16(%rdi),%rdi
-
-.Ldec_key_inverse:
- movups (%rdx),%xmm0
- movups (%rdi),%xmm1
-.byte 102,15,56,219,192
-.byte 102,15,56,219,201
- leaq 16(%rdx),%rdx
- leaq -16(%rdi),%rdi
- movups %xmm0,16(%rdi)
- movups %xmm1,-16(%rdx)
- cmpq %rdx,%rdi
- ja .Ldec_key_inverse
-
- movups (%rdx),%xmm0
-.byte 102,15,56,219,192
- movups %xmm0,(%rdi)
-.Ldec_key_ret:
- addq $8,%rsp
- .byte 0xf3,0xc3
-.LSEH_end_set_decrypt_key:
-.size aesni_set_decrypt_key,.-aesni_set_decrypt_key
-.globl aesni_set_encrypt_key
-.type aesni_set_encrypt_key,@function
-.align 16
-aesni_set_encrypt_key:
-__aesni_set_encrypt_key:
-.byte 0x48,0x83,0xEC,0x08
- movq $-1,%rax
- testq %rdi,%rdi
- jz .Lenc_key_ret
- testq %rdx,%rdx
- jz .Lenc_key_ret
-
- movups (%rdi),%xmm0
- xorps %xmm4,%xmm4
- leaq 16(%rdx),%rax
- cmpl $256,%esi
- je .L14rounds
- cmpl $192,%esi
- je .L12rounds
- cmpl $128,%esi
- jne .Lbad_keybits
-
-.L10rounds:
- movl $9,%esi
- movups %xmm0,(%rdx)
-.byte 102,15,58,223,200,1
- call .Lkey_expansion_128_cold
-.byte 102,15,58,223,200,2
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,4
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,8
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,16
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,32
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,64
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,128
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,27
- call .Lkey_expansion_128
-.byte 102,15,58,223,200,54
- call .Lkey_expansion_128
- movups %xmm0,(%rax)
- movl %esi,80(%rax)
- xorl %eax,%eax
- jmp .Lenc_key_ret
-
-.align 16
-.L12rounds:
- movq 16(%rdi),%xmm2
- movl $11,%esi
- movups %xmm0,(%rdx)
-.byte 102,15,58,223,202,1
- call .Lkey_expansion_192a_cold
-.byte 102,15,58,223,202,2
- call .Lkey_expansion_192b
-.byte 102,15,58,223,202,4
- call .Lkey_expansion_192a
-.byte 102,15,58,223,202,8
- call .Lkey_expansion_192b
-.byte 102,15,58,223,202,16
- call .Lkey_expansion_192a
-.byte 102,15,58,223,202,32
- call .Lkey_expansion_192b
-.byte 102,15,58,223,202,64
- call .Lkey_expansion_192a
-.byte 102,15,58,223,202,128
- call .Lkey_expansion_192b
- movups %xmm0,(%rax)
- movl %esi,48(%rax)
- xorq %rax,%rax
- jmp .Lenc_key_ret
-
-.align 16
-.L14rounds:
- movups 16(%rdi),%xmm2
- movl $13,%esi
- leaq 16(%rax),%rax
- movups %xmm0,(%rdx)
- movups %xmm2,16(%rdx)
-.byte 102,15,58,223,202,1
- call .Lkey_expansion_256a_cold
-.byte 102,15,58,223,200,1
- call .Lkey_expansion_256b
-.byte 102,15,58,223,202,2
- call .Lkey_expansion_256a
-.byte 102,15,58,223,200,2
- call .Lkey_expansion_256b
-.byte 102,15,58,223,202,4
- call .Lkey_expansion_256a
-.byte 102,15,58,223,200,4
- call .Lkey_expansion_256b
-.byte 102,15,58,223,202,8
- call .Lkey_expansion_256a
-.byte 102,15,58,223,200,8
- call .Lkey_expansion_256b
-.byte 102,15,58,223,202,16
- call .Lkey_expansion_256a
-.byte 102,15,58,223,200,16
- call .Lkey_expansion_256b
-.byte 102,15,58,223,202,32
- call .Lkey_expansion_256a
-.byte 102,15,58,223,200,32
- call .Lkey_expansion_256b
-.byte 102,15,58,223,202,64
- call .Lkey_expansion_256a
- movups %xmm0,(%rax)
- movl %esi,16(%rax)
- xorq %rax,%rax
- jmp .Lenc_key_ret
-
-.align 16
-.Lbad_keybits:
- movq $-2,%rax
-.Lenc_key_ret:
- addq $8,%rsp
- .byte 0xf3,0xc3
-.LSEH_end_set_encrypt_key:
-
-.align 16
-.Lkey_expansion_128:
- movups %xmm0,(%rax)
- leaq 16(%rax),%rax
-.Lkey_expansion_128_cold:
- shufps $16,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $140,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $255,%xmm1,%xmm1
- xorps %xmm1,%xmm0
- .byte 0xf3,0xc3
-
-.align 16
-.Lkey_expansion_192a:
- movups %xmm0,(%rax)
- leaq 16(%rax),%rax
-.Lkey_expansion_192a_cold:
- movaps %xmm2,%xmm5
-.Lkey_expansion_192b_warm:
- shufps $16,%xmm0,%xmm4
- movdqa %xmm2,%xmm3
- xorps %xmm4,%xmm0
- shufps $140,%xmm0,%xmm4
- pslldq $4,%xmm3
- xorps %xmm4,%xmm0
- pshufd $85,%xmm1,%xmm1
- pxor %xmm3,%xmm2
- pxor %xmm1,%xmm0
- pshufd $255,%xmm0,%xmm3
- pxor %xmm3,%xmm2
- .byte 0xf3,0xc3
-
-.align 16
-.Lkey_expansion_192b:
- movaps %xmm0,%xmm3
- shufps $68,%xmm0,%xmm5
- movups %xmm5,(%rax)
- shufps $78,%xmm2,%xmm3
- movups %xmm3,16(%rax)
- leaq 32(%rax),%rax
- jmp .Lkey_expansion_192b_warm
-
-.align 16
-.Lkey_expansion_256a:
- movups %xmm2,(%rax)
- leaq 16(%rax),%rax
-.Lkey_expansion_256a_cold:
- shufps $16,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $140,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps $255,%xmm1,%xmm1
- xorps %xmm1,%xmm0
- .byte 0xf3,0xc3
-
-.align 16
-.Lkey_expansion_256b:
- movups %xmm0,(%rax)
- leaq 16(%rax),%rax
-
- shufps $16,%xmm2,%xmm4
- xorps %xmm4,%xmm2
- shufps $140,%xmm2,%xmm4
- xorps %xmm4,%xmm2
- shufps $170,%xmm1,%xmm1
- xorps %xmm1,%xmm2
- .byte 0xf3,0xc3
-.size aesni_set_encrypt_key,.-aesni_set_encrypt_key
-.size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
-.align 64
-.Lbswap_mask:
-.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
-.Lincrement32:
-.long 6,6,6,0
-.Lincrement64:
-.long 1,0,0,0
-.Lxts_magic:
-.long 0x87,0,1,0
-
-.byte 65,69,83,32,102,111,114,32,73,110,116,101,108,32,65,69,83,45,78,73,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align 64
diff --git a/app/openssl/crypto/aes/asm/aesni-x86_64.pl b/app/openssl/crypto/aes/asm/aesni-x86_64.pl
deleted file mode 100644
index c9270dfd..00000000
--- a/app/openssl/crypto/aes/asm/aesni-x86_64.pl
+++ /dev/null
@@ -1,3071 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# This module implements support for the Intel AES-NI extension. In the
-# OpenSSL context it's used with the Intel engine, but it can also be
-# used as a drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see
-# below for details].
-#
-# Performance.
-#
-# Given the aes(enc|dec) instructions' latency, the asymptotic
-# performance for non-parallelizable modes such as CBC encrypt is 3.75
-# cycles per byte processed with a 128-bit key. And given their
-# throughput, the asymptotic performance for parallelizable modes is
-# 1.25 cycles per byte. Being an asymptotic limit, it's not something
-# you commonly achieve in reality, but how close does one get? (The
-# arithmetic behind both limits is sketched right after this comment
-# block.) Below are results collected for different modes and block
-# sizes. Pairs of numbers are for en-/
-# decryption.
-#
-#            16-byte     64-byte     256-byte    1-KB        8-KB
-# ECB        4.25/4.25   1.38/1.38   1.28/1.28   1.26/1.26   1.26/1.26
-# CTR        5.42/5.42   1.92/1.92   1.44/1.44   1.28/1.28   1.26/1.26
-# CBC        4.38/4.43   4.15/1.43   4.07/1.32   4.07/1.29   4.06/1.28
-# CCM        5.66/9.42   4.42/5.41   4.16/4.40   4.09/4.15   4.06/4.07
-# OFB        5.42/5.42   4.64/4.64   4.44/4.44   4.39/4.39   4.38/4.38
-# CFB        5.73/5.85   5.56/5.62   5.48/5.56   5.47/5.55   5.47/5.55
-#
-# ECB, CTR, CBC and CCM results are free from EVP overhead. This means
-# that otherwise used 'openssl speed -evp aes-128-??? -engine aesni
-# [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
-# The results were collected with specially crafted speed.c benchmark
-# in order to compare them with results reported in "Intel Advanced
-# Encryption Standard (AES) New Instruction Set" White Paper Revision
-# 3.0 dated May 2010. All above results are consistently better. This
-# module also provides better performance for block sizes smaller than
-# 128 bytes in points *not* represented in the above table.
-#
-# Looking at the results for 8-KB buffer.
-#
-# CFB and OFB results are far from the limit, because the implementation
-# uses the "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
-# single-block aesni_encrypt, which is not the optimal way to go. The
-# CBC encrypt result is unexpectedly high and there is no documented
-# explanation for it. Seemingly there is a small penalty for feeding
-# the result back to the AES unit the way it's done in CBC mode. There
-# is nothing one can do and the result appears optimal. The CCM result
-# is identical to CBC, because CBC-MAC is essentially CBC encrypt without
-# saving output. CCM CTR "stays invisible," because it's neatly
-# interleaved with CBC-MAC. This provides ~30% improvement over a
-# "straightforward" CCM implementation with CTR and CBC-MAC performed
-# disjointly. Parallelizable modes practically achieve the theoretical
-# limit.
-#
-# Looking at how results vary with buffer size.
-#
-# Curves are practically saturated at 1-KB buffer size. In most cases
-# "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
-# The CTR curve doesn't follow this pattern and is the slowest-changing
-# one, with the "256-byte" result being 87% of the "8-KB" one. This is
-# because overhead in CTR mode is the most computationally intensive.
-# Small-block CCM
-# decrypt is slower than encrypt, because first CTR and last CBC-MAC
-# iterations can't be interleaved.
-#
-# Results for 192- and 256-bit keys.
-#
-# EVP-free results were observed to scale perfectly with number of
-# rounds for larger block sizes, i.e. 192-bit result being 10/12 times
-# lower and the 256-bit one - 10/14. Well, in the CBC encrypt case the
-# differences are a tad smaller, because the above-mentioned penalty
-# biases all results by the same constant value. In a similar way,
-# function call overhead affects small-block performance, as well as
-# OFB and CFB results. The differences are not large; the most common
-# coefficients are 10/11.7 and 10/13.4 (as opposed to 10/12.0 and
-# 10/14.0), but one can observe even 10/11.2 and 10/12.4 (CTR, OFB,
-# CFB)...
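
The two limits quoted at the top of this comment block follow mechanically from the instruction timings discussed further down (6-cycle aes[enc|dec] latency on Westmere, one instruction dispatched every 2nd cycle, 10 rounds for a 128-bit key). A minimal back-of-envelope sketch, assuming exactly those figures; the helper below is hypothetical and not part of the module:

    use strict; use warnings;

    # cycles-per-byte limit = per-round cost * rounds / 16-byte AES block
    sub cpb_limit {
        my ($cycles_per_round, $rounds) = @_;
        return $cycles_per_round * $rounds / 16;
    }

    # serial modes (CBC encrypt) pay the full 6-cycle latency per round
    printf "serial limit:   %.2f cpb\n", cpb_limit(6, 10);   # 3.75
    # pipelined modes pay only the 2-cycle issue interval per round
    printf "parallel limit: %.2f cpb\n", cpb_limit(2, 10);   # 1.25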
-
-# January 2011
-#
-# While the Westmere processor features 6 cycles of latency for aes[enc|dec]
-# instructions, which can be scheduled every second cycle, Sandy
-# Bridge spends 8 cycles per instruction, but it can schedule them
-# every cycle. This means that code targeting Westmere would perform
-# suboptimally on Sandy Bridge. Therefore this update.
-#
-# In addition, non-parallelizable CBC encrypt (as well as CCM) is
-# optimized. Relative improvement might appear modest, 8% on Westmere,
-# but in absolute terms it's 3.77 cycles per byte encrypted with
-# 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
-# should be compared to asymptotic limits of 3.75 for Westmere and
-# 5.00 for Sandy Bridge. Actually, the fact that they get this close
-# to asymptotic limits is quite amazing. Indeed, the limit is
-# calculated as latency times number of rounds, 10 for 128-bit key,
-# and divided by 16, the number of bytes in a block, or in other words
-# it accounts *solely* for aesenc instructions. But there are extra
-# instructions, and numbers so close to the asymptotic limits mean
-# that it's as if it takes as little as *one* additional cycle to
-# execute all of them. How is it possible? It is possible thanks to
-# out-of-order execution logic, which manages to overlap post-
-# processing of previous block, things like saving the output, with
-# actual encryption of current block, as well as pre-processing of
-# current block, things like fetching input and xor-ing it with
-# 0-round element of the key schedule, with actual encryption of
-# previous block. Keep this in mind...
-#
-# For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
-# performance is achieved by interleaving instructions working on
-# independent blocks. In that case the asymptotic limit for such modes
-# can be obtained by dividing the above-mentioned numbers by the AES
-# instructions' interleave factor. Westmere can execute at most 3
-# instructions at a time, meaning that the optimal interleave factor is
-# 3, and that's where the "magic" number of 1.25 comes from. "Optimal
-# interleave factor" means that increasing the interleave factor does
-# not improve performance. The formula has proven to reflect reality
-# pretty well on Westmere... Sandy Bridge on the other hand can
-# execute up to 8 AES instructions at a time, so how does varying the
-# interleave factor affect the performance? Here is a table for ECB
-# (numbers are cycles per byte processed with 128-bit key):
-#
-# instruction interleave factor          3x      6x      8x
-# theoretical asymptotic limit           1.67    0.83    0.625
-# measured performance for 8KB block     1.05    0.86    0.84
-#
-# "as if" interleave factor              4.7x    5.8x    6.0x
-#
-# Further data for other parallelizable modes:
-#
-# CBC decrypt                            1.16    0.93    0.93
-# CTR                                    1.14    0.91    n/a
-#
-# Well, given the 3x column it's probably inappropriate to call the
-# limit asymptotic, if it can be surpassed, isn't it? What happens
-# there? Rewind to the CBC paragraph for the answer. Yes, out-of-order
-# execution magic is responsible for this. The processor overlaps not
-# only the additional instructions with AES ones, but even AES
-# instructions processing adjacent triplets of independent blocks. In
-# the 6x case the additional instructions still claim a
-# disproportionately small amount of additional cycles, but in the 8x
-# case the number of instructions must be a tad too high for the
-# out-of-order logic to cope with, and the AES unit remains
-# underutilized... As you can see, 8x interleave is hardly justifiable,
-# so there is no need to feel bad that 32-bit aesni-x86.pl utilizes
-# 6x interleave because of limited register bank capacity.
-#
-# Higher interleave factors do have a negative impact on Westmere
-# performance. While for ECB mode it's a negligible ~1.5%, other
-# parallelizable modes perform ~5% worse, which is outweighed by a ~25%
-# improvement on Sandy Bridge. To balance the regression on Westmere,
-# CTR mode was implemented with a 6x aesenc interleave factor.
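
The "theoretical asymptotic limit" row of the ECB table above is the same cycles-per-byte arithmetic divided by the interleave factor. A throwaway check, assuming Sandy Bridge's 8-cycle latency and 10 rounds (hypothetical snippet, not part of the module):

    use strict; use warnings;
    for my $k (3, 6, 8) {
        printf "%dx: %.3f cpb\n", $k, 8 * 10 / 16 / $k;   # 1.667, 0.833, 0.625
    }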
-
-# April 2011
-#
-# Add aesni_xts_[en|de]crypt. Westmere spends 1.33 cycles processing
-# one byte out of 8KB with a 128-bit key, Sandy Bridge - 0.97. Just like
-# in CTR mode, the AES instruction interleave factor was chosen to be 6x.
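
One detail of the aesni_xts_* routines deleted above deserves a note: the pshufd/pcmpgtd/pand/pxor sequence built around the .Lxts_magic constant (.long 0x87,0,1,0 in the generated .S file) is a vectorized doubling of the 128-bit tweak in GF(2^128) modulo x^128+x^7+x^2+x+1. A minimal scalar sketch of one doubling step, assuming a Perl built with 64-bit integers (the helper is hypothetical, not part of the module):

    use strict; use warnings;

    # Double an XTS tweak given as (lo, hi) 64-bit halves. When the top
    # bit falls off, reduce by XORing 0x87 into the low byte; that is the
    # per-block update the .Lxts_magic sequence computes with SSE.
    sub xts_double_tweak {
        my ($lo, $hi) = @_;
        my $carry = ($hi >> 63) & 1;
        $hi = (($hi << 1) | (($lo >> 63) & 1)) & 0xFFFF_FFFF_FFFF_FFFF;
        $lo = (($lo << 1) & 0xFFFF_FFFF_FFFF_FFFF) ^ ($carry ? 0x87 : 0);
        return ($lo, $hi);
    }

    my ($lo, $hi) = xts_double_tweak(0, 0x8000_0000_0000_0000);
    printf "%016x%016x\n", $hi, $lo;   # 00000000000000000000000000000087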
-
-$PREFIX="aesni"; # if $PREFIX is set to "AES", the script
- # generates drop-in replacement for
- # crypto/aes/asm/aes-x86_64.pl:-)
-
-$flavour = shift;
-$output = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-$movkey = $PREFIX eq "aesni" ? "movups" : "movups";
-@_4args=$win64? ("%rcx","%rdx","%r8", "%r9") : # Win64 order
- ("%rdi","%rsi","%rdx","%rcx"); # Unix order
-
-$code=".text\n";
-
-$rounds="%eax"; # input to and changed by aesni_[en|de]cryptN !!!
-# this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
-$inp="%rdi";
-$out="%rsi";
-$len="%rdx";
-$key="%rcx"; # input to and changed by aesni_[en|de]cryptN !!!
-$ivp="%r8"; # cbc, ctr, ...
-
-$rnds_="%r10d"; # backup copy for $rounds
-$key_="%r11"; # backup copy for $key
-
-# %xmm register layout
-$rndkey0="%xmm0"; $rndkey1="%xmm1";
-$inout0="%xmm2"; $inout1="%xmm3";
-$inout2="%xmm4"; $inout3="%xmm5";
-$inout4="%xmm6"; $inout5="%xmm7";
-$inout6="%xmm8"; $inout7="%xmm9";
-
-$in2="%xmm6"; $in1="%xmm7"; # used in CBC decrypt, CTR, ...
-$in0="%xmm8"; $iv="%xmm9";
-
-# Inline version of internal aesni_[en|de]crypt1.
-#
-# Why a folded loop? Because aes[enc|dec] is slow enough to accommodate
-# cycles which take care of loop variables...
-{ my $sn;
-sub aesni_generate1 {
-my ($p,$key,$rounds,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
-++$sn;
-$code.=<<___;
- $movkey ($key),$rndkey0
- $movkey 16($key),$rndkey1
-___
-$code.=<<___ if (defined($ivec));
- xorps $rndkey0,$ivec
- lea 32($key),$key
- xorps $ivec,$inout
-___
-$code.=<<___ if (!defined($ivec));
- lea 32($key),$key
- xorps $rndkey0,$inout
-___
-$code.=<<___;
-.Loop_${p}1_$sn:
- aes${p} $rndkey1,$inout
- dec $rounds
- $movkey ($key),$rndkey1
- lea 16($key),$key
- jnz .Loop_${p}1_$sn # loop body is 16 bytes
- aes${p}last $rndkey1,$inout
-___
-}}
-# void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
-#
-{ my ($inp,$out,$key) = @_4args;
-
-$code.=<<___;
-.globl ${PREFIX}_encrypt
-.type ${PREFIX}_encrypt,\@abi-omnipotent
-.align 16
-${PREFIX}_encrypt:
- movups ($inp),$inout0 # load input
- mov 240($key),$rounds # key->rounds
-___
- &aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
- movups $inout0,($out) # output
- ret
-.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
-
-.globl ${PREFIX}_decrypt
-.type ${PREFIX}_decrypt,\@abi-omnipotent
-.align 16
-${PREFIX}_decrypt:
- movups ($inp),$inout0 # load input
- mov 240($key),$rounds # key->rounds
-___
- &aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
- movups $inout0,($out) # output
- ret
-.size ${PREFIX}_decrypt, .-${PREFIX}_decrypt
-___
-}
-
-# _aesni_[en|de]cryptN are private interfaces, N denotes interleave
-# factor. Why were 3x subroutines originally used in loops? Even though
-# aes[enc|dec] latency was originally 6, it could be scheduled only
-# every *2nd* cycle. Thus 3x interleave was the one providing optimal
-# utilization, i.e. when the subroutine's throughput is virtually the
-# same as that of a non-interleaved subroutine [for up to 3 input
-# blocks]. This is why it makes no sense to implement a 2x subroutine.
-# aes[enc|dec] latency in the next processor generation is 8, but the
-# instructions can be scheduled every cycle. The optimal interleave for
-# the new processor is therefore 8x...
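
In other words, the optimal interleave factor is simply latency divided by issue interval, which is where the 3x and 8x figures come from (a trivial hypothetical check, not part of the module):

    use strict; use warnings;
    use POSIX qw(ceil);
    sub optimal_interleave { ceil($_[0] / $_[1]) }   # latency / issue interval
    print optimal_interleave(6, 2), "x\n";           # Westmere: 3x
    print optimal_interleave(8, 1), "x\n";           # next generation: 8x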
-sub aesni_generate3 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-2] is cipher/clear text...
-$code.=<<___;
-.type _aesni_${dir}rypt3,\@abi-omnipotent
-.align 16
-_aesni_${dir}rypt3:
- $movkey ($key),$rndkey0
- shr \$1,$rounds
- $movkey 16($key),$rndkey1
- lea 32($key),$key
- xorps $rndkey0,$inout0
- xorps $rndkey0,$inout1
- xorps $rndkey0,$inout2
- $movkey ($key),$rndkey0
-
-.L${dir}_loop3:
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- dec $rounds
- aes${dir} $rndkey1,$inout2
- $movkey 16($key),$rndkey1
- aes${dir} $rndkey0,$inout0
- aes${dir} $rndkey0,$inout1
- lea 32($key),$key
- aes${dir} $rndkey0,$inout2
- $movkey ($key),$rndkey0
- jnz .L${dir}_loop3
-
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- aes${dir} $rndkey1,$inout2
- aes${dir}last $rndkey0,$inout0
- aes${dir}last $rndkey0,$inout1
- aes${dir}last $rndkey0,$inout2
- ret
-.size _aesni_${dir}rypt3,.-_aesni_${dir}rypt3
-___
-}
-# 4x interleave is implemented to improve small-block performance,
-# most notably [and naturally] 4-block by ~30%. One can argue that one
-# should have implemented 5x as well, but the improvement would be <20%,
-# so it's not worth it...
-sub aesni_generate4 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-3] is cipher/clear text...
-$code.=<<___;
-.type _aesni_${dir}rypt4,\@abi-omnipotent
-.align 16
-_aesni_${dir}rypt4:
- $movkey ($key),$rndkey0
- shr \$1,$rounds
- $movkey 16($key),$rndkey1
- lea 32($key),$key
- xorps $rndkey0,$inout0
- xorps $rndkey0,$inout1
- xorps $rndkey0,$inout2
- xorps $rndkey0,$inout3
- $movkey ($key),$rndkey0
-
-.L${dir}_loop4:
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- dec $rounds
- aes${dir} $rndkey1,$inout2
- aes${dir} $rndkey1,$inout3
- $movkey 16($key),$rndkey1
- aes${dir} $rndkey0,$inout0
- aes${dir} $rndkey0,$inout1
- lea 32($key),$key
- aes${dir} $rndkey0,$inout2
- aes${dir} $rndkey0,$inout3
- $movkey ($key),$rndkey0
- jnz .L${dir}_loop4
-
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- aes${dir} $rndkey1,$inout2
- aes${dir} $rndkey1,$inout3
- aes${dir}last $rndkey0,$inout0
- aes${dir}last $rndkey0,$inout1
- aes${dir}last $rndkey0,$inout2
- aes${dir}last $rndkey0,$inout3
- ret
-.size _aesni_${dir}rypt4,.-_aesni_${dir}rypt4
-___
-}
-sub aesni_generate6 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-5] is cipher/clear text...
-$code.=<<___;
-.type _aesni_${dir}rypt6,\@abi-omnipotent
-.align 16
-_aesni_${dir}rypt6:
- $movkey ($key),$rndkey0
- shr \$1,$rounds
- $movkey 16($key),$rndkey1
- lea 32($key),$key
- xorps $rndkey0,$inout0
- pxor $rndkey0,$inout1
- aes${dir} $rndkey1,$inout0
- pxor $rndkey0,$inout2
- aes${dir} $rndkey1,$inout1
- pxor $rndkey0,$inout3
- aes${dir} $rndkey1,$inout2
- pxor $rndkey0,$inout4
- aes${dir} $rndkey1,$inout3
- pxor $rndkey0,$inout5
- dec $rounds
- aes${dir} $rndkey1,$inout4
- $movkey ($key),$rndkey0
- aes${dir} $rndkey1,$inout5
- jmp .L${dir}_loop6_enter
-.align 16
-.L${dir}_loop6:
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- dec $rounds
- aes${dir} $rndkey1,$inout2
- aes${dir} $rndkey1,$inout3
- aes${dir} $rndkey1,$inout4
- aes${dir} $rndkey1,$inout5
-.L${dir}_loop6_enter: # happens to be 16-byte aligned
- $movkey 16($key),$rndkey1
- aes${dir} $rndkey0,$inout0
- aes${dir} $rndkey0,$inout1
- lea 32($key),$key
- aes${dir} $rndkey0,$inout2
- aes${dir} $rndkey0,$inout3
- aes${dir} $rndkey0,$inout4
- aes${dir} $rndkey0,$inout5
- $movkey ($key),$rndkey0
- jnz .L${dir}_loop6
-
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- aes${dir} $rndkey1,$inout2
- aes${dir} $rndkey1,$inout3
- aes${dir} $rndkey1,$inout4
- aes${dir} $rndkey1,$inout5
- aes${dir}last $rndkey0,$inout0
- aes${dir}last $rndkey0,$inout1
- aes${dir}last $rndkey0,$inout2
- aes${dir}last $rndkey0,$inout3
- aes${dir}last $rndkey0,$inout4
- aes${dir}last $rndkey0,$inout5
- ret
-.size _aesni_${dir}rypt6,.-_aesni_${dir}rypt6
-___
-}
-sub aesni_generate8 {
-my $dir=shift;
-# As already mentioned it takes in $key and $rounds, which are *not*
-# preserved. $inout[0-7] is cipher/clear text...
-$code.=<<___;
-.type _aesni_${dir}rypt8,\@abi-omnipotent
-.align 16
-_aesni_${dir}rypt8:
- $movkey ($key),$rndkey0
- shr \$1,$rounds
- $movkey 16($key),$rndkey1
- lea 32($key),$key
- xorps $rndkey0,$inout0
- xorps $rndkey0,$inout1
- aes${dir} $rndkey1,$inout0
- pxor $rndkey0,$inout2
- aes${dir} $rndkey1,$inout1
- pxor $rndkey0,$inout3
- aes${dir} $rndkey1,$inout2
- pxor $rndkey0,$inout4
- aes${dir} $rndkey1,$inout3
- pxor $rndkey0,$inout5
- dec $rounds
- aes${dir} $rndkey1,$inout4
- pxor $rndkey0,$inout6
- aes${dir} $rndkey1,$inout5
- pxor $rndkey0,$inout7
- $movkey ($key),$rndkey0
- aes${dir} $rndkey1,$inout6
- aes${dir} $rndkey1,$inout7
- $movkey 16($key),$rndkey1
- jmp .L${dir}_loop8_enter
-.align 16
-.L${dir}_loop8:
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- dec $rounds
- aes${dir} $rndkey1,$inout2
- aes${dir} $rndkey1,$inout3
- aes${dir} $rndkey1,$inout4
- aes${dir} $rndkey1,$inout5
- aes${dir} $rndkey1,$inout6
- aes${dir} $rndkey1,$inout7
- $movkey 16($key),$rndkey1
-.L${dir}_loop8_enter: # happens to be 16-byte aligned
- aes${dir} $rndkey0,$inout0
- aes${dir} $rndkey0,$inout1
- lea 32($key),$key
- aes${dir} $rndkey0,$inout2
- aes${dir} $rndkey0,$inout3
- aes${dir} $rndkey0,$inout4
- aes${dir} $rndkey0,$inout5
- aes${dir} $rndkey0,$inout6
- aes${dir} $rndkey0,$inout7
- $movkey ($key),$rndkey0
- jnz .L${dir}_loop8
-
- aes${dir} $rndkey1,$inout0
- aes${dir} $rndkey1,$inout1
- aes${dir} $rndkey1,$inout2
- aes${dir} $rndkey1,$inout3
- aes${dir} $rndkey1,$inout4
- aes${dir} $rndkey1,$inout5
- aes${dir} $rndkey1,$inout6
- aes${dir} $rndkey1,$inout7
- aes${dir}last $rndkey0,$inout0
- aes${dir}last $rndkey0,$inout1
- aes${dir}last $rndkey0,$inout2
- aes${dir}last $rndkey0,$inout3
- aes${dir}last $rndkey0,$inout4
- aes${dir}last $rndkey0,$inout5
- aes${dir}last $rndkey0,$inout6
- aes${dir}last $rndkey0,$inout7
- ret
-.size _aesni_${dir}rypt8,.-_aesni_${dir}rypt8
-___
-}
-&aesni_generate3("enc") if ($PREFIX eq "aesni");
-&aesni_generate3("dec");
-&aesni_generate4("enc") if ($PREFIX eq "aesni");
-&aesni_generate4("dec");
-&aesni_generate6("enc") if ($PREFIX eq "aesni");
-&aesni_generate6("dec");
-&aesni_generate8("enc") if ($PREFIX eq "aesni");
-&aesni_generate8("dec");
-
-if ($PREFIX eq "aesni") {
-########################################################################
-# void aesni_ecb_encrypt (const void *in, void *out,
-# size_t length, const AES_KEY *key,
-# int enc);
-$code.=<<___;
-.globl aesni_ecb_encrypt
-.type aesni_ecb_encrypt,\@function,5
-.align 16
-aesni_ecb_encrypt:
-___
-$code.=<<___ if ($win64);
- lea -0x58(%rsp),%rsp
- movaps %xmm6,(%rsp)
- movaps %xmm7,0x10(%rsp)
- movaps %xmm8,0x20(%rsp)
- movaps %xmm9,0x30(%rsp)
-.Lecb_enc_body:
-___
-$code.=<<___;
- and \$-16,$len
- jz .Lecb_ret
-
- mov 240($key),$rounds # key->rounds
- $movkey ($key),$rndkey0
- mov $key,$key_ # backup $key
- mov $rounds,$rnds_ # backup $rounds
- test %r8d,%r8d # 5th argument
- jz .Lecb_decrypt
-#--------------------------- ECB ENCRYPT ------------------------------#
- cmp \$0x80,$len
- jb .Lecb_enc_tail
-
- movdqu ($inp),$inout0
- movdqu 0x10($inp),$inout1
- movdqu 0x20($inp),$inout2
- movdqu 0x30($inp),$inout3
- movdqu 0x40($inp),$inout4
- movdqu 0x50($inp),$inout5
- movdqu 0x60($inp),$inout6
- movdqu 0x70($inp),$inout7
- lea 0x80($inp),$inp
- sub \$0x80,$len
- jmp .Lecb_enc_loop8_enter
-.align 16
-.Lecb_enc_loop8:
- movups $inout0,($out)
- mov $key_,$key # restore $key
- movdqu ($inp),$inout0
- mov $rnds_,$rounds # restore $rounds
- movups $inout1,0x10($out)
- movdqu 0x10($inp),$inout1
- movups $inout2,0x20($out)
- movdqu 0x20($inp),$inout2
- movups $inout3,0x30($out)
- movdqu 0x30($inp),$inout3
- movups $inout4,0x40($out)
- movdqu 0x40($inp),$inout4
- movups $inout5,0x50($out)
- movdqu 0x50($inp),$inout5
- movups $inout6,0x60($out)
- movdqu 0x60($inp),$inout6
- movups $inout7,0x70($out)
- lea 0x80($out),$out
- movdqu 0x70($inp),$inout7
- lea 0x80($inp),$inp
-.Lecb_enc_loop8_enter:
-
- call _aesni_encrypt8
-
- sub \$0x80,$len
- jnc .Lecb_enc_loop8
-
- movups $inout0,($out)
- mov $key_,$key # restore $key
- movups $inout1,0x10($out)
- mov $rnds_,$rounds # restore $rounds
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- movups $inout5,0x50($out)
- movups $inout6,0x60($out)
- movups $inout7,0x70($out)
- lea 0x80($out),$out
- add \$0x80,$len
- jz .Lecb_ret
-
-.Lecb_enc_tail:
- movups ($inp),$inout0
- cmp \$0x20,$len
- jb .Lecb_enc_one
- movups 0x10($inp),$inout1
- je .Lecb_enc_two
- movups 0x20($inp),$inout2
- cmp \$0x40,$len
- jb .Lecb_enc_three
- movups 0x30($inp),$inout3
- je .Lecb_enc_four
- movups 0x40($inp),$inout4
- cmp \$0x60,$len
- jb .Lecb_enc_five
- movups 0x50($inp),$inout5
- je .Lecb_enc_six
- movdqu 0x60($inp),$inout6
- call _aesni_encrypt8
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- movups $inout5,0x50($out)
- movups $inout6,0x60($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_one:
-___
- &aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
- movups $inout0,($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_two:
- xorps $inout2,$inout2
- call _aesni_encrypt3
- movups $inout0,($out)
- movups $inout1,0x10($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_three:
- call _aesni_encrypt3
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_four:
- call _aesni_encrypt4
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_five:
- xorps $inout5,$inout5
- call _aesni_encrypt6
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_enc_six:
- call _aesni_encrypt6
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- movups $inout5,0x50($out)
- jmp .Lecb_ret
- #--------------------------- ECB DECRYPT ------------------------------#
-.align 16
-.Lecb_decrypt:
- cmp \$0x80,$len
- jb .Lecb_dec_tail
-
- movdqu ($inp),$inout0
- movdqu 0x10($inp),$inout1
- movdqu 0x20($inp),$inout2
- movdqu 0x30($inp),$inout3
- movdqu 0x40($inp),$inout4
- movdqu 0x50($inp),$inout5
- movdqu 0x60($inp),$inout6
- movdqu 0x70($inp),$inout7
- lea 0x80($inp),$inp
- sub \$0x80,$len
- jmp .Lecb_dec_loop8_enter
-.align 16
-.Lecb_dec_loop8:
- movups $inout0,($out)
- mov $key_,$key # restore $key
- movdqu ($inp),$inout0
- mov $rnds_,$rounds # restore $rounds
- movups $inout1,0x10($out)
- movdqu 0x10($inp),$inout1
- movups $inout2,0x20($out)
- movdqu 0x20($inp),$inout2
- movups $inout3,0x30($out)
- movdqu 0x30($inp),$inout3
- movups $inout4,0x40($out)
- movdqu 0x40($inp),$inout4
- movups $inout5,0x50($out)
- movdqu 0x50($inp),$inout5
- movups $inout6,0x60($out)
- movdqu 0x60($inp),$inout6
- movups $inout7,0x70($out)
- lea 0x80($out),$out
- movdqu 0x70($inp),$inout7
- lea 0x80($inp),$inp
-.Lecb_dec_loop8_enter:
-
- call _aesni_decrypt8
-
- $movkey ($key_),$rndkey0
- sub \$0x80,$len
- jnc .Lecb_dec_loop8
-
- movups $inout0,($out)
- mov $key_,$key # restore $key
- movups $inout1,0x10($out)
- mov $rnds_,$rounds # restore $rounds
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- movups $inout5,0x50($out)
- movups $inout6,0x60($out)
- movups $inout7,0x70($out)
- lea 0x80($out),$out
- add \$0x80,$len
- jz .Lecb_ret
-
-.Lecb_dec_tail:
- movups ($inp),$inout0
- cmp \$0x20,$len
- jb .Lecb_dec_one
- movups 0x10($inp),$inout1
- je .Lecb_dec_two
- movups 0x20($inp),$inout2
- cmp \$0x40,$len
- jb .Lecb_dec_three
- movups 0x30($inp),$inout3
- je .Lecb_dec_four
- movups 0x40($inp),$inout4
- cmp \$0x60,$len
- jb .Lecb_dec_five
- movups 0x50($inp),$inout5
- je .Lecb_dec_six
- movups 0x60($inp),$inout6
- $movkey ($key),$rndkey0
- call _aesni_decrypt8
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- movups $inout5,0x50($out)
- movups $inout6,0x60($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_one:
-___
- &aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
- movups $inout0,($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_two:
- xorps $inout2,$inout2
- call _aesni_decrypt3
- movups $inout0,($out)
- movups $inout1,0x10($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_three:
- call _aesni_decrypt3
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_four:
- call _aesni_decrypt4
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_five:
- xorps $inout5,$inout5
- call _aesni_decrypt6
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- jmp .Lecb_ret
-.align 16
-.Lecb_dec_six:
- call _aesni_decrypt6
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- movups $inout5,0x50($out)
-
-.Lecb_ret:
-___
-$code.=<<___ if ($win64);
- movaps (%rsp),%xmm6
- movaps 0x10(%rsp),%xmm7
- movaps 0x20(%rsp),%xmm8
- movaps 0x30(%rsp),%xmm9
- lea 0x58(%rsp),%rsp
-.Lecb_enc_ret:
-___
-$code.=<<___;
- ret
-.size aesni_ecb_encrypt,.-aesni_ecb_encrypt
-___
-
-{
-######################################################################
-# void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
-# size_t blocks, const AES_KEY *key,
-# const char *ivec,char *cmac);
-#
-# Handles only complete blocks, operates on a 64-bit counter and
-# does not update *ivec! Nor does it finalize the CMAC value
-# (see engine/eng_aesni.c for details)
-#
-{
-my $cmac="%r9"; # 6th argument
-
-my $increment="%xmm6";
-my $bswap_mask="%xmm7";
-
-$code.=<<___;
-.globl aesni_ccm64_encrypt_blocks
-.type aesni_ccm64_encrypt_blocks,\@function,6
-.align 16
-aesni_ccm64_encrypt_blocks:
-___
-$code.=<<___ if ($win64);
- lea -0x58(%rsp),%rsp
- movaps %xmm6,(%rsp)
- movaps %xmm7,0x10(%rsp)
- movaps %xmm8,0x20(%rsp)
- movaps %xmm9,0x30(%rsp)
-.Lccm64_enc_body:
-___
-$code.=<<___;
- mov 240($key),$rounds # key->rounds
- movdqu ($ivp),$iv
- movdqa .Lincrement64(%rip),$increment
- movdqa .Lbswap_mask(%rip),$bswap_mask
-
- shr \$1,$rounds
- lea 0($key),$key_
- movdqu ($cmac),$inout1
- movdqa $iv,$inout0
- mov $rounds,$rnds_
- pshufb $bswap_mask,$iv
- jmp .Lccm64_enc_outer
-.align 16
-.Lccm64_enc_outer:
- $movkey ($key_),$rndkey0
- mov $rnds_,$rounds
- movups ($inp),$in0 # load inp
-
- xorps $rndkey0,$inout0 # counter
- $movkey 16($key_),$rndkey1
- xorps $in0,$rndkey0
- lea 32($key_),$key
- xorps $rndkey0,$inout1 # cmac^=inp
- $movkey ($key),$rndkey0
-
-.Lccm64_enc2_loop:
- aesenc $rndkey1,$inout0
- dec $rounds
- aesenc $rndkey1,$inout1
- $movkey 16($key),$rndkey1
- aesenc $rndkey0,$inout0
- lea 32($key),$key
- aesenc $rndkey0,$inout1
- $movkey 0($key),$rndkey0
- jnz .Lccm64_enc2_loop
- aesenc $rndkey1,$inout0
- aesenc $rndkey1,$inout1
- paddq $increment,$iv
- aesenclast $rndkey0,$inout0
- aesenclast $rndkey0,$inout1
-
- dec $len
- lea 16($inp),$inp
- xorps $inout0,$in0 # inp ^= E(iv)
- movdqa $iv,$inout0
- movups $in0,($out) # save output
- lea 16($out),$out
- pshufb $bswap_mask,$inout0
- jnz .Lccm64_enc_outer
-
- movups $inout1,($cmac)
-___
-$code.=<<___ if ($win64);
- movaps (%rsp),%xmm6
- movaps 0x10(%rsp),%xmm7
- movaps 0x20(%rsp),%xmm8
- movaps 0x30(%rsp),%xmm9
- lea 0x58(%rsp),%rsp
-.Lccm64_enc_ret:
-___
-$code.=<<___;
- ret
-.size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
-___
-######################################################################
-$code.=<<___;
-.globl aesni_ccm64_decrypt_blocks
-.type aesni_ccm64_decrypt_blocks,\@function,6
-.align 16
-aesni_ccm64_decrypt_blocks:
-___
-$code.=<<___ if ($win64);
- lea -0x58(%rsp),%rsp
- movaps %xmm6,(%rsp)
- movaps %xmm7,0x10(%rsp)
- movaps %xmm8,0x20(%rsp)
- movaps %xmm9,0x30(%rsp)
-.Lccm64_dec_body:
-___
-$code.=<<___;
- mov 240($key),$rounds # key->rounds
- movups ($ivp),$iv
- movdqu ($cmac),$inout1
- movdqa .Lincrement64(%rip),$increment
- movdqa .Lbswap_mask(%rip),$bswap_mask
-
- movaps $iv,$inout0
- mov $rounds,$rnds_
- mov $key,$key_
- pshufb $bswap_mask,$iv
-___
- &aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
- movups ($inp),$in0 # load inp
- paddq $increment,$iv
- lea 16($inp),$inp
- jmp .Lccm64_dec_outer
-.align 16
-.Lccm64_dec_outer:
- xorps $inout0,$in0 # inp ^= E(iv)
- movdqa $iv,$inout0
- mov $rnds_,$rounds
- movups $in0,($out) # save output
- lea 16($out),$out
- pshufb $bswap_mask,$inout0
-
- sub \$1,$len
- jz .Lccm64_dec_break
-
- $movkey ($key_),$rndkey0
- shr \$1,$rounds
- $movkey 16($key_),$rndkey1
- xorps $rndkey0,$in0
- lea 32($key_),$key
- xorps $rndkey0,$inout0
- xorps $in0,$inout1 # cmac^=out
- $movkey ($key),$rndkey0
-
-.Lccm64_dec2_loop:
- aesenc $rndkey1,$inout0
- dec $rounds
- aesenc $rndkey1,$inout1
- $movkey 16($key),$rndkey1
- aesenc $rndkey0,$inout0
- lea 32($key),$key
- aesenc $rndkey0,$inout1
- $movkey 0($key),$rndkey0
- jnz .Lccm64_dec2_loop
- movups ($inp),$in0 # load inp
- paddq $increment,$iv
- aesenc $rndkey1,$inout0
- aesenc $rndkey1,$inout1
- lea 16($inp),$inp
- aesenclast $rndkey0,$inout0
- aesenclast $rndkey0,$inout1
- jmp .Lccm64_dec_outer
-
-.align 16
-.Lccm64_dec_break:
- #xorps $in0,$inout1 # cmac^=out
-___
- &aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
-$code.=<<___;
- movups $inout1,($cmac)
-___
-$code.=<<___ if ($win64);
- movaps (%rsp),%xmm6
- movaps 0x10(%rsp),%xmm7
- movaps 0x20(%rsp),%xmm8
- movaps 0x30(%rsp),%xmm9
- lea 0x58(%rsp),%rsp
-.Lccm64_dec_ret:
-___
-$code.=<<___;
- ret
-.size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
-___
-}
-######################################################################
-# void aesni_ctr32_encrypt_blocks (const void *in, void *out,
-# size_t blocks, const AES_KEY *key,
-# const char *ivec);
-#
-# Handles only complete blocks, operates on a 32-bit counter and
-# does not update *ivec! (see engine/eng_aesni.c for details)
-#
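
To make the counter convention concrete: only the last, big-endian dword of the 16-byte ivec acts as the counter, it wraps modulo 2^32, and the caller's buffer is left untouched. A minimal sketch with a hypothetical helper (not part of this module):

    use strict; use warnings;

    # Advance the 32-bit big-endian counter in the last 4 bytes of a
    # 16-byte CTR ivec; the first 12 bytes pass through unchanged.
    sub ctr32_next {
        my ($ivec) = @_;                         # works on a copy
        my $ctr = unpack "N", substr($ivec, 12, 4);
        substr($ivec, 12, 4) = pack "N", ($ctr + 1) & 0xFFFFFFFF;
        return $ivec;
    }

    my $iv = "\x00" x 12 . "\xff\xff\xff\xff";
    print unpack("H*", ctr32_next($iv)), "\n";   # counter wraps to 00000000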
-{
-my $reserved = $win64?0:-0x28;
-my ($in0,$in1,$in2,$in3)=map("%xmm$_",(8..11));
-my ($iv0,$iv1,$ivec)=("%xmm12","%xmm13","%xmm14");
-my $bswap_mask="%xmm15";
-
-$code.=<<___;
-.globl aesni_ctr32_encrypt_blocks
-.type aesni_ctr32_encrypt_blocks,\@function,5
-.align 16
-aesni_ctr32_encrypt_blocks:
-___
-$code.=<<___ if ($win64);
- lea -0xc8(%rsp),%rsp
- movaps %xmm6,0x20(%rsp)
- movaps %xmm7,0x30(%rsp)
- movaps %xmm8,0x40(%rsp)
- movaps %xmm9,0x50(%rsp)
- movaps %xmm10,0x60(%rsp)
- movaps %xmm11,0x70(%rsp)
- movaps %xmm12,0x80(%rsp)
- movaps %xmm13,0x90(%rsp)
- movaps %xmm14,0xa0(%rsp)
- movaps %xmm15,0xb0(%rsp)
-.Lctr32_body:
-___
-$code.=<<___;
- cmp \$1,$len
- je .Lctr32_one_shortcut
-
- movdqu ($ivp),$ivec
- movdqa .Lbswap_mask(%rip),$bswap_mask
- xor $rounds,$rounds
- pextrd \$3,$ivec,$rnds_ # pull 32-bit counter
- pinsrd \$3,$rounds,$ivec # wipe 32-bit counter
-
- mov 240($key),$rounds # key->rounds
- bswap $rnds_
- pxor $iv0,$iv0 # vector of 3 32-bit counters
- pxor $iv1,$iv1 # vector of 3 32-bit counters
- pinsrd \$0,$rnds_,$iv0
- lea 3($rnds_),$key_
- pinsrd \$0,$key_,$iv1
- inc $rnds_
- pinsrd \$1,$rnds_,$iv0
- inc $key_
- pinsrd \$1,$key_,$iv1
- inc $rnds_
- pinsrd \$2,$rnds_,$iv0
- inc $key_
- pinsrd \$2,$key_,$iv1
- movdqa $iv0,$reserved(%rsp)
- pshufb $bswap_mask,$iv0
- movdqa $iv1,`$reserved+0x10`(%rsp)
- pshufb $bswap_mask,$iv1
-
- pshufd \$`3<<6`,$iv0,$inout0 # place counter in upper dword
- pshufd \$`2<<6`,$iv0,$inout1
- pshufd \$`1<<6`,$iv0,$inout2
- cmp \$6,$len
- jb .Lctr32_tail
- shr \$1,$rounds
- mov $key,$key_ # backup $key
- mov $rounds,$rnds_ # backup $rounds
- sub \$6,$len
- jmp .Lctr32_loop6
-
-.align 16
-.Lctr32_loop6:
- pshufd \$`3<<6`,$iv1,$inout3
- por $ivec,$inout0 # merge counter-less ivec
- $movkey ($key_),$rndkey0
- pshufd \$`2<<6`,$iv1,$inout4
- por $ivec,$inout1
- $movkey 16($key_),$rndkey1
- pshufd \$`1<<6`,$iv1,$inout5
- por $ivec,$inout2
- por $ivec,$inout3
- xorps $rndkey0,$inout0
- por $ivec,$inout4
- por $ivec,$inout5
-
- # inline _aesni_encrypt6 and interleave last rounds
- # with own code...
-
- pxor $rndkey0,$inout1
- aesenc $rndkey1,$inout0
- lea 32($key_),$key
- pxor $rndkey0,$inout2
- aesenc $rndkey1,$inout1
- movdqa .Lincrement32(%rip),$iv1
- pxor $rndkey0,$inout3
- aesenc $rndkey1,$inout2
- movdqa $reserved(%rsp),$iv0
- pxor $rndkey0,$inout4
- aesenc $rndkey1,$inout3
- pxor $rndkey0,$inout5
- $movkey ($key),$rndkey0
- dec $rounds
- aesenc $rndkey1,$inout4
- aesenc $rndkey1,$inout5
- jmp .Lctr32_enc_loop6_enter
-.align 16
-.Lctr32_enc_loop6:
- aesenc $rndkey1,$inout0
- aesenc $rndkey1,$inout1
- dec $rounds
- aesenc $rndkey1,$inout2
- aesenc $rndkey1,$inout3
- aesenc $rndkey1,$inout4
- aesenc $rndkey1,$inout5
-.Lctr32_enc_loop6_enter:
- $movkey 16($key),$rndkey1
- aesenc $rndkey0,$inout0
- aesenc $rndkey0,$inout1
- lea 32($key),$key
- aesenc $rndkey0,$inout2
- aesenc $rndkey0,$inout3
- aesenc $rndkey0,$inout4
- aesenc $rndkey0,$inout5
- $movkey ($key),$rndkey0
- jnz .Lctr32_enc_loop6
-
- aesenc $rndkey1,$inout0
- paddd $iv1,$iv0 # increment counter vector
- aesenc $rndkey1,$inout1
- paddd `$reserved+0x10`(%rsp),$iv1
- aesenc $rndkey1,$inout2
- movdqa $iv0,$reserved(%rsp) # save counter vector
- aesenc $rndkey1,$inout3
- movdqa $iv1,`$reserved+0x10`(%rsp)
- aesenc $rndkey1,$inout4
- pshufb $bswap_mask,$iv0 # byte swap
- aesenc $rndkey1,$inout5
- pshufb $bswap_mask,$iv1
-
- aesenclast $rndkey0,$inout0
- movups ($inp),$in0 # load input
- aesenclast $rndkey0,$inout1
- movups 0x10($inp),$in1
- aesenclast $rndkey0,$inout2
- movups 0x20($inp),$in2
- aesenclast $rndkey0,$inout3
- movups 0x30($inp),$in3
- aesenclast $rndkey0,$inout4
- movups 0x40($inp),$rndkey1
- aesenclast $rndkey0,$inout5
- movups 0x50($inp),$rndkey0
- lea 0x60($inp),$inp
-
- xorps $inout0,$in0 # xor
- pshufd \$`3<<6`,$iv0,$inout0
- xorps $inout1,$in1
- pshufd \$`2<<6`,$iv0,$inout1
- movups $in0,($out) # store output
- xorps $inout2,$in2
- pshufd \$`1<<6`,$iv0,$inout2
- movups $in1,0x10($out)
- xorps $inout3,$in3
- movups $in2,0x20($out)
- xorps $inout4,$rndkey1
- movups $in3,0x30($out)
- xorps $inout5,$rndkey0
- movups $rndkey1,0x40($out)
- movups $rndkey0,0x50($out)
- lea 0x60($out),$out
- mov $rnds_,$rounds
- sub \$6,$len
- jnc .Lctr32_loop6
-
- add \$6,$len
- jz .Lctr32_done
- mov $key_,$key # restore $key
- lea 1($rounds,$rounds),$rounds # restore original value
-
-.Lctr32_tail:
- por $ivec,$inout0
- movups ($inp),$in0
- cmp \$2,$len
- jb .Lctr32_one
-
- por $ivec,$inout1
- movups 0x10($inp),$in1
- je .Lctr32_two
-
- pshufd \$`3<<6`,$iv1,$inout3
- por $ivec,$inout2
- movups 0x20($inp),$in2
- cmp \$4,$len
- jb .Lctr32_three
-
- pshufd \$`2<<6`,$iv1,$inout4
- por $ivec,$inout3
- movups 0x30($inp),$in3
- je .Lctr32_four
-
- por $ivec,$inout4
- xorps $inout5,$inout5
-
- call _aesni_encrypt6
-
- movups 0x40($inp),$rndkey1
- xorps $inout0,$in0
- xorps $inout1,$in1
- movups $in0,($out)
- xorps $inout2,$in2
- movups $in1,0x10($out)
- xorps $inout3,$in3
- movups $in2,0x20($out)
- xorps $inout4,$rndkey1
- movups $in3,0x30($out)
- movups $rndkey1,0x40($out)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_one_shortcut:
- movups ($ivp),$inout0
- movups ($inp),$in0
- mov 240($key),$rounds # key->rounds
-.Lctr32_one:
-___
- &aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
- xorps $inout0,$in0
- movups $in0,($out)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_two:
- xorps $inout2,$inout2
- call _aesni_encrypt3
- xorps $inout0,$in0
- xorps $inout1,$in1
- movups $in0,($out)
- movups $in1,0x10($out)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_three:
- call _aesni_encrypt3
- xorps $inout0,$in0
- xorps $inout1,$in1
- movups $in0,($out)
- xorps $inout2,$in2
- movups $in1,0x10($out)
- movups $in2,0x20($out)
- jmp .Lctr32_done
-
-.align 16
-.Lctr32_four:
- call _aesni_encrypt4
- xorps $inout0,$in0
- xorps $inout1,$in1
- movups $in0,($out)
- xorps $inout2,$in2
- movups $in1,0x10($out)
- xorps $inout3,$in3
- movups $in2,0x20($out)
- movups $in3,0x30($out)
-
-.Lctr32_done:
-___
-$code.=<<___ if ($win64);
- movaps 0x20(%rsp),%xmm6
- movaps 0x30(%rsp),%xmm7
- movaps 0x40(%rsp),%xmm8
- movaps 0x50(%rsp),%xmm9
- movaps 0x60(%rsp),%xmm10
- movaps 0x70(%rsp),%xmm11
- movaps 0x80(%rsp),%xmm12
- movaps 0x90(%rsp),%xmm13
- movaps 0xa0(%rsp),%xmm14
- movaps 0xb0(%rsp),%xmm15
- lea 0xc8(%rsp),%rsp
-.Lctr32_ret:
-___
-$code.=<<___;
- ret
-.size aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
-___
-}
-
-######################################################################
-# void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
-# const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
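A hedged calling sketch: key1 is the data-encryption key, key2 is used once to
encrypt the 16-byte tweak, and len may be any value of 16 or more since the
tail is handled by ciphertext stealing. Encoding the sector number
little-endian into the tweak is a common convention, not something this
routine mandates (xts_sector_demo is illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <openssl/aes.h>

    extern void aesni_xts_encrypt(const char *inp, char *out, size_t len,
                                  const AES_KEY *key1, const AES_KEY *key2,
                                  const unsigned char iv[16]);

    static void xts_sector_demo(const char *in, char *out, size_t len,
                                const AES_KEY *k1, const AES_KEY *k2,
                                uint64_t sector)
    {
        unsigned char iv[16] = {0};
        for (int i = 0; i < 8; i++)          /* sector number as tweak seed */
            iv[i] = (unsigned char)(sector >> (8 * i));
        aesni_xts_encrypt(in, out, len, k1, k2, iv);
    }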
-{
-my @tweak=map("%xmm$_",(10..15));
-my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
-my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
-my $frame_size = 0x68 + ($win64?160:0);
-
-$code.=<<___;
-.globl aesni_xts_encrypt
-.type aesni_xts_encrypt,\@function,6
-.align 16
-aesni_xts_encrypt:
- lea -$frame_size(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
- movaps %xmm6,0x60(%rsp)
- movaps %xmm7,0x70(%rsp)
- movaps %xmm8,0x80(%rsp)
- movaps %xmm9,0x90(%rsp)
- movaps %xmm10,0xa0(%rsp)
- movaps %xmm11,0xb0(%rsp)
- movaps %xmm12,0xc0(%rsp)
- movaps %xmm13,0xd0(%rsp)
- movaps %xmm14,0xe0(%rsp)
- movaps %xmm15,0xf0(%rsp)
-.Lxts_enc_body:
-___
-$code.=<<___;
- movups ($ivp),@tweak[5] # load clear-text tweak
- mov 240(%r8),$rounds # key2->rounds
- mov 240($key),$rnds_ # key1->rounds
-___
- # generate the tweak
- &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
-$code.=<<___;
- mov $key,$key_ # backup $key
- mov $rnds_,$rounds # backup $rounds
- mov $len,$len_ # backup $len
- and \$-16,$len
-
- movdqa .Lxts_magic(%rip),$twmask
- pxor $twtmp,$twtmp
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
-___
- for ($i=0;$i<4;$i++) {
- $code.=<<___;
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[$i]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- pand $twmask,$twres # isolate carry and residue
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- pxor $twres,@tweak[5]
-___
- }
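The pshufd/pand/pcmpgtd sequence generated by this loop is a branch-free
doubling of the tweak in GF(2^128): shift the 128-bit value left by one bit
and, if a carry falls out of bit 127, fold it back in as 0x87 (the .Lxts_magic
constant). A plain-C rendering of the same update, assuming the little-endian
byte order the tweak has in an XMM register:

    #include <stdint.h>

    static void xts_double_tweak(uint8_t T[16])
    {
        unsigned carry = 0;
        for (int i = 0; i < 16; i++) {      /* 128-bit left shift by one */
            unsigned next = T[i] >> 7;
            T[i] = (uint8_t)((T[i] << 1) | carry);
            carry = next;
        }
        if (carry)
            T[0] ^= 0x87;   /* reduce modulo x^128 + x^7 + x^2 + x + 1 */
    }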
-$code.=<<___;
- sub \$16*6,$len
- jc .Lxts_enc_short
-
- shr \$1,$rounds
- sub \$1,$rounds
- mov $rounds,$rnds_
- jmp .Lxts_enc_grandloop
-
-.align 16
-.Lxts_enc_grandloop:
- pshufd \$0x13,$twtmp,$twres
- movdqa @tweak[5],@tweak[4]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- movdqu `16*0`($inp),$inout0 # load input
- pand $twmask,$twres # isolate carry and residue
- movdqu `16*1`($inp),$inout1
- pxor $twres,@tweak[5]
-
- movdqu `16*2`($inp),$inout2
- pxor @tweak[0],$inout0 # input^=tweak
- movdqu `16*3`($inp),$inout3
- pxor @tweak[1],$inout1
- movdqu `16*4`($inp),$inout4
- pxor @tweak[2],$inout2
- movdqu `16*5`($inp),$inout5
- lea `16*6`($inp),$inp
- pxor @tweak[3],$inout3
- $movkey ($key_),$rndkey0
- pxor @tweak[4],$inout4
- pxor @tweak[5],$inout5
-
- # inline _aesni_encrypt6 and interleave first and last rounds
- # with own code...
- $movkey 16($key_),$rndkey1
- pxor $rndkey0,$inout0
- pxor $rndkey0,$inout1
- movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks
- aesenc $rndkey1,$inout0
- lea 32($key_),$key
- pxor $rndkey0,$inout2
- movdqa @tweak[1],`16*1`(%rsp)
- aesenc $rndkey1,$inout1
- pxor $rndkey0,$inout3
- movdqa @tweak[2],`16*2`(%rsp)
- aesenc $rndkey1,$inout2
- pxor $rndkey0,$inout4
- movdqa @tweak[3],`16*3`(%rsp)
- aesenc $rndkey1,$inout3
- pxor $rndkey0,$inout5
- $movkey ($key),$rndkey0
- dec $rounds
- movdqa @tweak[4],`16*4`(%rsp)
- aesenc $rndkey1,$inout4
- movdqa @tweak[5],`16*5`(%rsp)
- aesenc $rndkey1,$inout5
- pxor $twtmp,$twtmp
- pcmpgtd @tweak[5],$twtmp
- jmp .Lxts_enc_loop6_enter
-
-.align 16
-.Lxts_enc_loop6:
- aesenc $rndkey1,$inout0
- aesenc $rndkey1,$inout1
- dec $rounds
- aesenc $rndkey1,$inout2
- aesenc $rndkey1,$inout3
- aesenc $rndkey1,$inout4
- aesenc $rndkey1,$inout5
-.Lxts_enc_loop6_enter:
- $movkey 16($key),$rndkey1
- aesenc $rndkey0,$inout0
- aesenc $rndkey0,$inout1
- lea 32($key),$key
- aesenc $rndkey0,$inout2
- aesenc $rndkey0,$inout3
- aesenc $rndkey0,$inout4
- aesenc $rndkey0,$inout5
- $movkey ($key),$rndkey0
- jnz .Lxts_enc_loop6
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesenc $rndkey1,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesenc $rndkey1,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesenc $rndkey1,$inout2
- pxor $twres,@tweak[5]
- aesenc $rndkey1,$inout3
- aesenc $rndkey1,$inout4
- aesenc $rndkey1,$inout5
- $movkey 16($key),$rndkey1
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[0]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesenc $rndkey0,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesenc $rndkey0,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesenc $rndkey0,$inout2
- pxor $twres,@tweak[5]
- aesenc $rndkey0,$inout3
- aesenc $rndkey0,$inout4
- aesenc $rndkey0,$inout5
- $movkey 32($key),$rndkey0
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[1]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesenc $rndkey1,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesenc $rndkey1,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesenc $rndkey1,$inout2
- pxor $twres,@tweak[5]
- aesenc $rndkey1,$inout3
- aesenc $rndkey1,$inout4
- aesenc $rndkey1,$inout5
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[2]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesenclast $rndkey0,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesenclast $rndkey0,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesenclast $rndkey0,$inout2
- pxor $twres,@tweak[5]
- aesenclast $rndkey0,$inout3
- aesenclast $rndkey0,$inout4
- aesenclast $rndkey0,$inout5
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[3]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- xorps `16*0`(%rsp),$inout0 # output^=tweak
- pand $twmask,$twres # isolate carry and residue
- xorps `16*1`(%rsp),$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- pxor $twres,@tweak[5]
-
- xorps `16*2`(%rsp),$inout2
- movups $inout0,`16*0`($out) # write output
- xorps `16*3`(%rsp),$inout3
- movups $inout1,`16*1`($out)
- xorps `16*4`(%rsp),$inout4
- movups $inout2,`16*2`($out)
- xorps `16*5`(%rsp),$inout5
- movups $inout3,`16*3`($out)
- mov $rnds_,$rounds # restore $rounds
- movups $inout4,`16*4`($out)
- movups $inout5,`16*5`($out)
- lea `16*6`($out),$out
- sub \$16*6,$len
- jnc .Lxts_enc_grandloop
-
- lea 3($rounds,$rounds),$rounds # restore original value
- mov $key_,$key # restore $key
- mov $rounds,$rnds_ # backup $rounds
-
-.Lxts_enc_short:
- add \$16*6,$len
- jz .Lxts_enc_done
-
- cmp \$0x20,$len
- jb .Lxts_enc_one
- je .Lxts_enc_two
-
- cmp \$0x40,$len
- jb .Lxts_enc_three
- je .Lxts_enc_four
-
- pshufd \$0x13,$twtmp,$twres
- movdqa @tweak[5],@tweak[4]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- movdqu ($inp),$inout0
- pand $twmask,$twres # isolate carry and residue
- movdqu 16*1($inp),$inout1
- pxor $twres,@tweak[5]
-
- movdqu 16*2($inp),$inout2
- pxor @tweak[0],$inout0
- movdqu 16*3($inp),$inout3
- pxor @tweak[1],$inout1
- movdqu 16*4($inp),$inout4
- lea 16*5($inp),$inp
- pxor @tweak[2],$inout2
- pxor @tweak[3],$inout3
- pxor @tweak[4],$inout4
-
- call _aesni_encrypt6
-
- xorps @tweak[0],$inout0
- movdqa @tweak[5],@tweak[0]
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
- movdqu $inout0,($out)
- xorps @tweak[3],$inout3
- movdqu $inout1,16*1($out)
- xorps @tweak[4],$inout4
- movdqu $inout2,16*2($out)
- movdqu $inout3,16*3($out)
- movdqu $inout4,16*4($out)
- lea 16*5($out),$out
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_one:
- movups ($inp),$inout0
- lea 16*1($inp),$inp
- xorps @tweak[0],$inout0
-___
- &aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
- xorps @tweak[0],$inout0
- movdqa @tweak[1],@tweak[0]
- movups $inout0,($out)
- lea 16*1($out),$out
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_two:
- movups ($inp),$inout0
- movups 16($inp),$inout1
- lea 32($inp),$inp
- xorps @tweak[0],$inout0
- xorps @tweak[1],$inout1
-
- call _aesni_encrypt3
-
- xorps @tweak[0],$inout0
- movdqa @tweak[2],@tweak[0]
- xorps @tweak[1],$inout1
- movups $inout0,($out)
- movups $inout1,16*1($out)
- lea 16*2($out),$out
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_three:
- movups ($inp),$inout0
- movups 16*1($inp),$inout1
- movups 16*2($inp),$inout2
- lea 16*3($inp),$inp
- xorps @tweak[0],$inout0
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
-
- call _aesni_encrypt3
-
- xorps @tweak[0],$inout0
- movdqa @tweak[3],@tweak[0]
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
- movups $inout0,($out)
- movups $inout1,16*1($out)
- movups $inout2,16*2($out)
- lea 16*3($out),$out
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_four:
- movups ($inp),$inout0
- movups 16*1($inp),$inout1
- movups 16*2($inp),$inout2
- xorps @tweak[0],$inout0
- movups 16*3($inp),$inout3
- lea 16*4($inp),$inp
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
- xorps @tweak[3],$inout3
-
- call _aesni_encrypt4
-
- xorps @tweak[0],$inout0
- movdqa @tweak[5],@tweak[0]
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
- movups $inout0,($out)
- xorps @tweak[3],$inout3
- movups $inout1,16*1($out)
- movups $inout2,16*2($out)
- movups $inout3,16*3($out)
- lea 16*4($out),$out
- jmp .Lxts_enc_done
-
-.align 16
-.Lxts_enc_done:
- and \$15,$len_
- jz .Lxts_enc_ret
- mov $len_,$len
-
-.Lxts_enc_steal:
- movzb ($inp),%eax # borrow $rounds ...
- movzb -16($out),%ecx # ... and $key
- lea 1($inp),$inp
- mov %al,-16($out)
- mov %cl,0($out)
- lea 1($out),$out
- sub \$1,$len
- jnz .Lxts_enc_steal
-
- sub $len_,$out # rewind $out
- mov $key_,$key # restore $key
- mov $rnds_,$rounds # restore $rounds
-
- movups -16($out),$inout0
- xorps @tweak[0],$inout0
-___
- &aesni_generate1("enc",$key,$rounds);
-$code.=<<___;
- xorps @tweak[0],$inout0
- movups $inout0,-16($out)
-
-.Lxts_enc_ret:
-___
-$code.=<<___ if ($win64);
- movaps 0x60(%rsp),%xmm6
- movaps 0x70(%rsp),%xmm7
- movaps 0x80(%rsp),%xmm8
- movaps 0x90(%rsp),%xmm9
- movaps 0xa0(%rsp),%xmm10
- movaps 0xb0(%rsp),%xmm11
- movaps 0xc0(%rsp),%xmm12
- movaps 0xd0(%rsp),%xmm13
- movaps 0xe0(%rsp),%xmm14
- movaps 0xf0(%rsp),%xmm15
-___
-$code.=<<___;
- lea $frame_size(%rsp),%rsp
-.Lxts_enc_epilogue:
- ret
-.size aesni_xts_encrypt,.-aesni_xts_encrypt
-___
-
-$code.=<<___;
-.globl aesni_xts_decrypt
-.type aesni_xts_decrypt,\@function,6
-.align 16
-aesni_xts_decrypt:
- lea -$frame_size(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
- movaps %xmm6,0x60(%rsp)
- movaps %xmm7,0x70(%rsp)
- movaps %xmm8,0x80(%rsp)
- movaps %xmm9,0x90(%rsp)
- movaps %xmm10,0xa0(%rsp)
- movaps %xmm11,0xb0(%rsp)
- movaps %xmm12,0xc0(%rsp)
- movaps %xmm13,0xd0(%rsp)
- movaps %xmm14,0xe0(%rsp)
- movaps %xmm15,0xf0(%rsp)
-.Lxts_dec_body:
-___
-$code.=<<___;
- movups ($ivp),@tweak[5] # load clear-text tweak
- mov 240($key2),$rounds # key2->rounds
- mov 240($key),$rnds_ # key1->rounds
-___
- # generate the tweak
- &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
-$code.=<<___;
- xor %eax,%eax # if ($len%16) len-=16;
- test \$15,$len
- setnz %al
- shl \$4,%rax
- sub %rax,$len
-
- mov $key,$key_ # backup $key
- mov $rnds_,$rounds # backup $rounds
- mov $len,$len_ # backup $len
- and \$-16,$len
-
- movdqa .Lxts_magic(%rip),$twmask
- pxor $twtmp,$twtmp
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
-___
- for ($i=0;$i<4;$i++) {
- $code.=<<___;
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[$i]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- pand $twmask,$twres # isolate carry and residue
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- pxor $twres,@tweak[5]
-___
- }
-$code.=<<___;
- sub \$16*6,$len
- jc .Lxts_dec_short
-
- shr \$1,$rounds
- sub \$1,$rounds
- mov $rounds,$rnds_
- jmp .Lxts_dec_grandloop
-
-.align 16
-.Lxts_dec_grandloop:
- pshufd \$0x13,$twtmp,$twres
- movdqa @tweak[5],@tweak[4]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- movdqu `16*0`($inp),$inout0 # load input
- pand $twmask,$twres # isolate carry and residue
- movdqu `16*1`($inp),$inout1
- pxor $twres,@tweak[5]
-
- movdqu `16*2`($inp),$inout2
- pxor @tweak[0],$inout0 # input^=tweak
- movdqu `16*3`($inp),$inout3
- pxor @tweak[1],$inout1
- movdqu `16*4`($inp),$inout4
- pxor @tweak[2],$inout2
- movdqu `16*5`($inp),$inout5
- lea `16*6`($inp),$inp
- pxor @tweak[3],$inout3
- $movkey ($key_),$rndkey0
- pxor @tweak[4],$inout4
- pxor @tweak[5],$inout5
-
- # inline _aesni_decrypt6 and interleave first and last rounds
- # with own code...
- $movkey 16($key_),$rndkey1
- pxor $rndkey0,$inout0
- pxor $rndkey0,$inout1
- movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks
- aesdec $rndkey1,$inout0
- lea 32($key_),$key
- pxor $rndkey0,$inout2
- movdqa @tweak[1],`16*1`(%rsp)
- aesdec $rndkey1,$inout1
- pxor $rndkey0,$inout3
- movdqa @tweak[2],`16*2`(%rsp)
- aesdec $rndkey1,$inout2
- pxor $rndkey0,$inout4
- movdqa @tweak[3],`16*3`(%rsp)
- aesdec $rndkey1,$inout3
- pxor $rndkey0,$inout5
- $movkey ($key),$rndkey0
- dec $rounds
- movdqa @tweak[4],`16*4`(%rsp)
- aesdec $rndkey1,$inout4
- movdqa @tweak[5],`16*5`(%rsp)
- aesdec $rndkey1,$inout5
- pxor $twtmp,$twtmp
- pcmpgtd @tweak[5],$twtmp
- jmp .Lxts_dec_loop6_enter
-
-.align 16
-.Lxts_dec_loop6:
- aesdec $rndkey1,$inout0
- aesdec $rndkey1,$inout1
- dec $rounds
- aesdec $rndkey1,$inout2
- aesdec $rndkey1,$inout3
- aesdec $rndkey1,$inout4
- aesdec $rndkey1,$inout5
-.Lxts_dec_loop6_enter:
- $movkey 16($key),$rndkey1
- aesdec $rndkey0,$inout0
- aesdec $rndkey0,$inout1
- lea 32($key),$key
- aesdec $rndkey0,$inout2
- aesdec $rndkey0,$inout3
- aesdec $rndkey0,$inout4
- aesdec $rndkey0,$inout5
- $movkey ($key),$rndkey0
- jnz .Lxts_dec_loop6
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesdec $rndkey1,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesdec $rndkey1,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesdec $rndkey1,$inout2
- pxor $twres,@tweak[5]
- aesdec $rndkey1,$inout3
- aesdec $rndkey1,$inout4
- aesdec $rndkey1,$inout5
- $movkey 16($key),$rndkey1
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[0]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesdec $rndkey0,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesdec $rndkey0,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesdec $rndkey0,$inout2
- pxor $twres,@tweak[5]
- aesdec $rndkey0,$inout3
- aesdec $rndkey0,$inout4
- aesdec $rndkey0,$inout5
- $movkey 32($key),$rndkey0
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[1]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesdec $rndkey1,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesdec $rndkey1,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesdec $rndkey1,$inout2
- pxor $twres,@tweak[5]
- aesdec $rndkey1,$inout3
- aesdec $rndkey1,$inout4
- aesdec $rndkey1,$inout5
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[2]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- aesdeclast $rndkey0,$inout0
- pand $twmask,$twres # isolate carry and residue
- aesdeclast $rndkey0,$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- aesdeclast $rndkey0,$inout2
- pxor $twres,@tweak[5]
- aesdeclast $rndkey0,$inout3
- aesdeclast $rndkey0,$inout4
- aesdeclast $rndkey0,$inout5
-
- pshufd \$0x13,$twtmp,$twres
- pxor $twtmp,$twtmp
- movdqa @tweak[5],@tweak[3]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- xorps `16*0`(%rsp),$inout0 # output^=tweak
- pand $twmask,$twres # isolate carry and residue
- xorps `16*1`(%rsp),$inout1
- pcmpgtd @tweak[5],$twtmp # broadcast upper bits
- pxor $twres,@tweak[5]
-
- xorps `16*2`(%rsp),$inout2
- movups $inout0,`16*0`($out) # write output
- xorps `16*3`(%rsp),$inout3
- movups $inout1,`16*1`($out)
- xorps `16*4`(%rsp),$inout4
- movups $inout2,`16*2`($out)
- xorps `16*5`(%rsp),$inout5
- movups $inout3,`16*3`($out)
- mov $rnds_,$rounds # restore $rounds
- movups $inout4,`16*4`($out)
- movups $inout5,`16*5`($out)
- lea `16*6`($out),$out
- sub \$16*6,$len
- jnc .Lxts_dec_grandloop
-
- lea 3($rounds,$rounds),$rounds # restore original value
- mov $key_,$key # restore $key
- mov $rounds,$rnds_ # backup $rounds
-
-.Lxts_dec_short:
- add \$16*6,$len
- jz .Lxts_dec_done
-
- cmp \$0x20,$len
- jb .Lxts_dec_one
- je .Lxts_dec_two
-
- cmp \$0x40,$len
- jb .Lxts_dec_three
- je .Lxts_dec_four
-
- pshufd \$0x13,$twtmp,$twres
- movdqa @tweak[5],@tweak[4]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- movdqu ($inp),$inout0
- pand $twmask,$twres # isolate carry and residue
- movdqu 16*1($inp),$inout1
- pxor $twres,@tweak[5]
-
- movdqu 16*2($inp),$inout2
- pxor @tweak[0],$inout0
- movdqu 16*3($inp),$inout3
- pxor @tweak[1],$inout1
- movdqu 16*4($inp),$inout4
- lea 16*5($inp),$inp
- pxor @tweak[2],$inout2
- pxor @tweak[3],$inout3
- pxor @tweak[4],$inout4
-
- call _aesni_decrypt6
-
- xorps @tweak[0],$inout0
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
- movdqu $inout0,($out)
- xorps @tweak[3],$inout3
- movdqu $inout1,16*1($out)
- xorps @tweak[4],$inout4
- movdqu $inout2,16*2($out)
- pxor $twtmp,$twtmp
- movdqu $inout3,16*3($out)
- pcmpgtd @tweak[5],$twtmp
- movdqu $inout4,16*4($out)
- lea 16*5($out),$out
- pshufd \$0x13,$twtmp,@tweak[1] # $twres
- and \$15,$len_
- jz .Lxts_dec_ret
-
- movdqa @tweak[5],@tweak[0]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- pand $twmask,@tweak[1] # isolate carry and residue
- pxor @tweak[5],@tweak[1]
- jmp .Lxts_dec_done2
-
-.align 16
-.Lxts_dec_one:
- movups ($inp),$inout0
- lea 16*1($inp),$inp
- xorps @tweak[0],$inout0
-___
- &aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
- xorps @tweak[0],$inout0
- movdqa @tweak[1],@tweak[0]
- movups $inout0,($out)
- movdqa @tweak[2],@tweak[1]
- lea 16*1($out),$out
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_two:
- movups ($inp),$inout0
- movups 16($inp),$inout1
- lea 32($inp),$inp
- xorps @tweak[0],$inout0
- xorps @tweak[1],$inout1
-
- call _aesni_decrypt3
-
- xorps @tweak[0],$inout0
- movdqa @tweak[2],@tweak[0]
- xorps @tweak[1],$inout1
- movdqa @tweak[3],@tweak[1]
- movups $inout0,($out)
- movups $inout1,16*1($out)
- lea 16*2($out),$out
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_three:
- movups ($inp),$inout0
- movups 16*1($inp),$inout1
- movups 16*2($inp),$inout2
- lea 16*3($inp),$inp
- xorps @tweak[0],$inout0
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
-
- call _aesni_decrypt3
-
- xorps @tweak[0],$inout0
- movdqa @tweak[3],@tweak[0]
- xorps @tweak[1],$inout1
- movdqa @tweak[5],@tweak[1]
- xorps @tweak[2],$inout2
- movups $inout0,($out)
- movups $inout1,16*1($out)
- movups $inout2,16*2($out)
- lea 16*3($out),$out
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_four:
- pshufd \$0x13,$twtmp,$twres
- movdqa @tweak[5],@tweak[4]
- paddq @tweak[5],@tweak[5] # psllq 1,$tweak
- movups ($inp),$inout0
- pand $twmask,$twres # isolate carry and residue
- movups 16*1($inp),$inout1
- pxor $twres,@tweak[5]
-
- movups 16*2($inp),$inout2
- xorps @tweak[0],$inout0
- movups 16*3($inp),$inout3
- lea 16*4($inp),$inp
- xorps @tweak[1],$inout1
- xorps @tweak[2],$inout2
- xorps @tweak[3],$inout3
-
- call _aesni_decrypt4
-
- xorps @tweak[0],$inout0
- movdqa @tweak[4],@tweak[0]
- xorps @tweak[1],$inout1
- movdqa @tweak[5],@tweak[1]
- xorps @tweak[2],$inout2
- movups $inout0,($out)
- xorps @tweak[3],$inout3
- movups $inout1,16*1($out)
- movups $inout2,16*2($out)
- movups $inout3,16*3($out)
- lea 16*4($out),$out
- jmp .Lxts_dec_done
-
-.align 16
-.Lxts_dec_done:
- and \$15,$len_
- jz .Lxts_dec_ret
-.Lxts_dec_done2:
- mov $len_,$len
- mov $key_,$key # restore $key
- mov $rnds_,$rounds # restore $rounds
-
- movups ($inp),$inout0
- xorps @tweak[1],$inout0
-___
- &aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
- xorps @tweak[1],$inout0
- movups $inout0,($out)
-
-.Lxts_dec_steal:
- movzb 16($inp),%eax # borrow $rounds ...
- movzb ($out),%ecx # ... and $key
- lea 1($inp),$inp
- mov %al,($out)
- mov %cl,16($out)
- lea 1($out),$out
- sub \$1,$len
- jnz .Lxts_dec_steal
-
- sub $len_,$out # rewind $out
- mov $key_,$key # restore $key
- mov $rnds_,$rounds # restore $rounds
-
- movups ($out),$inout0
- xorps @tweak[0],$inout0
-___
- &aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
- xorps @tweak[0],$inout0
- movups $inout0,($out)
-
-.Lxts_dec_ret:
-___
-$code.=<<___ if ($win64);
- movaps 0x60(%rsp),%xmm6
- movaps 0x70(%rsp),%xmm7
- movaps 0x80(%rsp),%xmm8
- movaps 0x90(%rsp),%xmm9
- movaps 0xa0(%rsp),%xmm10
- movaps 0xb0(%rsp),%xmm11
- movaps 0xc0(%rsp),%xmm12
- movaps 0xd0(%rsp),%xmm13
- movaps 0xe0(%rsp),%xmm14
- movaps 0xf0(%rsp),%xmm15
-___
-$code.=<<___;
- lea $frame_size(%rsp),%rsp
-.Lxts_dec_epilogue:
- ret
-.size aesni_xts_decrypt,.-aesni_xts_decrypt
-___
-} }}
-
-########################################################################
-# void $PREFIX_cbc_encrypt (const void *inp, void *out,
-# size_t length, const AES_KEY *key,
-# unsigned char *ivp, const int enc);
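A usage sketch with what appears to be the same calling convention as
OpenSSL's generic AES_cbc_encrypt; the IV buffer is read and then updated in
place (see the movups stores to ($ivp) below), so back-to-back calls chain
naturally (cbc_demo is illustrative):

    #include <stddef.h>
    #include <openssl/aes.h>

    extern void aesni_cbc_encrypt(const void *inp, void *out, size_t length,
                                  const AES_KEY *key, unsigned char *ivp,
                                  const int enc);

    static void cbc_demo(const unsigned char *in, unsigned char *out,
                         size_t len, const AES_KEY *key, unsigned char iv[16])
    {
        aesni_cbc_encrypt(in, out, len, key, iv, 1);   /* 1 = encrypt */
    }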
-{
-my $reserved = $win64?0x40:-0x18; # used in decrypt
-$code.=<<___;
-.globl ${PREFIX}_cbc_encrypt
-.type ${PREFIX}_cbc_encrypt,\@function,6
-.align 16
-${PREFIX}_cbc_encrypt:
- test $len,$len # check length
- jz .Lcbc_ret
-
- mov 240($key),$rnds_ # key->rounds
- mov $key,$key_ # backup $key
- test %r9d,%r9d # 6th argument
- jz .Lcbc_decrypt
-#--------------------------- CBC ENCRYPT ------------------------------#
- movups ($ivp),$inout0 # load iv as initial state
- mov $rnds_,$rounds
- cmp \$16,$len
- jb .Lcbc_enc_tail
- sub \$16,$len
- jmp .Lcbc_enc_loop
-.align 16
-.Lcbc_enc_loop:
- movups ($inp),$inout1 # load input
- lea 16($inp),$inp
- #xorps $inout1,$inout0
-___
- &aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
-$code.=<<___;
- mov $rnds_,$rounds # restore $rounds
- mov $key_,$key # restore $key
- movups $inout0,0($out) # store output
- lea 16($out),$out
- sub \$16,$len
- jnc .Lcbc_enc_loop
- add \$16,$len
- jnz .Lcbc_enc_tail
- movups $inout0,($ivp)
- jmp .Lcbc_ret
-
-.Lcbc_enc_tail:
- mov $len,%rcx # zaps $key
- xchg $inp,$out # $inp is %rsi and $out is %rdi now
- .long 0x9066A4F3 # rep movsb
- mov \$16,%ecx # zero tail
- sub $len,%rcx
- xor %eax,%eax
- .long 0x9066AAF3 # rep stosb
- lea -16(%rdi),%rdi # rewind $out by 1 block
- mov $rnds_,$rounds # restore $rounds
- mov %rdi,%rsi # $inp and $out are the same
- mov $key_,$key # restore $key
- xor $len,$len # len=16
- jmp .Lcbc_enc_loop # one more spin
- #--------------------------- CBC DECRYPT ------------------------------#
-.align 16
-.Lcbc_decrypt:
-___
-$code.=<<___ if ($win64);
- lea -0x58(%rsp),%rsp
- movaps %xmm6,(%rsp)
- movaps %xmm7,0x10(%rsp)
- movaps %xmm8,0x20(%rsp)
- movaps %xmm9,0x30(%rsp)
-.Lcbc_decrypt_body:
-___
-$code.=<<___;
- movups ($ivp),$iv
- mov $rnds_,$rounds
- cmp \$0x70,$len
- jbe .Lcbc_dec_tail
- shr \$1,$rnds_
- sub \$0x70,$len
- mov $rnds_,$rounds
- movaps $iv,$reserved(%rsp)
- jmp .Lcbc_dec_loop8_enter
-.align 16
-.Lcbc_dec_loop8:
- movaps $rndkey0,$reserved(%rsp) # save IV
- movups $inout7,($out)
- lea 0x10($out),$out
-.Lcbc_dec_loop8_enter:
- $movkey ($key),$rndkey0
- movups ($inp),$inout0 # load input
- movups 0x10($inp),$inout1
- $movkey 16($key),$rndkey1
-
- lea 32($key),$key
- movdqu 0x20($inp),$inout2
- xorps $rndkey0,$inout0
- movdqu 0x30($inp),$inout3
- xorps $rndkey0,$inout1
- movdqu 0x40($inp),$inout4
- aesdec $rndkey1,$inout0
- pxor $rndkey0,$inout2
- movdqu 0x50($inp),$inout5
- aesdec $rndkey1,$inout1
- pxor $rndkey0,$inout3
- movdqu 0x60($inp),$inout6
- aesdec $rndkey1,$inout2
- pxor $rndkey0,$inout4
- movdqu 0x70($inp),$inout7
- aesdec $rndkey1,$inout3
- pxor $rndkey0,$inout5
- dec $rounds
- aesdec $rndkey1,$inout4
- pxor $rndkey0,$inout6
- aesdec $rndkey1,$inout5
- pxor $rndkey0,$inout7
- $movkey ($key),$rndkey0
- aesdec $rndkey1,$inout6
- aesdec $rndkey1,$inout7
- $movkey 16($key),$rndkey1
-
- call .Ldec_loop8_enter
-
- movups ($inp),$rndkey1 # re-load input
- movups 0x10($inp),$rndkey0
- xorps $reserved(%rsp),$inout0 # ^= IV
- xorps $rndkey1,$inout1
- movups 0x20($inp),$rndkey1
- xorps $rndkey0,$inout2
- movups 0x30($inp),$rndkey0
- xorps $rndkey1,$inout3
- movups 0x40($inp),$rndkey1
- xorps $rndkey0,$inout4
- movups 0x50($inp),$rndkey0
- xorps $rndkey1,$inout5
- movups 0x60($inp),$rndkey1
- xorps $rndkey0,$inout6
- movups 0x70($inp),$rndkey0 # IV
- xorps $rndkey1,$inout7
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- mov $rnds_,$rounds # restore $rounds
- movups $inout4,0x40($out)
- mov $key_,$key # restore $key
- movups $inout5,0x50($out)
- lea 0x80($inp),$inp
- movups $inout6,0x60($out)
- lea 0x70($out),$out
- sub \$0x80,$len
- ja .Lcbc_dec_loop8
-
- movaps $inout7,$inout0
- movaps $rndkey0,$iv
- add \$0x70,$len
- jle .Lcbc_dec_tail_collected
- movups $inout0,($out)
- lea 1($rnds_,$rnds_),$rounds
- lea 0x10($out),$out
-.Lcbc_dec_tail:
- movups ($inp),$inout0
- movaps $inout0,$in0
- cmp \$0x10,$len
- jbe .Lcbc_dec_one
-
- movups 0x10($inp),$inout1
- movaps $inout1,$in1
- cmp \$0x20,$len
- jbe .Lcbc_dec_two
-
- movups 0x20($inp),$inout2
- movaps $inout2,$in2
- cmp \$0x30,$len
- jbe .Lcbc_dec_three
-
- movups 0x30($inp),$inout3
- cmp \$0x40,$len
- jbe .Lcbc_dec_four
-
- movups 0x40($inp),$inout4
- cmp \$0x50,$len
- jbe .Lcbc_dec_five
-
- movups 0x50($inp),$inout5
- cmp \$0x60,$len
- jbe .Lcbc_dec_six
-
- movups 0x60($inp),$inout6
- movaps $iv,$reserved(%rsp) # save IV
- call _aesni_decrypt8
- movups ($inp),$rndkey1
- movups 0x10($inp),$rndkey0
- xorps $reserved(%rsp),$inout0 # ^= IV
- xorps $rndkey1,$inout1
- movups 0x20($inp),$rndkey1
- xorps $rndkey0,$inout2
- movups 0x30($inp),$rndkey0
- xorps $rndkey1,$inout3
- movups 0x40($inp),$rndkey1
- xorps $rndkey0,$inout4
- movups 0x50($inp),$rndkey0
- xorps $rndkey1,$inout5
- movups 0x60($inp),$iv # IV
- xorps $rndkey0,$inout6
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- movups $inout5,0x50($out)
- lea 0x60($out),$out
- movaps $inout6,$inout0
- sub \$0x70,$len
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_one:
-___
- &aesni_generate1("dec",$key,$rounds);
-$code.=<<___;
- xorps $iv,$inout0
- movaps $in0,$iv
- sub \$0x10,$len
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_two:
- xorps $inout2,$inout2
- call _aesni_decrypt3
- xorps $iv,$inout0
- xorps $in0,$inout1
- movups $inout0,($out)
- movaps $in1,$iv
- movaps $inout1,$inout0
- lea 0x10($out),$out
- sub \$0x20,$len
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_three:
- call _aesni_decrypt3
- xorps $iv,$inout0
- xorps $in0,$inout1
- movups $inout0,($out)
- xorps $in1,$inout2
- movups $inout1,0x10($out)
- movaps $in2,$iv
- movaps $inout2,$inout0
- lea 0x20($out),$out
- sub \$0x30,$len
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_four:
- call _aesni_decrypt4
- xorps $iv,$inout0
- movups 0x30($inp),$iv
- xorps $in0,$inout1
- movups $inout0,($out)
- xorps $in1,$inout2
- movups $inout1,0x10($out)
- xorps $in2,$inout3
- movups $inout2,0x20($out)
- movaps $inout3,$inout0
- lea 0x30($out),$out
- sub \$0x40,$len
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_five:
- xorps $inout5,$inout5
- call _aesni_decrypt6
- movups 0x10($inp),$rndkey1
- movups 0x20($inp),$rndkey0
- xorps $iv,$inout0
- xorps $in0,$inout1
- xorps $rndkey1,$inout2
- movups 0x30($inp),$rndkey1
- xorps $rndkey0,$inout3
- movups 0x40($inp),$iv
- xorps $rndkey1,$inout4
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- lea 0x40($out),$out
- movaps $inout4,$inout0
- sub \$0x50,$len
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_six:
- call _aesni_decrypt6
- movups 0x10($inp),$rndkey1
- movups 0x20($inp),$rndkey0
- xorps $iv,$inout0
- xorps $in0,$inout1
- xorps $rndkey1,$inout2
- movups 0x30($inp),$rndkey1
- xorps $rndkey0,$inout3
- movups 0x40($inp),$rndkey0
- xorps $rndkey1,$inout4
- movups 0x50($inp),$iv
- xorps $rndkey0,$inout5
- movups $inout0,($out)
- movups $inout1,0x10($out)
- movups $inout2,0x20($out)
- movups $inout3,0x30($out)
- movups $inout4,0x40($out)
- lea 0x50($out),$out
- movaps $inout5,$inout0
- sub \$0x60,$len
- jmp .Lcbc_dec_tail_collected
-.align 16
-.Lcbc_dec_tail_collected:
- and \$15,$len
- movups $iv,($ivp)
- jnz .Lcbc_dec_tail_partial
- movups $inout0,($out)
- jmp .Lcbc_dec_ret
-.align 16
-.Lcbc_dec_tail_partial:
- movaps $inout0,$reserved(%rsp)
- mov \$16,%rcx
- mov $out,%rdi
- sub $len,%rcx
- lea $reserved(%rsp),%rsi
- .long 0x9066A4F3 # rep movsb
-
-.Lcbc_dec_ret:
-___
-$code.=<<___ if ($win64);
- movaps (%rsp),%xmm6
- movaps 0x10(%rsp),%xmm7
- movaps 0x20(%rsp),%xmm8
- movaps 0x30(%rsp),%xmm9
- lea 0x58(%rsp),%rsp
-___
-$code.=<<___;
-.Lcbc_ret:
- ret
-.size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
-___
-}
-# int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
-# int bits, AES_KEY *key)
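The return values visible in the code below are 0 on success, -1 for a NULL
userKey or key pointer, and -2 for an unsupported bit count, so key setup can
be sketched like this (make_keys is illustrative):

    #include <openssl/aes.h>

    extern int aesni_set_encrypt_key(const unsigned char *userKey,
                                     int bits, AES_KEY *key);
    extern int aesni_set_decrypt_key(const unsigned char *userKey,
                                     int bits, AES_KEY *key);

    static int make_keys(const unsigned char raw[32],
                         AES_KEY *enc, AES_KEY *dec)
    {
        if (aesni_set_encrypt_key(raw, 256, enc) != 0)
            return -1;
        return aesni_set_decrypt_key(raw, 256, dec);
    }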
-{ my ($inp,$bits,$key) = @_4args;
- $bits =~ s/%r/%e/;
-
-$code.=<<___;
-.globl ${PREFIX}_set_decrypt_key
-.type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
-.align 16
-${PREFIX}_set_decrypt_key:
- .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
- call __aesni_set_encrypt_key
- shl \$4,$bits # rounds-1 after _aesni_set_encrypt_key
- test %eax,%eax
- jnz .Ldec_key_ret
- lea 16($key,$bits),$inp # points at the end of key schedule
-
- $movkey ($key),%xmm0 # just swap
- $movkey ($inp),%xmm1
- $movkey %xmm0,($inp)
- $movkey %xmm1,($key)
- lea 16($key),$key
- lea -16($inp),$inp
-
-.Ldec_key_inverse:
- $movkey ($key),%xmm0 # swap and inverse
- $movkey ($inp),%xmm1
- aesimc %xmm0,%xmm0
- aesimc %xmm1,%xmm1
- lea 16($key),$key
- lea -16($inp),$inp
- $movkey %xmm0,16($inp)
- $movkey %xmm1,-16($key)
- cmp $key,$inp
- ja .Ldec_key_inverse
-
- $movkey ($key),%xmm0 # inverse middle
- aesimc %xmm0,%xmm0
- $movkey %xmm0,($inp)
-.Ldec_key_ret:
- add \$8,%rsp
- ret
-.LSEH_end_set_decrypt_key:
-.size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
-___
-
-# This is based on a submission by
-#
-# Huang Ying <ying.huang@intel.com>
-# Vinodh Gopal <vinodh.gopal@intel.com>
-# Kahraman Akdemir
-#
-# Aggressively optimized with respect to aeskeygenassist's critical path,
-# with register usage confined to %xmm0-5 so that the Win64 ABI requirement
-# is met without saving any non-volatile %xmm registers.
-#
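For reference, one step of the AES-128 schedule that .Lkey_expansion_128
implements: aeskeygenassist leaves SubWord(RotWord(w3))^rcon in the lane the
shufps 0xff broadcast picks out, and the two shufps/xorps pairs compute the
running prefix XOR of the previous round key. A C sketch of the recurrence,
with the S-box table assumed supplied by the caller:

    #include <stdint.h>

    static void expand128_step(const uint32_t prev[4], uint32_t next[4],
                               uint32_t rcon, const uint8_t sbox[256])
    {
        uint32_t w = prev[3];                    /* words are little-endian */
        uint32_t t = ((uint32_t)sbox[(w >>  8) & 0xff])        /* RotWord,  */
                   | ((uint32_t)sbox[(w >> 16) & 0xff] <<  8)  /* then      */
                   | ((uint32_t)sbox[(w >> 24) & 0xff] << 16)  /* SubWord   */
                   | ((uint32_t)sbox[ w        & 0xff] << 24);
        next[0] = prev[0] ^ t ^ rcon;
        next[1] = prev[1] ^ next[0];
        next[2] = prev[2] ^ next[1];
        next[3] = prev[3] ^ next[2];
    }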
-$code.=<<___;
-.globl ${PREFIX}_set_encrypt_key
-.type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
-.align 16
-${PREFIX}_set_encrypt_key:
-__aesni_set_encrypt_key:
- .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
- mov \$-1,%rax
- test $inp,$inp
- jz .Lenc_key_ret
- test $key,$key
- jz .Lenc_key_ret
-
- movups ($inp),%xmm0 # pull first 128 bits of *userKey
- xorps %xmm4,%xmm4 # low dword of xmm4 is assumed 0
- lea 16($key),%rax
- cmp \$256,$bits
- je .L14rounds
- cmp \$192,$bits
- je .L12rounds
- cmp \$128,$bits
- jne .Lbad_keybits
-
-.L10rounds:
- mov \$9,$bits # 10 rounds for 128-bit key
- $movkey %xmm0,($key) # round 0
- aeskeygenassist \$0x1,%xmm0,%xmm1 # round 1
- call .Lkey_expansion_128_cold
- aeskeygenassist \$0x2,%xmm0,%xmm1 # round 2
- call .Lkey_expansion_128
- aeskeygenassist \$0x4,%xmm0,%xmm1 # round 3
- call .Lkey_expansion_128
- aeskeygenassist \$0x8,%xmm0,%xmm1 # round 4
- call .Lkey_expansion_128
- aeskeygenassist \$0x10,%xmm0,%xmm1 # round 5
- call .Lkey_expansion_128
- aeskeygenassist \$0x20,%xmm0,%xmm1 # round 6
- call .Lkey_expansion_128
- aeskeygenassist \$0x40,%xmm0,%xmm1 # round 7
- call .Lkey_expansion_128
- aeskeygenassist \$0x80,%xmm0,%xmm1 # round 8
- call .Lkey_expansion_128
- aeskeygenassist \$0x1b,%xmm0,%xmm1 # round 9
- call .Lkey_expansion_128
- aeskeygenassist \$0x36,%xmm0,%xmm1 # round 10
- call .Lkey_expansion_128
- $movkey %xmm0,(%rax)
- mov $bits,80(%rax) # 240(%rdx)
- xor %eax,%eax
- jmp .Lenc_key_ret
-
-.align 16
-.L12rounds:
- movq 16($inp),%xmm2 # remaining 1/3 of *userKey
- mov \$11,$bits # 12 rounds for 192
- $movkey %xmm0,($key) # round 0
- aeskeygenassist \$0x1,%xmm2,%xmm1 # round 1,2
- call .Lkey_expansion_192a_cold
- aeskeygenassist \$0x2,%xmm2,%xmm1 # round 2,3
- call .Lkey_expansion_192b
- aeskeygenassist \$0x4,%xmm2,%xmm1 # round 4,5
- call .Lkey_expansion_192a
- aeskeygenassist \$0x8,%xmm2,%xmm1 # round 5,6
- call .Lkey_expansion_192b
- aeskeygenassist \$0x10,%xmm2,%xmm1 # round 7,8
- call .Lkey_expansion_192a
- aeskeygenassist \$0x20,%xmm2,%xmm1 # round 8,9
- call .Lkey_expansion_192b
- aeskeygenassist \$0x40,%xmm2,%xmm1 # round 10,11
- call .Lkey_expansion_192a
- aeskeygenassist \$0x80,%xmm2,%xmm1 # round 11,12
- call .Lkey_expansion_192b
- $movkey %xmm0,(%rax)
- mov $bits,48(%rax) # 240(%rdx)
- xor %rax, %rax
- jmp .Lenc_key_ret
-
-.align 16
-.L14rounds:
- movups 16($inp),%xmm2 # remaining half of *userKey
- mov \$13,$bits # 14 rounds for 256
- lea 16(%rax),%rax
- $movkey %xmm0,($key) # round 0
- $movkey %xmm2,16($key) # round 1
- aeskeygenassist \$0x1,%xmm2,%xmm1 # round 2
- call .Lkey_expansion_256a_cold
- aeskeygenassist \$0x1,%xmm0,%xmm1 # round 3
- call .Lkey_expansion_256b
- aeskeygenassist \$0x2,%xmm2,%xmm1 # round 4
- call .Lkey_expansion_256a
- aeskeygenassist \$0x2,%xmm0,%xmm1 # round 5
- call .Lkey_expansion_256b
- aeskeygenassist \$0x4,%xmm2,%xmm1 # round 6
- call .Lkey_expansion_256a
- aeskeygenassist \$0x4,%xmm0,%xmm1 # round 7
- call .Lkey_expansion_256b
- aeskeygenassist \$0x8,%xmm2,%xmm1 # round 8
- call .Lkey_expansion_256a
- aeskeygenassist \$0x8,%xmm0,%xmm1 # round 9
- call .Lkey_expansion_256b
- aeskeygenassist \$0x10,%xmm2,%xmm1 # round 10
- call .Lkey_expansion_256a
- aeskeygenassist \$0x10,%xmm0,%xmm1 # round 11
- call .Lkey_expansion_256b
- aeskeygenassist \$0x20,%xmm2,%xmm1 # round 12
- call .Lkey_expansion_256a
- aeskeygenassist \$0x20,%xmm0,%xmm1 # round 13
- call .Lkey_expansion_256b
- aeskeygenassist \$0x40,%xmm2,%xmm1 # round 14
- call .Lkey_expansion_256a
- $movkey %xmm0,(%rax)
- mov $bits,16(%rax) # 240(%rdx)
- xor %rax,%rax
- jmp .Lenc_key_ret
-
-.align 16
-.Lbad_keybits:
- mov \$-2,%rax
-.Lenc_key_ret:
- add \$8,%rsp
- ret
-.LSEH_end_set_encrypt_key:
-
-.align 16
-.Lkey_expansion_128:
- $movkey %xmm0,(%rax)
- lea 16(%rax),%rax
-.Lkey_expansion_128_cold:
- shufps \$0b00010000,%xmm0,%xmm4
- xorps %xmm4, %xmm0
- shufps \$0b10001100,%xmm0,%xmm4
- xorps %xmm4, %xmm0
- shufps \$0b11111111,%xmm1,%xmm1 # critical path
- xorps %xmm1,%xmm0
- ret
-
-.align 16
-.Lkey_expansion_192a:
- $movkey %xmm0,(%rax)
- lea 16(%rax),%rax
-.Lkey_expansion_192a_cold:
- movaps %xmm2, %xmm5
-.Lkey_expansion_192b_warm:
- shufps \$0b00010000,%xmm0,%xmm4
- movdqa %xmm2,%xmm3
- xorps %xmm4,%xmm0
- shufps \$0b10001100,%xmm0,%xmm4
- pslldq \$4,%xmm3
- xorps %xmm4,%xmm0
- pshufd \$0b01010101,%xmm1,%xmm1 # critical path
- pxor %xmm3,%xmm2
- pxor %xmm1,%xmm0
- pshufd \$0b11111111,%xmm0,%xmm3
- pxor %xmm3,%xmm2
- ret
-
-.align 16
-.Lkey_expansion_192b:
- movaps %xmm0,%xmm3
- shufps \$0b01000100,%xmm0,%xmm5
- $movkey %xmm5,(%rax)
- shufps \$0b01001110,%xmm2,%xmm3
- $movkey %xmm3,16(%rax)
- lea 32(%rax),%rax
- jmp .Lkey_expansion_192b_warm
-
-.align 16
-.Lkey_expansion_256a:
- $movkey %xmm2,(%rax)
- lea 16(%rax),%rax
-.Lkey_expansion_256a_cold:
- shufps \$0b00010000,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps \$0b10001100,%xmm0,%xmm4
- xorps %xmm4,%xmm0
- shufps \$0b11111111,%xmm1,%xmm1 # critical path
- xorps %xmm1,%xmm0
- ret
-
-.align 16
-.Lkey_expansion_256b:
- $movkey %xmm0,(%rax)
- lea 16(%rax),%rax
-
- shufps \$0b00010000,%xmm2,%xmm4
- xorps %xmm4,%xmm2
- shufps \$0b10001100,%xmm2,%xmm4
- xorps %xmm4,%xmm2
- shufps \$0b10101010,%xmm1,%xmm1 # critical path
- xorps %xmm1,%xmm2
- ret
-.size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
-.size __aesni_set_encrypt_key,.-__aesni_set_encrypt_key
-___
-}
-
-$code.=<<___;
-.align 64
-.Lbswap_mask:
- .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
-.Lincrement32:
- .long 6,6,6,0
-.Lincrement64:
- .long 1,0,0,0
-.Lxts_magic:
- .long 0x87,0,1,0
-
-.asciz "AES for Intel AES-NI, CRYPTOGAMS by <appro\@openssl.org>"
-.align 64
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-# CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern __imp_RtlVirtualUnwind
-___
-$code.=<<___ if ($PREFIX eq "aesni");
-.type ecb_ccm64_se_handler,\@abi-omnipotent
-.align 16
-ecb_ccm64_se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
-
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lcommon_seh_tail
-
- mov 152($context),%rax # pull context->Rsp
-
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lcommon_seh_tail
-
- lea 0(%rax),%rsi # %xmm save area
- lea 512($context),%rdi # &context.Xmm6
- mov \$8,%ecx # 4*sizeof(%xmm0)/sizeof(%rax)
- .long 0xa548f3fc # cld; rep movsq
- lea 0x58(%rax),%rax # adjust stack pointer
-
- jmp .Lcommon_seh_tail
-.size ecb_ccm64_se_handler,.-ecb_ccm64_se_handler
-
-.type ctr32_se_handler,\@abi-omnipotent
-.align 16
-ctr32_se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- lea .Lctr32_body(%rip),%r10
- cmp %r10,%rbx # context->Rip<"prologue" label
- jb .Lcommon_seh_tail
-
- mov 152($context),%rax # pull context->Rsp
-
- lea .Lctr32_ret(%rip),%r10
- cmp %r10,%rbx
- jae .Lcommon_seh_tail
-
- lea 0x20(%rax),%rsi # %xmm save area
- lea 512($context),%rdi # &context.Xmm6
- mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
- .long 0xa548f3fc # cld; rep movsq
- lea 0xc8(%rax),%rax # adjust stack pointer
-
- jmp .Lcommon_seh_tail
-.size ctr32_se_handler,.-ctr32_se_handler
-
-.type xts_se_handler,\@abi-omnipotent
-.align 16
-xts_se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
-
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lcommon_seh_tail
-
- mov 152($context),%rax # pull context->Rsp
-
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lcommon_seh_tail
-
- lea 0x60(%rax),%rsi # %xmm save area
- lea 512($context),%rdi # &context.Xmm6
- mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
- .long 0xa548f3fc # cld; rep movsq
- lea 0x68+160(%rax),%rax # adjust stack pointer
-
- jmp .Lcommon_seh_tail
-.size xts_se_handler,.-xts_se_handler
-___
-$code.=<<___;
-.type cbc_se_handler,\@abi-omnipotent
-.align 16
-cbc_se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 152($context),%rax # pull context->Rsp
- mov 248($context),%rbx # pull context->Rip
-
- lea .Lcbc_decrypt(%rip),%r10
- cmp %r10,%rbx # context->Rip<"prologue" label
- jb .Lcommon_seh_tail
-
- lea .Lcbc_decrypt_body(%rip),%r10
- cmp %r10,%rbx # context->Rip<cbc_decrypt_body
- jb .Lrestore_cbc_rax
-
- lea .Lcbc_ret(%rip),%r10
- cmp %r10,%rbx # context->Rip>="epilogue" label
- jae .Lcommon_seh_tail
-
- lea 0(%rax),%rsi # top of stack
- lea 512($context),%rdi # &context.Xmm6
- mov \$8,%ecx # 4*sizeof(%xmm0)/sizeof(%rax)
- .long 0xa548f3fc # cld; rep movsq
- lea 0x58(%rax),%rax # adjust stack pointer
- jmp .Lcommon_seh_tail
-
-.Lrestore_cbc_rax:
- mov 120($context),%rax
-
-.Lcommon_seh_tail:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
-
- mov 40($disp),%rdi # disp->ContextRecord
- mov $context,%rsi # context
- mov \$154,%ecx # sizeof(CONTEXT)
- .long 0xa548f3fc # cld; rep movsq
-
- mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
- mov 8(%rsi),%rdx # arg2, disp->ImageBase
- mov 0(%rsi),%r8 # arg3, disp->ControlPc
- mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
- mov 40(%rsi),%r10 # disp->ContextRecord
- lea 56(%rsi),%r11 # &disp->HandlerData
- lea 24(%rsi),%r12 # &disp->EstablisherFrame
- mov %r10,32(%rsp) # arg5
- mov %r11,40(%rsp) # arg6
- mov %r12,48(%rsp) # arg7
- mov %rcx,56(%rsp) # arg8, (NULL)
- call *__imp_RtlVirtualUnwind(%rip)
-
- mov \$1,%eax # ExceptionContinueSearch
- add \$64,%rsp
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %rdi
- pop %rsi
- ret
-.size cbc_se_handler,.-cbc_se_handler
-
-.section .pdata
-.align 4
-___
-$code.=<<___ if ($PREFIX eq "aesni");
- .rva .LSEH_begin_aesni_ecb_encrypt
- .rva .LSEH_end_aesni_ecb_encrypt
- .rva .LSEH_info_ecb
-
- .rva .LSEH_begin_aesni_ccm64_encrypt_blocks
- .rva .LSEH_end_aesni_ccm64_encrypt_blocks
- .rva .LSEH_info_ccm64_enc
-
- .rva .LSEH_begin_aesni_ccm64_decrypt_blocks
- .rva .LSEH_end_aesni_ccm64_decrypt_blocks
- .rva .LSEH_info_ccm64_dec
-
- .rva .LSEH_begin_aesni_ctr32_encrypt_blocks
- .rva .LSEH_end_aesni_ctr32_encrypt_blocks
- .rva .LSEH_info_ctr32
-
- .rva .LSEH_begin_aesni_xts_encrypt
- .rva .LSEH_end_aesni_xts_encrypt
- .rva .LSEH_info_xts_enc
-
- .rva .LSEH_begin_aesni_xts_decrypt
- .rva .LSEH_end_aesni_xts_decrypt
- .rva .LSEH_info_xts_dec
-___
-$code.=<<___;
- .rva .LSEH_begin_${PREFIX}_cbc_encrypt
- .rva .LSEH_end_${PREFIX}_cbc_encrypt
- .rva .LSEH_info_cbc
-
- .rva ${PREFIX}_set_decrypt_key
- .rva .LSEH_end_set_decrypt_key
- .rva .LSEH_info_key
-
- .rva ${PREFIX}_set_encrypt_key
- .rva .LSEH_end_set_encrypt_key
- .rva .LSEH_info_key
-.section .xdata
-.align 8
-___
-$code.=<<___ if ($PREFIX eq "aesni");
-.LSEH_info_ecb:
- .byte 9,0,0,0
- .rva ecb_ccm64_se_handler
- .rva .Lecb_enc_body,.Lecb_enc_ret # HandlerData[]
-.LSEH_info_ccm64_enc:
- .byte 9,0,0,0
- .rva ecb_ccm64_se_handler
- .rva .Lccm64_enc_body,.Lccm64_enc_ret # HandlerData[]
-.LSEH_info_ccm64_dec:
- .byte 9,0,0,0
- .rva ecb_ccm64_se_handler
- .rva .Lccm64_dec_body,.Lccm64_dec_ret # HandlerData[]
-.LSEH_info_ctr32:
- .byte 9,0,0,0
- .rva ctr32_se_handler
-.LSEH_info_xts_enc:
- .byte 9,0,0,0
- .rva xts_se_handler
- .rva .Lxts_enc_body,.Lxts_enc_epilogue # HandlerData[]
-.LSEH_info_xts_dec:
- .byte 9,0,0,0
- .rva xts_se_handler
- .rva .Lxts_dec_body,.Lxts_dec_epilogue # HandlerData[]
-___
-$code.=<<___;
-.LSEH_info_cbc:
- .byte 9,0,0,0
- .rva cbc_se_handler
-.LSEH_info_key:
- .byte 0x01,0x04,0x01,0x00
- .byte 0x04,0x02,0x00,0x00 # sub rsp,8
-___
-}
-
-sub rex {
- local *opcode=shift;
- my ($dst,$src)=@_;
- my $rex=0;
-
- $rex|=0x04 if($dst>=8);
- $rex|=0x01 if($src>=8);
- push @opcode,$rex|0x40 if($rex);
-}
-
-sub aesni {
- my $line=shift;
- my @opcode=(0x66);
-
- if ($line=~/(aeskeygenassist)\s+\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
- rex(\@opcode,$4,$3);
- push @opcode,0x0f,0x3a,0xdf;
- push @opcode,0xc0|($3&7)|(($4&7)<<3); # ModR/M
- my $c=$2;
- push @opcode,$c=~/^0/?oct($c):$c;
- return ".byte\t".join(',',@opcode);
- }
- elsif ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
- my %opcodelet = (
- "aesimc" => 0xdb,
- "aesenc" => 0xdc, "aesenclast" => 0xdd,
- "aesdec" => 0xde, "aesdeclast" => 0xdf
- );
- return undef if (!defined($opcodelet{$1}));
- rex(\@opcode,$3,$2);
- push @opcode,0x0f,0x38,$opcodelet{$1};
- push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
- return ".byte\t".join(',',@opcode);
- }
- return $line;
-}
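As a worked example of the translation above, assuming the standard AES-NI
encodings: "aesenc %xmm1,%xmm0" has dst=0 and src=1, so no REX prefix is
pushed and the line becomes ".byte 0x66,0x0f,0x38,0xdc,0xc1" (0xdc from the
opcodelet table, ModR/M 0xc0|1|(0<<3) = 0xc1), which assemblers that lack
AES-NI mnemonics can still consume.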
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-$code =~ s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/gem;
-
-print $code;
-
-close STDOUT;
diff --git a/app/openssl/crypto/aes/asm/aesv8-armx-64.S b/app/openssl/crypto/aes/asm/aesv8-armx-64.S
deleted file mode 100644
index be0a13df..00000000
--- a/app/openssl/crypto/aes/asm/aesv8-armx-64.S
+++ /dev/null
@@ -1,761 +0,0 @@
-#include "arm_arch.h"
-
-#if __ARM_ARCH__>=7
-.text
-.arch armv8-a+crypto
-.align 5
-rcon:
-.long 0x01,0x01,0x01,0x01
-.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
-.long 0x1b,0x1b,0x1b,0x1b
-
-.globl aes_v8_set_encrypt_key
-.type aes_v8_set_encrypt_key,%function
-.align 5
-aes_v8_set_encrypt_key:
-.Lenc_key:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- adr x3,rcon
- cmp w1,#192
-
- eor v0.16b,v0.16b,v0.16b
- ld1 {v3.16b},[x0],#16
- mov w1,#8 // reuse w1
- ld1 {v1.4s,v2.4s},[x3],#32
-
- b.lt .Loop128
- b.eq .L192
- b .L256
-
-.align 4
-.Loop128:
- tbl v6.16b,{v3.16b},v2.16b
- ext v5.16b,v0.16b,v3.16b,#12
- st1 {v3.4s},[x2],#16
- aese v6.16b,v0.16b
- subs w1,w1,#1
-
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v6.16b,v6.16b,v1.16b
- eor v3.16b,v3.16b,v5.16b
- shl v1.16b,v1.16b,#1
- eor v3.16b,v3.16b,v6.16b
- b.ne .Loop128
-
- ld1 {v1.4s},[x3]
-
- tbl v6.16b,{v3.16b},v2.16b
- ext v5.16b,v0.16b,v3.16b,#12
- st1 {v3.4s},[x2],#16
- aese v6.16b,v0.16b
-
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v6.16b,v6.16b,v1.16b
- eor v3.16b,v3.16b,v5.16b
- shl v1.16b,v1.16b,#1
- eor v3.16b,v3.16b,v6.16b
-
- tbl v6.16b,{v3.16b},v2.16b
- ext v5.16b,v0.16b,v3.16b,#12
- st1 {v3.4s},[x2],#16
- aese v6.16b,v0.16b
-
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v6.16b,v6.16b,v1.16b
- eor v3.16b,v3.16b,v5.16b
- eor v3.16b,v3.16b,v6.16b
- st1 {v3.4s},[x2]
- add x2,x2,#0x50
-
- mov w12,#10
- b .Ldone
-
-.align 4
-.L192:
- ld1 {v4.8b},[x0],#8
- movi v6.16b,#8 // borrow v6.16b
- st1 {v3.4s},[x2],#16
- sub v2.16b,v2.16b,v6.16b // adjust the mask
-
-.Loop192:
- tbl v6.16b,{v4.16b},v2.16b
- ext v5.16b,v0.16b,v3.16b,#12
- st1 {v4.8b},[x2],#8
- aese v6.16b,v0.16b
- subs w1,w1,#1
-
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v3.16b,v3.16b,v5.16b
-
- dup v5.4s,v3.s[3]
- eor v5.16b,v5.16b,v4.16b
- eor v6.16b,v6.16b,v1.16b
- ext v4.16b,v0.16b,v4.16b,#12
- shl v1.16b,v1.16b,#1
- eor v4.16b,v4.16b,v5.16b
- eor v3.16b,v3.16b,v6.16b
- eor v4.16b,v4.16b,v6.16b
- st1 {v3.4s},[x2],#16
- b.ne .Loop192
-
- mov w12,#12
- add x2,x2,#0x20
- b .Ldone
-
-.align 4
-.L256:
- ld1 {v4.16b},[x0]
- mov w1,#7
- mov w12,#14
- st1 {v3.4s},[x2],#16
-
-.Loop256:
- tbl v6.16b,{v4.16b},v2.16b
- ext v5.16b,v0.16b,v3.16b,#12
- st1 {v4.4s},[x2],#16
- aese v6.16b,v0.16b
- subs w1,w1,#1
-
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v3.16b,v3.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v6.16b,v6.16b,v1.16b
- eor v3.16b,v3.16b,v5.16b
- shl v1.16b,v1.16b,#1
- eor v3.16b,v3.16b,v6.16b
- st1 {v3.4s},[x2],#16
- b.eq .Ldone
-
- dup v6.4s,v3.s[3] // just splat
- ext v5.16b,v0.16b,v4.16b,#12
- aese v6.16b,v0.16b
-
- eor v4.16b,v4.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v4.16b,v4.16b,v5.16b
- ext v5.16b,v0.16b,v5.16b,#12
- eor v4.16b,v4.16b,v5.16b
-
- eor v4.16b,v4.16b,v6.16b
- b .Loop256
-
-.Ldone:
- str w12,[x2]
-
- eor x0,x0,x0 // return value
- ldr x29,[sp],#16
- ret
-.size aes_v8_set_encrypt_key,.-aes_v8_set_encrypt_key
-
-.globl aes_v8_set_decrypt_key
-.type aes_v8_set_decrypt_key,%function
-.align 5
-aes_v8_set_decrypt_key:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- bl .Lenc_key
-
- sub x2,x2,#240 // restore original x2
- mov x4,#-16
- add x0,x2,x12,lsl#4 // end of key schedule
-
- ld1 {v0.4s},[x2]
- ld1 {v1.4s},[x0]
- st1 {v0.4s},[x0],x4
- st1 {v1.4s},[x2],#16
-
-.Loop_imc:
- ld1 {v0.4s},[x2]
- ld1 {v1.4s},[x0]
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- st1 {v0.4s},[x0],x4
- st1 {v1.4s},[x2],#16
- cmp x0,x2
- b.hi .Loop_imc
-
- ld1 {v0.4s},[x2]
- aesimc v0.16b,v0.16b
- st1 {v0.4s},[x0]
-
- eor x0,x0,x0 // return value
- ldp x29,x30,[sp],#16
- ret
-.size aes_v8_set_decrypt_key,.-aes_v8_set_decrypt_key
-.globl aes_v8_encrypt
-.type aes_v8_encrypt,%function
-.align 5
-aes_v8_encrypt:
- ldr w3,[x2,#240]
- ld1 {v0.4s},[x2],#16
- ld1 {v2.16b},[x0]
- sub w3,w3,#2
- ld1 {v1.4s},[x2],#16
-
-.Loop_enc:
- aese v2.16b,v0.16b
- ld1 {v0.4s},[x2],#16
- aesmc v2.16b,v2.16b
- subs w3,w3,#2
- aese v2.16b,v1.16b
- ld1 {v1.4s},[x2],#16
- aesmc v2.16b,v2.16b
- b.gt .Loop_enc
-
- aese v2.16b,v0.16b
- ld1 {v0.4s},[x2]
- aesmc v2.16b,v2.16b
- aese v2.16b,v1.16b
- eor v2.16b,v2.16b,v0.16b
-
- st1 {v2.16b},[x1]
- ret
-.size aes_v8_encrypt,.-aes_v8_encrypt
-.globl aes_v8_decrypt
-.type aes_v8_decrypt,%function
-.align 5
-aes_v8_decrypt:
- ldr w3,[x2,#240]
- ld1 {v0.4s},[x2],#16
- ld1 {v2.16b},[x0]
- sub w3,w3,#2
- ld1 {v1.4s},[x2],#16
-
-.Loop_dec:
- aesd v2.16b,v0.16b
- ld1 {v0.4s},[x2],#16
- aesimc v2.16b,v2.16b
- subs w3,w3,#2
- aesd v2.16b,v1.16b
- ld1 {v1.4s},[x2],#16
- aesimc v2.16b,v2.16b
- b.gt .Loop_dec
-
- aesd v2.16b,v0.16b
- ld1 {v0.4s},[x2]
- aesimc v2.16b,v2.16b
- aesd v2.16b,v1.16b
- eor v2.16b,v2.16b,v0.16b
-
- st1 {v2.16b},[x1]
- ret
-.size aes_v8_decrypt,.-aes_v8_decrypt
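These single-block entry points appear to mirror the generic
AES_encrypt/AES_decrypt signature (input block, output block, key schedule),
with the round count read from the usual 240-byte offset; a hedged C-level
round-trip sketch (roundtrip_ok is illustrative):

    #include <string.h>
    #include <openssl/aes.h>

    extern int  aes_v8_set_encrypt_key(const unsigned char *userKey,
                                       int bits, AES_KEY *key);
    extern int  aes_v8_set_decrypt_key(const unsigned char *userKey,
                                       int bits, AES_KEY *key);
    extern void aes_v8_encrypt(const unsigned char *in, unsigned char *out,
                               const AES_KEY *key);
    extern void aes_v8_decrypt(const unsigned char *in, unsigned char *out,
                               const AES_KEY *key);

    static int roundtrip_ok(const unsigned char key[16],
                            const unsigned char block[16])
    {
        AES_KEY enc, dec;
        unsigned char c[16], p[16];
        aes_v8_set_encrypt_key(key, 128, &enc);
        aes_v8_set_decrypt_key(key, 128, &dec);
        aes_v8_encrypt(block, c, &enc);
        aes_v8_decrypt(c, p, &dec);
        return memcmp(block, p, 16) == 0;   /* 1 if decrypt inverts encrypt */
    }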
-.globl aes_v8_cbc_encrypt
-.type aes_v8_cbc_encrypt,%function
-.align 5
-aes_v8_cbc_encrypt:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- subs x2,x2,#16
- mov x8,#16
- b.lo .Lcbc_abort
- csel x8,xzr,x8,eq
-
- cmp w5,#0 // en- or decrypting?
- ldr w5,[x3,#240]
- and x2,x2,#-16
- ld1 {v6.16b},[x4]
- ld1 {v0.16b},[x0],x8
-
- ld1 {v16.4s-v17.4s},[x3] // load key schedule...
- sub w5,w5,#6
- add x7,x3,x5,lsl#4 // pointer to last 7 round keys
- sub w5,w5,#2
- ld1 {v18.4s-v19.4s},[x7],#32
- ld1 {v20.4s-v21.4s},[x7],#32
- ld1 {v22.4s-v23.4s},[x7],#32
- ld1 {v7.4s},[x7]
-
- add x7,x3,#32
- mov w6,w5
- b.eq .Lcbc_dec
-
- cmp w5,#2
- eor v0.16b,v0.16b,v6.16b
- eor v5.16b,v16.16b,v7.16b
- b.eq .Lcbc_enc128
-
-.Loop_cbc_enc:
- aese v0.16b,v16.16b
- ld1 {v16.4s},[x7],#16
- aesmc v0.16b,v0.16b
- subs w6,w6,#2
- aese v0.16b,v17.16b
- ld1 {v17.4s},[x7],#16
- aesmc v0.16b,v0.16b
- b.gt .Loop_cbc_enc
-
- aese v0.16b,v16.16b
- aesmc v0.16b,v0.16b
- subs x2,x2,#16
- aese v0.16b,v17.16b
- aesmc v0.16b,v0.16b
- csel x8,xzr,x8,eq
- aese v0.16b,v18.16b
- aesmc v0.16b,v0.16b
- add x7,x3,#16
- aese v0.16b,v19.16b
- aesmc v0.16b,v0.16b
- ld1 {v16.16b},[x0],x8
- aese v0.16b,v20.16b
- aesmc v0.16b,v0.16b
- eor v16.16b,v16.16b,v5.16b
- aese v0.16b,v21.16b
- aesmc v0.16b,v0.16b
- ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
- aese v0.16b,v22.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v23.16b
-
- mov w6,w5
- eor v6.16b,v0.16b,v7.16b
- st1 {v6.16b},[x1],#16
- b.hs .Loop_cbc_enc
-
- b .Lcbc_done
-
-.align 5
-.Lcbc_enc128:
- ld1 {v2.4s-v3.4s},[x7]
- aese v0.16b,v16.16b
- aesmc v0.16b,v0.16b
- b .Lenter_cbc_enc128
-.Loop_cbc_enc128:
- aese v0.16b,v16.16b
- aesmc v0.16b,v0.16b
- st1 {v6.16b},[x1],#16
-.Lenter_cbc_enc128:
- aese v0.16b,v17.16b
- aesmc v0.16b,v0.16b
- subs x2,x2,#16
- aese v0.16b,v2.16b
- aesmc v0.16b,v0.16b
- csel x8,xzr,x8,eq
- aese v0.16b,v3.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v18.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v19.16b
- aesmc v0.16b,v0.16b
- ld1 {v16.16b},[x0],x8
- aese v0.16b,v20.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v21.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v22.16b
- aesmc v0.16b,v0.16b
- eor v16.16b,v16.16b,v5.16b
- aese v0.16b,v23.16b
- eor v6.16b,v0.16b,v7.16b
- b.hs .Loop_cbc_enc128
-
- st1 {v6.16b},[x1],#16
- b .Lcbc_done
-
-.align 5
-.Lcbc_dec128:
- ld1 {v4.4s-v5.4s},[x7]
- eor v6.16b,v6.16b,v7.16b
- eor v2.16b,v0.16b,v7.16b
- mov x12,x8
-
-.Loop2x_cbc_dec128:
- aesd v0.16b,v16.16b
- aesd v1.16b,v16.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- subs x2,x2,#32
- aesd v0.16b,v17.16b
- aesd v1.16b,v17.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- csel x8,xzr,x8,lo
- aesd v0.16b,v4.16b
- aesd v1.16b,v4.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- csel x12,xzr,x12,ls
- aesd v0.16b,v5.16b
- aesd v1.16b,v5.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- aesd v0.16b,v18.16b
- aesd v1.16b,v18.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- aesd v0.16b,v19.16b
- aesd v1.16b,v19.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- aesd v0.16b,v20.16b
- aesd v1.16b,v20.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- aesd v0.16b,v21.16b
- aesd v1.16b,v21.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- aesd v0.16b,v22.16b
- aesd v1.16b,v22.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- aesd v0.16b,v23.16b
- aesd v1.16b,v23.16b
-
- eor v6.16b,v6.16b,v0.16b
- ld1 {v0.16b},[x0],x8
- eor v2.16b,v2.16b,v1.16b
- ld1 {v1.16b},[x0],x12
- st1 {v6.16b},[x1],#16
- eor v6.16b,v3.16b,v7.16b
- st1 {v2.16b},[x1],#16
- eor v2.16b,v0.16b,v7.16b
- orr v3.16b,v1.16b,v1.16b
- b.hs .Loop2x_cbc_dec128
-
- adds x2,x2,#32
- eor v6.16b,v6.16b,v7.16b
- b.eq .Lcbc_done
- eor v2.16b,v2.16b,v7.16b
- b .Lcbc_dec_tail
-
-.align 5
-.Lcbc_dec:
- subs x2,x2,#16
- orr v2.16b,v0.16b,v0.16b
- b.lo .Lcbc_dec_tail
-
- csel x8,xzr,x8,eq
- cmp w5,#2
- ld1 {v1.16b},[x0],x8
- orr v3.16b,v1.16b,v1.16b
- b.eq .Lcbc_dec128
-
-.Loop2x_cbc_dec:
- aesd v0.16b,v16.16b
- aesd v1.16b,v16.16b
- ld1 {v16.4s},[x7],#16
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- subs w6,w6,#2
- aesd v0.16b,v17.16b
- aesd v1.16b,v17.16b
- ld1 {v17.4s},[x7],#16
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- b.gt .Loop2x_cbc_dec
-
- aesd v0.16b,v16.16b
- aesd v1.16b,v16.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- eor v4.16b,v6.16b,v7.16b
- eor v5.16b,v2.16b,v7.16b
- aesd v0.16b,v17.16b
- aesd v1.16b,v17.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- orr v6.16b,v3.16b,v3.16b
- subs x2,x2,#32
- aesd v0.16b,v18.16b
- aesd v1.16b,v18.16b
- aesimc v0.16b,v0.16b
- csel x8,xzr,x8,lo
- aesimc v1.16b,v1.16b
- mov x7,x3
- aesd v0.16b,v19.16b
- aesd v1.16b,v19.16b
- aesimc v0.16b,v0.16b
- ld1 {v2.16b},[x0],x8
- aesimc v1.16b,v1.16b
- csel x8,xzr,x8,ls
- aesd v0.16b,v20.16b
- aesd v1.16b,v20.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- ld1 {v3.16b},[x0],x8
- aesd v0.16b,v21.16b
- aesd v1.16b,v21.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- ld1 {v16.4s},[x7],#16 // re-pre-load rndkey[0]
- aesd v0.16b,v22.16b
- aesd v1.16b,v22.16b
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- ld1 {v17.4s},[x7],#16 // re-pre-load rndkey[1]
- aesd v0.16b,v23.16b
- aesd v1.16b,v23.16b
-
- mov w6,w5
- eor v4.16b,v4.16b,v0.16b
- eor v5.16b,v5.16b,v1.16b
- orr v0.16b,v2.16b,v2.16b
- st1 {v4.16b},[x1],#16
- orr v1.16b,v3.16b,v3.16b
- st1 {v5.16b},[x1],#16
- b.hs .Loop2x_cbc_dec
-
- adds x2,x2,#32
- b.eq .Lcbc_done
-
-.Lcbc_dec_tail:
- aesd v0.16b,v16.16b
- ld1 {v16.4s},[x7],#16
- aesimc v0.16b,v0.16b
- subs w6,w6,#2
- aesd v0.16b,v17.16b
- ld1 {v17.4s},[x7],#16
- aesimc v0.16b,v0.16b
- b.gt .Lcbc_dec_tail
-
- aesd v0.16b,v16.16b
- aesimc v0.16b,v0.16b
- aesd v0.16b,v17.16b
- aesimc v0.16b,v0.16b
- eor v4.16b,v6.16b,v7.16b
- aesd v0.16b,v18.16b
- aesimc v0.16b,v0.16b
- orr v6.16b,v2.16b,v2.16b
- aesd v0.16b,v19.16b
- aesimc v0.16b,v0.16b
- aesd v0.16b,v20.16b
- aesimc v0.16b,v0.16b
- aesd v0.16b,v21.16b
- aesimc v0.16b,v0.16b
- aesd v0.16b,v22.16b
- aesimc v0.16b,v0.16b
- aesd v0.16b,v23.16b
-
- eor v4.16b,v4.16b,v0.16b
- st1 {v4.16b},[x1],#16
-
-.Lcbc_done:
- st1 {v6.16b},[x4]
-.Lcbc_abort:
- ldr x29,[sp],#16
- ret
-.size aes_v8_cbc_encrypt,.-aes_v8_cbc_encrypt
-.globl aes_v8_ctr32_encrypt_blocks
-.type aes_v8_ctr32_encrypt_blocks,%function
-.align 5
-aes_v8_ctr32_encrypt_blocks:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
- ldr w5,[x3,#240]
-
- ldr w8, [x4, #12]
- ld1 {v0.4s},[x4]
-
- ld1 {v16.4s-v17.4s},[x3] // load key schedule...
- sub w5,w5,#6
- add x7,x3,x5,lsl#4 // pointer to last 7 round keys
- sub w5,w5,#2
- ld1 {v18.4s-v19.4s},[x7],#32
- ld1 {v20.4s-v21.4s},[x7],#32
- ld1 {v22.4s-v23.4s},[x7],#32
- ld1 {v7.4s},[x7]
-
- add x7,x3,#32
- mov w6,w5
-
- subs x2,x2,#2
- b.lo .Lctr32_tail
-
-#ifndef __ARMEB__
- rev w8, w8
-#endif
- orr v1.16b,v0.16b,v0.16b
- add w8, w8, #1
- orr v6.16b,v0.16b,v0.16b
- rev w10, w8
- cmp w5,#2
- mov v1.s[3],w10
- b.eq .Lctr32_128
-
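-// Two counter blocks in flight: the low 32 counter bits live in w8
-// and are re-inserted into lane 3 of v0/v1 after each increment.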
-.Loop2x_ctr32:
- aese v0.16b,v16.16b
- aese v1.16b,v16.16b
- ld1 {v16.4s},[x7],#16
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- subs w6,w6,#2
- aese v0.16b,v17.16b
- aese v1.16b,v17.16b
- ld1 {v17.4s},[x7],#16
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- b.gt .Loop2x_ctr32
-
- aese v0.16b,v16.16b
- aese v1.16b,v16.16b
- aesmc v4.16b,v0.16b
- orr v0.16b,v6.16b,v6.16b
- aesmc v5.16b,v1.16b
- orr v1.16b,v6.16b,v6.16b
- aese v4.16b,v17.16b
- aese v5.16b,v17.16b
- ld1 {v2.16b},[x0],#16
- aesmc v4.16b,v4.16b
- ld1 {v3.16b},[x0],#16
- aesmc v5.16b,v5.16b
- add w8,w8,#1
- aese v4.16b,v18.16b
- aese v5.16b,v18.16b
- rev w9,w8
- aesmc v4.16b,v4.16b
- aesmc v5.16b,v5.16b
- add w8,w8,#1
- aese v4.16b,v19.16b
- aese v5.16b,v19.16b
- eor v2.16b,v2.16b,v7.16b
- rev w10,w8
- aesmc v4.16b,v4.16b
- aesmc v5.16b,v5.16b
- eor v3.16b,v3.16b,v7.16b
- mov x7,x3
- aese v4.16b,v20.16b
- aese v5.16b,v20.16b
- subs x2,x2,#2
- aesmc v4.16b,v4.16b
- aesmc v5.16b,v5.16b
- ld1 {v16.4s-v17.4s},[x7],#32 // re-pre-load rndkey[0-1]
- aese v4.16b,v21.16b
- aese v5.16b,v21.16b
- aesmc v4.16b,v4.16b
- aesmc v5.16b,v5.16b
- aese v4.16b,v22.16b
- aese v5.16b,v22.16b
- mov v0.s[3], w9
- aesmc v4.16b,v4.16b
- mov v1.s[3], w10
- aesmc v5.16b,v5.16b
- aese v4.16b,v23.16b
- aese v5.16b,v23.16b
-
- mov w6,w5
- eor v2.16b,v2.16b,v4.16b
- eor v3.16b,v3.16b,v5.16b
- st1 {v2.16b},[x1],#16
- st1 {v3.16b},[x1],#16
- b.hs .Loop2x_ctr32
-
- adds x2,x2,#2
- b.eq .Lctr32_done
- b .Lctr32_tail
-
-.Lctr32_128:
- ld1 {v4.4s-v5.4s},[x7]
-
-.Loop2x_ctr32_128:
- aese v0.16b,v16.16b
- aese v1.16b,v16.16b
- aesmc v0.16b,v0.16b
- ld1 {v2.16b},[x0],#16
- aesmc v1.16b,v1.16b
- ld1 {v3.16b},[x0],#16
- aese v0.16b,v17.16b
- aese v1.16b,v17.16b
- add w8,w8,#1
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- rev w9,w8
- aese v0.16b,v4.16b
- aese v1.16b,v4.16b
- add w8,w8,#1
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- rev w10,w8
- aese v0.16b,v5.16b
- aese v1.16b,v5.16b
- subs x2,x2,#2
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- aese v0.16b,v18.16b
- aese v1.16b,v18.16b
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- aese v0.16b,v19.16b
- aese v1.16b,v19.16b
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- aese v0.16b,v20.16b
- aese v1.16b,v20.16b
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- aese v0.16b,v21.16b
- aese v1.16b,v21.16b
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- aese v0.16b,v22.16b
- aese v1.16b,v22.16b
- aesmc v0.16b,v0.16b
- aesmc v1.16b,v1.16b
- eor v2.16b,v2.16b,v7.16b
- aese v0.16b,v23.16b
- eor v3.16b,v3.16b,v7.16b
- aese v1.16b,v23.16b
-
- eor v2.16b,v2.16b,v0.16b
- orr v0.16b,v6.16b,v6.16b
- eor v3.16b,v3.16b,v1.16b
- orr v1.16b,v6.16b,v6.16b
- st1 {v2.16b},[x1],#16
- mov v0.s[3], w9
- st1 {v3.16b},[x1],#16
- mov v1.s[3], w10
- b.hs .Loop2x_ctr32_128
-
- adds x2,x2,#2
- b.eq .Lctr32_done
-
-.Lctr32_tail:
- aese v0.16b,v16.16b
- ld1 {v16.4s},[x7],#16
- aesmc v0.16b,v0.16b
- subs w6,w6,#2
- aese v0.16b,v17.16b
- ld1 {v17.4s},[x7],#16
- aesmc v0.16b,v0.16b
- b.gt .Lctr32_tail
-
- aese v0.16b,v16.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v17.16b
- aesmc v0.16b,v0.16b
- ld1 {v2.16b},[x0]
- aese v0.16b,v18.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v19.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v20.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v21.16b
- aesmc v0.16b,v0.16b
- aese v0.16b,v22.16b
- aesmc v0.16b,v0.16b
- eor v2.16b,v2.16b,v7.16b
- aese v0.16b,v23.16b
-
- eor v2.16b,v2.16b,v0.16b
- st1 {v2.16b},[x1]
-
-.Lctr32_done:
- ldr x29,[sp],#16
- ret
-.size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks
-#endif
diff --git a/app/openssl/crypto/aes/asm/aesv8-armx.S b/app/openssl/crypto/aes/asm/aesv8-armx.S
deleted file mode 100644
index 1637e4d4..00000000
--- a/app/openssl/crypto/aes/asm/aesv8-armx.S
+++ /dev/null
@@ -1,767 +0,0 @@
-#include "arm_arch.h"
-
-#if __ARM_ARCH__>=7
-.text
-.fpu neon
-.code 32
-.align 5
-rcon:
-.long 0x01,0x01,0x01,0x01
-.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d @ rotate-n-splat
-.long 0x1b,0x1b,0x1b,0x1b
-
-.globl aes_v8_set_encrypt_key
-.type aes_v8_set_encrypt_key,%function
-.align 5
-aes_v8_set_encrypt_key:
-.Lenc_key:
- adr r3,rcon
- cmp r1,#192
-
- veor q0,q0,q0
- vld1.8 {q3},[r0]!
- mov r1,#8 @ reuse r1
- vld1.32 {q1,q2},[r3]!
-
- blt .Loop128
- beq .L192
- b .L256
-
-.align 4
-.Loop128:
- vtbl.8 d20,{q3},d4
- vtbl.8 d21,{q3},d5
- vext.8 q9,q0,q3,#12
- vst1.32 {q3},[r2]!
- .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
- subs r1,r1,#1
-
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q10,q10,q1
- veor q3,q3,q9
- vshl.u8 q1,q1,#1
- veor q3,q3,q10
- bne .Loop128
-
- vld1.32 {q1},[r3]
-
- vtbl.8 d20,{q3},d4
- vtbl.8 d21,{q3},d5
- vext.8 q9,q0,q3,#12
- vst1.32 {q3},[r2]!
- .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
-
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q10,q10,q1
- veor q3,q3,q9
- vshl.u8 q1,q1,#1
- veor q3,q3,q10
-
- vtbl.8 d20,{q3},d4
- vtbl.8 d21,{q3},d5
- vext.8 q9,q0,q3,#12
- vst1.32 {q3},[r2]!
- .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
-
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q10,q10,q1
- veor q3,q3,q9
- veor q3,q3,q10
- vst1.32 {q3},[r2]
- add r2,r2,#0x50
-
- mov r12,#10
- b .Ldone
-
-.align 4
-.L192:
- vld1.8 {d16},[r0]!
- vmov.i8 q10,#8 @ borrow q10
- vst1.32 {q3},[r2]!
- vsub.i8 q2,q2,q10 @ adjust the mask
-
-.Loop192:
- vtbl.8 d20,{q8},d4
- vtbl.8 d21,{q8},d5
- vext.8 q9,q0,q3,#12
- vst1.32 {d16},[r2]!
- .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
- subs r1,r1,#1
-
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q3,q3,q9
-
- vdup.32 q9,d7[1]
- veor q9,q9,q8
- veor q10,q10,q1
- vext.8 q8,q0,q8,#12
- vshl.u8 q1,q1,#1
- veor q8,q8,q9
- veor q3,q3,q10
- veor q8,q8,q10
- vst1.32 {q3},[r2]!
- bne .Loop192
-
- mov r12,#12
- add r2,r2,#0x20
- b .Ldone
-
-.align 4
-.L256:
- vld1.8 {q8},[r0]
- mov r1,#7
- mov r12,#14
- vst1.32 {q3},[r2]!
-
-.Loop256:
- vtbl.8 d20,{q8},d4
- vtbl.8 d21,{q8},d5
- vext.8 q9,q0,q3,#12
- vst1.32 {q8},[r2]!
- .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
- subs r1,r1,#1
-
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q3,q3,q9
- vext.8 q9,q0,q9,#12
- veor q10,q10,q1
- veor q3,q3,q9
- vshl.u8 q1,q1,#1
- veor q3,q3,q10
- vst1.32 {q3},[r2]!
- beq .Ldone
-
- vdup.32 q10,d7[1]
- vext.8 q9,q0,q8,#12
- .byte 0x00,0x43,0xf0,0xf3 @ aese q10,q0
-
- veor q8,q8,q9
- vext.8 q9,q0,q9,#12
- veor q8,q8,q9
- vext.8 q9,q0,q9,#12
- veor q8,q8,q9
-
- veor q8,q8,q10
- b .Loop256
-
-.Ldone:
- str r12,[r2]
-
- eor r0,r0,r0 @ return value
-
- bx lr
-.size aes_v8_set_encrypt_key,.-aes_v8_set_encrypt_key
-
-.globl aes_v8_set_decrypt_key
-.type aes_v8_set_decrypt_key,%function
-.align 5
-aes_v8_set_decrypt_key:
- stmdb sp!,{r4,lr}
- bl .Lenc_key
-
- sub r2,r2,#240 @ restore original r2
- mov r4,#-16
- add r0,r2,r12,lsl#4 @ end of key schedule
-
- vld1.32 {q0},[r2]
- vld1.32 {q1},[r0]
- vst1.32 {q0},[r0],r4
- vst1.32 {q1},[r2]!
-
-.Loop_imc:
- vld1.32 {q0},[r2]
- vld1.32 {q1},[r0]
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- vst1.32 {q0},[r0],r4
- vst1.32 {q1},[r2]!
- cmp r0,r2
- bhi .Loop_imc
-
- vld1.32 {q0},[r2]
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- vst1.32 {q0},[r0]
-
- eor r0,r0,r0 @ return value
- ldmia sp!,{r4,pc}
-.size aes_v8_set_decrypt_key,.-aes_v8_set_decrypt_key
-.globl aes_v8_encrypt
-.type aes_v8_encrypt,%function
-.align 5
-aes_v8_encrypt:
- ldr r3,[r2,#240]
- vld1.32 {q0},[r2]!
- vld1.8 {q2},[r0]
- sub r3,r3,#2
- vld1.32 {q1},[r2]!
-
-.Loop_enc:
- .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
- vld1.32 {q0},[r2]!
- .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
- subs r3,r3,#2
- .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
- vld1.32 {q1},[r2]!
- .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
- bgt .Loop_enc
-
- .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
- vld1.32 {q0},[r2]
- .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
- .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
- veor q2,q2,q0
-
- vst1.8 {q2},[r1]
- bx lr
-.size aes_v8_encrypt,.-aes_v8_encrypt
-.globl aes_v8_decrypt
-.type aes_v8_decrypt,%function
-.align 5
-aes_v8_decrypt:
- ldr r3,[r2,#240]
- vld1.32 {q0},[r2]!
- vld1.8 {q2},[r0]
- sub r3,r3,#2
- vld1.32 {q1},[r2]!
-
-.Loop_dec:
- .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
- vld1.32 {q0},[r2]!
- .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
- subs r3,r3,#2
- .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
- vld1.32 {q1},[r2]!
- .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
- bgt .Loop_dec
-
- .byte 0x40,0x43,0xb0,0xf3 @ aesd q2,q0
- vld1.32 {q0},[r2]
- .byte 0xc4,0x43,0xb0,0xf3 @ aesimc q2,q2
- .byte 0x42,0x43,0xb0,0xf3 @ aesd q2,q1
- veor q2,q2,q0
-
- vst1.8 {q2},[r1]
- bx lr
-.size aes_v8_decrypt,.-aes_v8_decrypt
-.globl aes_v8_cbc_encrypt
-.type aes_v8_cbc_encrypt,%function
-.align 5
-aes_v8_cbc_encrypt:
- mov ip,sp
- stmdb sp!,{r4-r8,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
- ldmia ip,{r4-r5} @ load remaining args
- subs r2,r2,#16
- mov r8,#16
- blo .Lcbc_abort
- moveq r8,#0
-
- cmp r5,#0 @ en- or decrypting?
- ldr r5,[r3,#240]
- and r2,r2,#-16
- vld1.8 {q6},[r4]
- vld1.8 {q0},[r0],r8
-
- vld1.32 {q8-q9},[r3] @ load key schedule...
- sub r5,r5,#6
- add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
- sub r5,r5,#2
- vld1.32 {q10-q11},[r7]!
- vld1.32 {q12-q13},[r7]!
- vld1.32 {q14-q15},[r7]!
- vld1.32 {q7},[r7]
-
- add r7,r3,#32
- mov r6,r5
- beq .Lcbc_dec
-
- cmp r5,#2
- veor q0,q0,q6
- veor q5,q8,q7
- beq .Lcbc_enc128
-
-.Loop_cbc_enc:
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- vld1.32 {q8},[r7]!
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- subs r6,r6,#2
- .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
- vld1.32 {q9},[r7]!
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- bgt .Loop_cbc_enc
-
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- subs r2,r2,#16
- .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- moveq r8,#0
- .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- add r7,r3,#16
- .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- vld1.8 {q8},[r0],r8
- .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- veor q8,q8,q5
- .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
- .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
-
- mov r6,r5
- veor q6,q0,q7
- vst1.8 {q6},[r1]!
- bhs .Loop_cbc_enc
-
- b .Lcbc_done
-
-.align 5
-.Lcbc_enc128:
- vld1.32 {q2-q3},[r7]
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- b .Lenter_cbc_enc128
-.Loop_cbc_enc128:
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- vst1.8 {q6},[r1]!
-.Lenter_cbc_enc128:
- .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- subs r2,r2,#16
- .byte 0x04,0x03,0xb0,0xf3 @ aese q0,q2
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- moveq r8,#0
- .byte 0x06,0x03,0xb0,0xf3 @ aese q0,q3
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- vld1.8 {q8},[r0],r8
- .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- veor q8,q8,q5
- .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
- veor q6,q0,q7
- bhs .Loop_cbc_enc128
-
- vst1.8 {q6},[r1]!
- b .Lcbc_done
-
-.align 5
-.Lcbc_dec128:
- vld1.32 {q4-q5},[r7]
- veor q6,q6,q7
- veor q2,q0,q7
- mov r12,r8
-
-.Loop2x_cbc_dec128:
- .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
- .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- subs r2,r2,#32
- .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
- .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- movlo r8,#0
- .byte 0x48,0x03,0xb0,0xf3 @ aesd q0,q4
- .byte 0x48,0x23,0xb0,0xf3 @ aesd q1,q4
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- movls r12,#0
- .byte 0x4a,0x03,0xb0,0xf3 @ aesd q0,q5
- .byte 0x4a,0x23,0xb0,0xf3 @ aesd q1,q5
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- .byte 0x64,0x03,0xb0,0xf3 @ aesd q0,q10
- .byte 0x64,0x23,0xb0,0xf3 @ aesd q1,q10
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- .byte 0x66,0x03,0xb0,0xf3 @ aesd q0,q11
- .byte 0x66,0x23,0xb0,0xf3 @ aesd q1,q11
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- .byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
- .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- .byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
- .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- .byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
- .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- .byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
- .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
-
- veor q6,q6,q0
- vld1.8 {q0},[r0],r8
- veor q2,q2,q1
- vld1.8 {q1},[r0],r12
- vst1.8 {q6},[r1]!
- veor q6,q3,q7
- vst1.8 {q2},[r1]!
- veor q2,q0,q7
- vorr q3,q1,q1
- bhs .Loop2x_cbc_dec128
-
- adds r2,r2,#32
- veor q6,q6,q7
- beq .Lcbc_done
- veor q2,q2,q7
- b .Lcbc_dec_tail
-
-.align 5
-.Lcbc_dec:
- subs r2,r2,#16
- vorr q2,q0,q0
- blo .Lcbc_dec_tail
-
- moveq r8,#0
- cmp r5,#2
- vld1.8 {q1},[r0],r8
- vorr q3,q1,q1
- beq .Lcbc_dec128
-
-.Loop2x_cbc_dec:
- .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
- .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
- vld1.32 {q8},[r7]!
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- subs r6,r6,#2
- .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
- .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
- vld1.32 {q9},[r7]!
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- bgt .Loop2x_cbc_dec
-
- .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
- .byte 0x60,0x23,0xb0,0xf3 @ aesd q1,q8
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- veor q4,q6,q7
- veor q5,q2,q7
- .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
- .byte 0x62,0x23,0xb0,0xf3 @ aesd q1,q9
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- vorr q6,q3,q3
- subs r2,r2,#32
- .byte 0x64,0x03,0xb0,0xf3 @ aesd q0,q10
- .byte 0x64,0x23,0xb0,0xf3 @ aesd q1,q10
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- movlo r8,#0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- mov r7,r3
- .byte 0x66,0x03,0xb0,0xf3 @ aesd q0,q11
- .byte 0x66,0x23,0xb0,0xf3 @ aesd q1,q11
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- vld1.8 {q2},[r0],r8
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- movls r8,#0
- .byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
- .byte 0x68,0x23,0xb0,0xf3 @ aesd q1,q12
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- vld1.8 {q3},[r0],r8
- .byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
- .byte 0x6a,0x23,0xb0,0xf3 @ aesd q1,q13
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- vld1.32 {q8},[r7]! @ re-pre-load rndkey[0]
- .byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
- .byte 0x6c,0x23,0xb0,0xf3 @ aesd q1,q14
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0xc2,0x23,0xb0,0xf3 @ aesimc q1,q1
- vld1.32 {q9},[r7]! @ re-pre-load rndkey[1]
- .byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
- .byte 0x6e,0x23,0xb0,0xf3 @ aesd q1,q15
-
- mov r6,r5
- veor q4,q4,q0
- veor q5,q5,q1
- vorr q0,q2,q2
- vst1.8 {q4},[r1]!
- vorr q1,q3,q3
- vst1.8 {q5},[r1]!
- bhs .Loop2x_cbc_dec
-
- adds r2,r2,#32
- beq .Lcbc_done
-
-.Lcbc_dec_tail:
- .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
- vld1.32 {q8},[r7]!
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- subs r6,r6,#2
- .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
- vld1.32 {q9},[r7]!
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- bgt .Lcbc_dec_tail
-
- .byte 0x60,0x03,0xb0,0xf3 @ aesd q0,q8
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0x62,0x03,0xb0,0xf3 @ aesd q0,q9
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- veor q4,q6,q7
- .byte 0x64,0x03,0xb0,0xf3 @ aesd q0,q10
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- vorr q6,q2,q2
- .byte 0x66,0x03,0xb0,0xf3 @ aesd q0,q11
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0x68,0x03,0xb0,0xf3 @ aesd q0,q12
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0x6a,0x03,0xb0,0xf3 @ aesd q0,q13
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0x6c,0x03,0xb0,0xf3 @ aesd q0,q14
- .byte 0xc0,0x03,0xb0,0xf3 @ aesimc q0,q0
- .byte 0x6e,0x03,0xb0,0xf3 @ aesd q0,q15
-
- veor q4,q4,q0
- vst1.8 {q4},[r1]!
-
-.Lcbc_done:
- vst1.8 {q6},[r4]
-.Lcbc_abort:
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r8,pc}
-.size aes_v8_cbc_encrypt,.-aes_v8_cbc_encrypt
-.globl aes_v8_ctr32_encrypt_blocks
-.type aes_v8_ctr32_encrypt_blocks,%function
-.align 5
-aes_v8_ctr32_encrypt_blocks:
- mov ip,sp
- stmdb sp!,{r4-r10,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
- ldr r4, [ip] @ load remaining arg
- ldr r5,[r3,#240]
-
- ldr r8, [r4, #12]
- vld1.32 {q0},[r4]
-
- vld1.32 {q8-q9},[r3] @ load key schedule...
- sub r5,r5,#6
- add r7,r3,r5,lsl#4 @ pointer to last 7 round keys
- sub r5,r5,#2
- vld1.32 {q10-q11},[r7]!
- vld1.32 {q12-q13},[r7]!
- vld1.32 {q14-q15},[r7]!
- vld1.32 {q7},[r7]
-
- add r7,r3,#32
- mov r6,r5
-
- subs r2,r2,#2
- blo .Lctr32_tail
-
-#ifndef __ARMEB__
- rev r8, r8
-#endif
- vorr q1,q0,q0
- add r8, r8, #1
- vorr q6,q0,q0
- rev r10, r8
- cmp r5,#2
- vmov.32 d3[1],r10
- beq .Lctr32_128
-
-.Loop2x_ctr32:
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
- vld1.32 {q8},[r7]!
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- subs r6,r6,#2
- .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
- .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
- vld1.32 {q9},[r7]!
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- bgt .Loop2x_ctr32
-
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
- .byte 0x80,0x83,0xb0,0xf3 @ aesmc q4,q0
- vorr q0,q6,q6
- .byte 0x82,0xa3,0xb0,0xf3 @ aesmc q5,q1
- vorr q1,q6,q6
- .byte 0x22,0x83,0xb0,0xf3 @ aese q4,q9
- .byte 0x22,0xa3,0xb0,0xf3 @ aese q5,q9
- vld1.8 {q2},[r0]!
- .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
- vld1.8 {q3},[r0]!
- .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- add r8,r8,#1
- .byte 0x24,0x83,0xb0,0xf3 @ aese q4,q10
- .byte 0x24,0xa3,0xb0,0xf3 @ aese q5,q10
- rev r9,r8
- .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
- .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- add r8,r8,#1
- .byte 0x26,0x83,0xb0,0xf3 @ aese q4,q11
- .byte 0x26,0xa3,0xb0,0xf3 @ aese q5,q11
- veor q2,q2,q7
- rev r10,r8
- .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
- .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- veor q3,q3,q7
- mov r7,r3
- .byte 0x28,0x83,0xb0,0xf3 @ aese q4,q12
- .byte 0x28,0xa3,0xb0,0xf3 @ aese q5,q12
- subs r2,r2,#2
- .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
- .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- vld1.32 {q8-q9},[r7]! @ re-pre-load rndkey[0-1]
- .byte 0x2a,0x83,0xb0,0xf3 @ aese q4,q13
- .byte 0x2a,0xa3,0xb0,0xf3 @ aese q5,q13
- .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
- .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- .byte 0x2c,0x83,0xb0,0xf3 @ aese q4,q14
- .byte 0x2c,0xa3,0xb0,0xf3 @ aese q5,q14
- vmov.32 d1[1], r9
- .byte 0x88,0x83,0xb0,0xf3 @ aesmc q4,q4
- vmov.32 d3[1], r10
- .byte 0x8a,0xa3,0xb0,0xf3 @ aesmc q5,q5
- .byte 0x2e,0x83,0xb0,0xf3 @ aese q4,q15
- .byte 0x2e,0xa3,0xb0,0xf3 @ aese q5,q15
-
- mov r6,r5
- veor q2,q2,q4
- veor q3,q3,q5
- vst1.8 {q2},[r1]!
- vst1.8 {q3},[r1]!
- bhs .Loop2x_ctr32
-
- adds r2,r2,#2
- beq .Lctr32_done
- b .Lctr32_tail
-
-.Lctr32_128:
- vld1.32 {q4-q5},[r7]
-
-.Loop2x_ctr32_128:
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- .byte 0x20,0x23,0xb0,0xf3 @ aese q1,q8
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- vld1.8 {q2},[r0]!
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- vld1.8 {q3},[r0]!
- .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
- .byte 0x22,0x23,0xb0,0xf3 @ aese q1,q9
- add r8,r8,#1
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- rev r9,r8
- .byte 0x08,0x03,0xb0,0xf3 @ aese q0,q4
- .byte 0x08,0x23,0xb0,0xf3 @ aese q1,q4
- add r8,r8,#1
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- rev r10,r8
- .byte 0x0a,0x03,0xb0,0xf3 @ aese q0,q5
- .byte 0x0a,0x23,0xb0,0xf3 @ aese q1,q5
- subs r2,r2,#2
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
- .byte 0x24,0x23,0xb0,0xf3 @ aese q1,q10
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
- .byte 0x26,0x23,0xb0,0xf3 @ aese q1,q11
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
- .byte 0x28,0x23,0xb0,0xf3 @ aese q1,q12
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
- .byte 0x2a,0x23,0xb0,0xf3 @ aese q1,q13
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
- .byte 0x2c,0x23,0xb0,0xf3 @ aese q1,q14
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x82,0x23,0xb0,0xf3 @ aesmc q1,q1
- veor q2,q2,q7
- .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
- veor q3,q3,q7
- .byte 0x2e,0x23,0xb0,0xf3 @ aese q1,q15
-
- veor q2,q2,q0
- vorr q0,q6,q6
- veor q3,q3,q1
- vorr q1,q6,q6
- vst1.8 {q2},[r1]!
- vmov.32 d1[1], r9
- vst1.8 {q3},[r1]!
- vmov.32 d3[1], r10
- bhs .Loop2x_ctr32_128
-
- adds r2,r2,#2
- beq .Lctr32_done
-
-.Lctr32_tail:
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- vld1.32 {q8},[r7]!
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- subs r6,r6,#2
- .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
- vld1.32 {q9},[r7]!
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- bgt .Lctr32_tail
-
- .byte 0x20,0x03,0xb0,0xf3 @ aese q0,q8
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x22,0x03,0xb0,0xf3 @ aese q0,q9
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- vld1.8 {q2},[r0]
- .byte 0x24,0x03,0xb0,0xf3 @ aese q0,q10
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x26,0x03,0xb0,0xf3 @ aese q0,q11
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x28,0x03,0xb0,0xf3 @ aese q0,q12
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x2a,0x03,0xb0,0xf3 @ aese q0,q13
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- .byte 0x2c,0x03,0xb0,0xf3 @ aese q0,q14
- .byte 0x80,0x03,0xb0,0xf3 @ aesmc q0,q0
- veor q2,q2,q7
- .byte 0x2e,0x03,0xb0,0xf3 @ aese q0,q15
-
- veor q2,q2,q0
- vst1.8 {q2},[r1]
-
-.Lctr32_done:
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r10,pc}
-.size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks
-#endif
diff --git a/app/openssl/crypto/aes/asm/aesv8-armx.pl b/app/openssl/crypto/aes/asm/aesv8-armx.pl
deleted file mode 100644
index 415dc04a..00000000
--- a/app/openssl/crypto/aes/asm/aesv8-armx.pl
+++ /dev/null
@@ -1,980 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# This module implements support for ARMv8 AES instructions. The
-# module is endian-agnostic in the sense that it supports both big-
-# and little-endian cases, and it supports both 32- and 64-bit modes
-# of operation. The latter is achieved by limiting the number of
-# utilized registers to 16, which implies additional instructions.
-# This has no effect on the mighty Apple A7, where results are
-# literally equal to the theoretical estimates based on instruction
-# latencies and issue rate. It remains to be seen how it affects
-# other platforms...
-#
-# Performance in cycles per byte processed with 128-bit key:
-#
-# CBC enc CBC dec CTR
-# Apple A7 2.39 1.20 1.20
-# Cortex-A5x n/a n/a n/a
-
-$flavour = shift;
-open STDOUT,">".shift;
-
-$prefix="aes_v8";
-
-$code=<<___;
-#include "arm_arch.h"
-
-#if __ARM_ARCH__>=7
-.text
-___
-$code.=".arch armv8-a+crypto\n" if ($flavour =~ /64/);
-$code.=".fpu neon\n.code 32\n" if ($flavour !~ /64/);
-
-# Assembler mnemonics are an eclectic mix of 32- and 64-bit syntax:
-# NEON is mostly 32-bit mnemonics, integer mostly 64-bit. The goal is
-# to maintain both 32- and 64-bit code within a single module and
-# transliterate the common code to either flavour with regex voodoo.
-#
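-# Editorial illustration of that transliteration (using the shared
-# source line "vld1.32 {q8},[$key_],#16" as the example): the 64-bit
-# pass at the bottom of this file rewrites it to
-# "ld1 {v16.4s},[x7],#16" (q8-q15 map to v16-v23), while the 32-bit
-# pass keeps NEON syntax and yields "vld1.32 {q8},[r7]!"; both forms
-# appear in the pre-generated .S files in this directory.
-#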
-{{{
-my ($inp,$bits,$out,$ptr,$rounds)=("x0","w1","x2","x3","w12");
-my ($zero,$rcon,$mask,$in0,$in1,$tmp,$key)=
- $flavour=~/64/? map("q$_",(0..6)) : map("q$_",(0..3,8..10));
-
-
-$code.=<<___;
-.align 5
-rcon:
-.long 0x01,0x01,0x01,0x01
-.long 0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d // rotate-n-splat
-.long 0x1b,0x1b,0x1b,0x1b
-
-.globl ${prefix}_set_encrypt_key
-.type ${prefix}_set_encrypt_key,%function
-.align 5
-${prefix}_set_encrypt_key:
-.Lenc_key:
-___
-$code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-___
-$code.=<<___;
- adr $ptr,rcon
- cmp $bits,#192
-
- veor $zero,$zero,$zero
- vld1.8 {$in0},[$inp],#16
- mov $bits,#8 // reuse $bits
- vld1.32 {$rcon,$mask},[$ptr],#32
-
- b.lt .Loop128
- b.eq .L192
- b .L256
-
-.align 4
-.Loop128:
- vtbl.8 $key,{$in0},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in0},[$out],#16
- aese $key,$zero
- subs $bits,$bits,#1
-
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- vshl.u8 $rcon,$rcon,#1
- veor $in0,$in0,$key
- b.ne .Loop128
-
- vld1.32 {$rcon},[$ptr]
-
- vtbl.8 $key,{$in0},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in0},[$out],#16
- aese $key,$zero
-
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- vshl.u8 $rcon,$rcon,#1
- veor $in0,$in0,$key
-
- vtbl.8 $key,{$in0},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in0},[$out],#16
- aese $key,$zero
-
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- veor $in0,$in0,$key
- vst1.32 {$in0},[$out]
- add $out,$out,#0x50
-
- mov $rounds,#10
- b .Ldone
-
-.align 4
-.L192:
- vld1.8 {$in1},[$inp],#8
- vmov.i8 $key,#8 // borrow $key
- vst1.32 {$in0},[$out],#16
- vsub.i8 $mask,$mask,$key // adjust the mask
-
-.Loop192:
- vtbl.8 $key,{$in1},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in1},[$out],#8
- aese $key,$zero
- subs $bits,$bits,#1
-
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
-
- vdup.32 $tmp,${in0}[3]
- veor $tmp,$tmp,$in1
- veor $key,$key,$rcon
- vext.8 $in1,$zero,$in1,#12
- vshl.u8 $rcon,$rcon,#1
- veor $in1,$in1,$tmp
- veor $in0,$in0,$key
- veor $in1,$in1,$key
- vst1.32 {$in0},[$out],#16
- b.ne .Loop192
-
- mov $rounds,#12
- add $out,$out,#0x20
- b .Ldone
-
-.align 4
-.L256:
- vld1.8 {$in1},[$inp]
- mov $bits,#7
- mov $rounds,#14
- vst1.32 {$in0},[$out],#16
-
-.Loop256:
- vtbl.8 $key,{$in1},$mask
- vext.8 $tmp,$zero,$in0,#12
- vst1.32 {$in1},[$out],#16
- aese $key,$zero
- subs $bits,$bits,#1
-
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in0,$in0,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $key,$key,$rcon
- veor $in0,$in0,$tmp
- vshl.u8 $rcon,$rcon,#1
- veor $in0,$in0,$key
- vst1.32 {$in0},[$out],#16
- b.eq .Ldone
-
- vdup.32 $key,${in0}[3] // just splat
- vext.8 $tmp,$zero,$in1,#12
- aese $key,$zero
-
- veor $in1,$in1,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in1,$in1,$tmp
- vext.8 $tmp,$zero,$tmp,#12
- veor $in1,$in1,$tmp
-
- veor $in1,$in1,$key
- b .Loop256
-
-.Ldone:
- str $rounds,[$out]
-
- eor x0,x0,x0 // return value
- `"ldr x29,[sp],#16" if ($flavour =~ /64/)`
- ret
-.size ${prefix}_set_encrypt_key,.-${prefix}_set_encrypt_key
-
-.globl ${prefix}_set_decrypt_key
-.type ${prefix}_set_decrypt_key,%function
-.align 5
-${prefix}_set_decrypt_key:
-___
-$code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-___
-$code.=<<___ if ($flavour !~ /64/);
- stmdb sp!,{r4,lr}
-___
-$code.=<<___;
- bl .Lenc_key
-
- sub $out,$out,#240 // restore original $out
- mov x4,#-16
- add $inp,$out,x12,lsl#4 // end of key schedule
-
- vld1.32 {v0.16b},[$out]
- vld1.32 {v1.16b},[$inp]
- vst1.32 {v0.16b},[$inp],x4
- vst1.32 {v1.16b},[$out],#16
-
-.Loop_imc:
- vld1.32 {v0.16b},[$out]
- vld1.32 {v1.16b},[$inp]
- aesimc v0.16b,v0.16b
- aesimc v1.16b,v1.16b
- vst1.32 {v0.16b},[$inp],x4
- vst1.32 {v1.16b},[$out],#16
- cmp $inp,$out
- b.hi .Loop_imc
-
- vld1.32 {v0.16b},[$out]
- aesimc v0.16b,v0.16b
- vst1.32 {v0.16b},[$inp]
-
- eor x0,x0,x0 // return value
-___
-$code.=<<___ if ($flavour !~ /64/);
- ldmia sp!,{r4,pc}
-___
-$code.=<<___ if ($flavour =~ /64/);
- ldp x29,x30,[sp],#16
- ret
-___
-$code.=<<___;
-.size ${prefix}_set_decrypt_key,.-${prefix}_set_decrypt_key
-___
-}}}
-{{{
-sub gen_block () {
-my $dir = shift;
-my ($e,$mc) = $dir eq "en" ? ("e","mc") : ("d","imc");
-my ($inp,$out,$key)=map("x$_",(0..2));
-my $rounds="w3";
-my ($rndkey0,$rndkey1,$inout)=map("q$_",(0..3));
-
-$code.=<<___;
-.globl ${prefix}_${dir}crypt
-.type ${prefix}_${dir}crypt,%function
-.align 5
-${prefix}_${dir}crypt:
- ldr $rounds,[$key,#240]
- vld1.32 {$rndkey0},[$key],#16
- vld1.8 {$inout},[$inp]
- sub $rounds,$rounds,#2
- vld1.32 {$rndkey1},[$key],#16
-
-.Loop_${dir}c:
- aes$e $inout,$rndkey0
- vld1.32 {$rndkey0},[$key],#16
- aes$mc $inout,$inout
- subs $rounds,$rounds,#2
- aes$e $inout,$rndkey1
- vld1.32 {$rndkey1},[$key],#16
- aes$mc $inout,$inout
- b.gt .Loop_${dir}c
-
- aes$e $inout,$rndkey0
- vld1.32 {$rndkey0},[$key]
- aes$mc $inout,$inout
- aes$e $inout,$rndkey1
- veor $inout,$inout,$rndkey0
-
- vst1.8 {$inout},[$out]
- ret
-.size ${prefix}_${dir}crypt,.-${prefix}_${dir}crypt
-___
-}
-&gen_block("en");
-&gen_block("de");
-}}}
-{{{
-my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4)); my $enc="w5";
-my ($rounds,$cnt,$key_,$step,$step1)=($enc,"w6","x7","x8","x12");
-my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
-
-my ($dat,$tmp,$rndzero_n_last)=($dat0,$tmp0,$tmp1);
-
-### q8-q15 preloaded key schedule
-
-$code.=<<___;
-.globl ${prefix}_cbc_encrypt
-.type ${prefix}_cbc_encrypt,%function
-.align 5
-${prefix}_cbc_encrypt:
-___
-$code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-___
-$code.=<<___ if ($flavour !~ /64/);
- mov ip,sp
- stmdb sp!,{r4-r8,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
- ldmia ip,{r4-r5} @ load remaining args
-___
-$code.=<<___;
- subs $len,$len,#16
- mov $step,#16
- b.lo .Lcbc_abort
- cclr $step,eq
-
- cmp $enc,#0 // en- or decrypting?
- ldr $rounds,[$key,#240]
- and $len,$len,#-16
- vld1.8 {$ivec},[$ivp]
- vld1.8 {$dat},[$inp],$step
-
- vld1.32 {q8-q9},[$key] // load key schedule...
- sub $rounds,$rounds,#6
- add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
- sub $rounds,$rounds,#2
- vld1.32 {q10-q11},[$key_],#32
- vld1.32 {q12-q13},[$key_],#32
- vld1.32 {q14-q15},[$key_],#32
- vld1.32 {$rndlast},[$key_]
-
- add $key_,$key,#32
- mov $cnt,$rounds
- b.eq .Lcbc_dec
-
- cmp $rounds,#2
- veor $dat,$dat,$ivec
- veor $rndzero_n_last,q8,$rndlast
- b.eq .Lcbc_enc128
-
-.Loop_cbc_enc:
- aese $dat,q8
- vld1.32 {q8},[$key_],#16
- aesmc $dat,$dat
- subs $cnt,$cnt,#2
- aese $dat,q9
- vld1.32 {q9},[$key_],#16
- aesmc $dat,$dat
- b.gt .Loop_cbc_enc
-
- aese $dat,q8
- aesmc $dat,$dat
- subs $len,$len,#16
- aese $dat,q9
- aesmc $dat,$dat
- cclr $step,eq
- aese $dat,q10
- aesmc $dat,$dat
- add $key_,$key,#16
- aese $dat,q11
- aesmc $dat,$dat
- vld1.8 {q8},[$inp],$step
- aese $dat,q12
- aesmc $dat,$dat
- veor q8,q8,$rndzero_n_last
- aese $dat,q13
- aesmc $dat,$dat
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- aese $dat,q14
- aesmc $dat,$dat
- aese $dat,q15
-
- mov $cnt,$rounds
- veor $ivec,$dat,$rndlast
- vst1.8 {$ivec},[$out],#16
- b.hs .Loop_cbc_enc
-
- b .Lcbc_done
-
-.align 5
-.Lcbc_enc128:
- vld1.32 {$in0-$in1},[$key_]
- aese $dat,q8
- aesmc $dat,$dat
- b .Lenter_cbc_enc128
-.Loop_cbc_enc128:
- aese $dat,q8
- aesmc $dat,$dat
- vst1.8 {$ivec},[$out],#16
-.Lenter_cbc_enc128:
- aese $dat,q9
- aesmc $dat,$dat
- subs $len,$len,#16
- aese $dat,$in0
- aesmc $dat,$dat
- cclr $step,eq
- aese $dat,$in1
- aesmc $dat,$dat
- aese $dat,q10
- aesmc $dat,$dat
- aese $dat,q11
- aesmc $dat,$dat
- vld1.8 {q8},[$inp],$step
- aese $dat,q12
- aesmc $dat,$dat
- aese $dat,q13
- aesmc $dat,$dat
- aese $dat,q14
- aesmc $dat,$dat
- veor q8,q8,$rndzero_n_last
- aese $dat,q15
- veor $ivec,$dat,$rndlast
- b.hs .Loop_cbc_enc128
-
- vst1.8 {$ivec},[$out],#16
- b .Lcbc_done
-
-.align 5
-.Lcbc_dec128:
- vld1.32 {$tmp0-$tmp1},[$key_]
- veor $ivec,$ivec,$rndlast
- veor $in0,$dat0,$rndlast
- mov $step1,$step
-
-.Loop2x_cbc_dec128:
- aesd $dat0,q8
- aesd $dat1,q8
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- subs $len,$len,#32
- aesd $dat0,q9
- aesd $dat1,q9
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- cclr $step,lo
- aesd $dat0,$tmp0
- aesd $dat1,$tmp0
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- cclr $step1,ls
- aesd $dat0,$tmp1
- aesd $dat1,$tmp1
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- aesd $dat0,q10
- aesd $dat1,q10
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- aesd $dat0,q11
- aesd $dat1,q11
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- aesd $dat0,q12
- aesd $dat1,q12
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- aesd $dat0,q13
- aesd $dat1,q13
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- aesd $dat0,q14
- aesd $dat1,q14
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- aesd $dat0,q15
- aesd $dat1,q15
-
- veor $ivec,$ivec,$dat0
- vld1.8 {$dat0},[$inp],$step
- veor $in0,$in0,$dat1
- vld1.8 {$dat1},[$inp],$step1
- vst1.8 {$ivec},[$out],#16
- veor $ivec,$in1,$rndlast
- vst1.8 {$in0},[$out],#16
- veor $in0,$dat0,$rndlast
- vorr $in1,$dat1,$dat1
- b.hs .Loop2x_cbc_dec128
-
- adds $len,$len,#32
- veor $ivec,$ivec,$rndlast
- b.eq .Lcbc_done
- veor $in0,$in0,$rndlast
- b .Lcbc_dec_tail
-
-.align 5
-.Lcbc_dec:
- subs $len,$len,#16
- vorr $in0,$dat,$dat
- b.lo .Lcbc_dec_tail
-
- cclr $step,eq
- cmp $rounds,#2
- vld1.8 {$dat1},[$inp],$step
- vorr $in1,$dat1,$dat1
- b.eq .Lcbc_dec128
-
-.Loop2x_cbc_dec:
- aesd $dat0,q8
- aesd $dat1,q8
- vld1.32 {q8},[$key_],#16
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- subs $cnt,$cnt,#2
- aesd $dat0,q9
- aesd $dat1,q9
- vld1.32 {q9},[$key_],#16
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- b.gt .Loop2x_cbc_dec
-
- aesd $dat0,q8
- aesd $dat1,q8
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- veor $tmp0,$ivec,$rndlast
- veor $tmp1,$in0,$rndlast
- aesd $dat0,q9
- aesd $dat1,q9
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- vorr $ivec,$in1,$in1
- subs $len,$len,#32
- aesd $dat0,q10
- aesd $dat1,q10
- aesimc $dat0,$dat0
- cclr $step,lo
- aesimc $dat1,$dat1
- mov $key_,$key
- aesd $dat0,q11
- aesd $dat1,q11
- aesimc $dat0,$dat0
- vld1.8 {$in0},[$inp],$step
- aesimc $dat1,$dat1
- cclr $step,ls
- aesd $dat0,q12
- aesd $dat1,q12
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- vld1.8 {$in1},[$inp],$step
- aesd $dat0,q13
- aesd $dat1,q13
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- vld1.32 {q8},[$key_],#16 // re-pre-load rndkey[0]
- aesd $dat0,q14
- aesd $dat1,q14
- aesimc $dat0,$dat0
- aesimc $dat1,$dat1
- vld1.32 {q9},[$key_],#16 // re-pre-load rndkey[1]
- aesd $dat0,q15
- aesd $dat1,q15
-
- mov $cnt,$rounds
- veor $tmp0,$tmp0,$dat0
- veor $tmp1,$tmp1,$dat1
- vorr $dat0,$in0,$in0
- vst1.8 {$tmp0},[$out],#16
- vorr $dat1,$in1,$in1
- vst1.8 {$tmp1},[$out],#16
- b.hs .Loop2x_cbc_dec
-
- adds $len,$len,#32
- b.eq .Lcbc_done
-
-.Lcbc_dec_tail:
- aesd $dat,q8
- vld1.32 {q8},[$key_],#16
- aesimc $dat,$dat
- subs $cnt,$cnt,#2
- aesd $dat,q9
- vld1.32 {q9},[$key_],#16
- aesimc $dat,$dat
- b.gt .Lcbc_dec_tail
-
- aesd $dat,q8
- aesimc $dat,$dat
- aesd $dat,q9
- aesimc $dat,$dat
- veor $tmp,$ivec,$rndlast
- aesd $dat,q10
- aesimc $dat,$dat
- vorr $ivec,$in0,$in0
- aesd $dat,q11
- aesimc $dat,$dat
- aesd $dat,q12
- aesimc $dat,$dat
- aesd $dat,q13
- aesimc $dat,$dat
- aesd $dat,q14
- aesimc $dat,$dat
- aesd $dat,q15
-
- veor $tmp,$tmp,$dat
- vst1.8 {$tmp},[$out],#16
-
-.Lcbc_done:
- vst1.8 {$ivec},[$ivp]
-.Lcbc_abort:
-___
-$code.=<<___ if ($flavour !~ /64/);
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r8,pc}
-___
-$code.=<<___ if ($flavour =~ /64/);
- ldr x29,[sp],#16
- ret
-___
-$code.=<<___;
-.size ${prefix}_cbc_encrypt,.-${prefix}_cbc_encrypt
-___
-}}}
-{{{
-my ($inp,$out,$len,$key,$ivp)=map("x$_",(0..4));
-my ($rounds,$cnt,$key_,$ctr,$tctr,$tctr1)=("w5","w6","x7","w8","w9","w10");
-my ($dat0,$dat1,$in0,$in1,$tmp0,$tmp1,$ivec,$rndlast)=map("q$_",(0..7));
-
-my ($dat,$tmp)=($dat0,$tmp0);
-
-### q8-q15 preloaded key schedule
-
-$code.=<<___;
-.globl ${prefix}_ctr32_encrypt_blocks
-.type ${prefix}_ctr32_encrypt_blocks,%function
-.align 5
-${prefix}_ctr32_encrypt_blocks:
-___
-$code.=<<___ if ($flavour =~ /64/);
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-___
-$code.=<<___ if ($flavour !~ /64/);
- mov ip,sp
- stmdb sp!,{r4-r10,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
- ldr r4, [ip] @ load remaining arg
-___
-$code.=<<___;
- ldr $rounds,[$key,#240]
-
- ldr $ctr, [$ivp, #12]
- vld1.32 {$dat0},[$ivp]
-
- vld1.32 {q8-q9},[$key] // load key schedule...
- sub $rounds,$rounds,#6
- add $key_,$key,x5,lsl#4 // pointer to last 7 round keys
- sub $rounds,$rounds,#2
- vld1.32 {q10-q11},[$key_],#32
- vld1.32 {q12-q13},[$key_],#32
- vld1.32 {q14-q15},[$key_],#32
- vld1.32 {$rndlast},[$key_]
-
- add $key_,$key,#32
- mov $cnt,$rounds
-
- subs $len,$len,#2
- b.lo .Lctr32_tail
-
-#ifndef __ARMEB__
- rev $ctr, $ctr
-#endif
- vorr $dat1,$dat0,$dat0
- add $ctr, $ctr, #1
- vorr $ivec,$dat0,$dat0
- rev $tctr1, $ctr
- cmp $rounds,#2
- vmov.32 ${dat1}[3],$tctr1
- b.eq .Lctr32_128
-
-.Loop2x_ctr32:
- aese $dat0,q8
- aese $dat1,q8
- vld1.32 {q8},[$key_],#16
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- subs $cnt,$cnt,#2
- aese $dat0,q9
- aese $dat1,q9
- vld1.32 {q9},[$key_],#16
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- b.gt .Loop2x_ctr32
-
- aese $dat0,q8
- aese $dat1,q8
- aesmc $tmp0,$dat0
- vorr $dat0,$ivec,$ivec
- aesmc $tmp1,$dat1
- vorr $dat1,$ivec,$ivec
- aese $tmp0,q9
- aese $tmp1,q9
- vld1.8 {$in0},[$inp],#16
- aesmc $tmp0,$tmp0
- vld1.8 {$in1},[$inp],#16
- aesmc $tmp1,$tmp1
- add $ctr,$ctr,#1
- aese $tmp0,q10
- aese $tmp1,q10
- rev $tctr,$ctr
- aesmc $tmp0,$tmp0
- aesmc $tmp1,$tmp1
- add $ctr,$ctr,#1
- aese $tmp0,q11
- aese $tmp1,q11
- veor $in0,$in0,$rndlast
- rev $tctr1,$ctr
- aesmc $tmp0,$tmp0
- aesmc $tmp1,$tmp1
- veor $in1,$in1,$rndlast
- mov $key_,$key
- aese $tmp0,q12
- aese $tmp1,q12
- subs $len,$len,#2
- aesmc $tmp0,$tmp0
- aesmc $tmp1,$tmp1
- vld1.32 {q8-q9},[$key_],#32 // re-pre-load rndkey[0-1]
- aese $tmp0,q13
- aese $tmp1,q13
- aesmc $tmp0,$tmp0
- aesmc $tmp1,$tmp1
- aese $tmp0,q14
- aese $tmp1,q14
- vmov.32 ${dat0}[3], $tctr
- aesmc $tmp0,$tmp0
- vmov.32 ${dat1}[3], $tctr1
- aesmc $tmp1,$tmp1
- aese $tmp0,q15
- aese $tmp1,q15
-
- mov $cnt,$rounds
- veor $in0,$in0,$tmp0
- veor $in1,$in1,$tmp1
- vst1.8 {$in0},[$out],#16
- vst1.8 {$in1},[$out],#16
- b.hs .Loop2x_ctr32
-
- adds $len,$len,#2
- b.eq .Lctr32_done
- b .Lctr32_tail
-
-.Lctr32_128:
- vld1.32 {$tmp0-$tmp1},[$key_]
-
-.Loop2x_ctr32_128:
- aese $dat0,q8
- aese $dat1,q8
- aesmc $dat0,$dat0
- vld1.8 {$in0},[$inp],#16
- aesmc $dat1,$dat1
- vld1.8 {$in1},[$inp],#16
- aese $dat0,q9
- aese $dat1,q9
- add $ctr,$ctr,#1
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- rev $tctr,$ctr
- aese $dat0,$tmp0
- aese $dat1,$tmp0
- add $ctr,$ctr,#1
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- rev $tctr1,$ctr
- aese $dat0,$tmp1
- aese $dat1,$tmp1
- subs $len,$len,#2
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- aese $dat0,q10
- aese $dat1,q10
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- aese $dat0,q11
- aese $dat1,q11
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- aese $dat0,q12
- aese $dat1,q12
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- aese $dat0,q13
- aese $dat1,q13
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- aese $dat0,q14
- aese $dat1,q14
- aesmc $dat0,$dat0
- aesmc $dat1,$dat1
- veor $in0,$in0,$rndlast
- aese $dat0,q15
- veor $in1,$in1,$rndlast
- aese $dat1,q15
-
- veor $in0,$in0,$dat0
- vorr $dat0,$ivec,$ivec
- veor $in1,$in1,$dat1
- vorr $dat1,$ivec,$ivec
- vst1.8 {$in0},[$out],#16
- vmov.32 ${dat0}[3], $tctr
- vst1.8 {$in1},[$out],#16
- vmov.32 ${dat1}[3], $tctr1
- b.hs .Loop2x_ctr32_128
-
- adds $len,$len,#2
- b.eq .Lctr32_done
-
-.Lctr32_tail:
- aese $dat,q8
- vld1.32 {q8},[$key_],#16
- aesmc $dat,$dat
- subs $cnt,$cnt,#2
- aese $dat,q9
- vld1.32 {q9},[$key_],#16
- aesmc $dat,$dat
- b.gt .Lctr32_tail
-
- aese $dat,q8
- aesmc $dat,$dat
- aese $dat,q9
- aesmc $dat,$dat
- vld1.8 {$in0},[$inp]
- aese $dat,q10
- aesmc $dat,$dat
- aese $dat,q11
- aesmc $dat,$dat
- aese $dat,q12
- aesmc $dat,$dat
- aese $dat,q13
- aesmc $dat,$dat
- aese $dat,q14
- aesmc $dat,$dat
- veor $in0,$in0,$rndlast
- aese $dat,q15
-
- veor $in0,$in0,$dat
- vst1.8 {$in0},[$out]
-
-.Lctr32_done:
-___
-$code.=<<___ if ($flavour !~ /64/);
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r10,pc}
-___
-$code.=<<___ if ($flavour =~ /64/);
- ldr x29,[sp],#16
- ret
-___
-$code.=<<___;
-.size ${prefix}_ctr32_encrypt_blocks,.-${prefix}_ctr32_encrypt_blocks
-___
-}}}
-$code.=<<___;
-#endif
-___
-########################################
-if ($flavour =~ /64/) { ######## 64-bit code
- my %opcode = (
- "aesd" => 0x4e285800, "aese" => 0x4e284800,
- "aesimc"=> 0x4e287800, "aesmc" => 0x4e286800 );
-
- local *unaes = sub {
- my ($mnemonic,$arg)=@_;
-
- $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o &&
- sprintf ".inst\t0x%08x\t//%s %s",
- $opcode{$mnemonic}|$1|($2<<5),
- $mnemonic,$arg;
- };
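- # Worked example (editorial note): "aese v2.16b,v0.16b" matches
- # with $1=2 and $2=0, so the closure yields 0x4e284800|2|(0<<5) =
- # 0x4e284802, printed as ".inst 0x4e284802 //aese v2.16b,v0.16b".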
-
- foreach(split("\n",$code)) {
- s/\`([^\`]*)\`/eval($1)/geo;
-
- s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo; # old->new registers
- s/@\s/\/\//o; # old->new style commentary
-
- #s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
- s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
- s/vmov\.i8/movi/o or # fix up legacy mnemonics
- s/vext\.8/ext/o or
- s/vrev32\.8/rev32/o or
- s/vtst\.8/cmtst/o or
- s/vshr/ushr/o or
- s/^(\s+)v/$1/o or # strip off v prefix
- s/\bbx\s+lr\b/ret/o;
-
-	# fix up remaining legacy suffixes
- s/\.[ui]?8//o;
- m/\],#8/o and s/\.16b/\.8b/go;
- s/\.[ui]?32//o and s/\.16b/\.4s/go;
- s/\.[ui]?64//o and s/\.16b/\.2d/go;
- s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
-
- print $_,"\n";
- }
-} else { ######## 32-bit code
- my %opcode = (
- "aesd" => 0xf3b00340, "aese" => 0xf3b00300,
- "aesimc"=> 0xf3b003c0, "aesmc" => 0xf3b00380 );
-
- local *unaes = sub {
- my ($mnemonic,$arg)=@_;
-
- if ($arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)/o) {
- my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
- |(($2&7)<<1) |(($2&8)<<2);
-			# ARMv7 instructions are always encoded little-endian, so
-			# the word is emitted byte by byte; the correct solution is
-			# the .inst directive, but older assemblers don't implement it:-(
- sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
- $word&0xff,($word>>8)&0xff,
- ($word>>16)&0xff,($word>>24)&0xff,
- $mnemonic,$arg;
- }
- };
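- # Worked example (editorial note): "aese q2,q0" gives
- # 0xf3b00300|((2&7)<<13) = 0xf3b04300, emitted little-endian as
- # ".byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0", exactly the byte
- # sequences seen in the pre-generated aesv8-armx.S above.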
-
- sub unvtbl {
- my $arg=shift;
-
- $arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o &&
- sprintf "vtbl.8 d%d,{q%d},d%d\n\t".
- "vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1;
- }
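- # E.g. (editorial note): "vtbl.8 q10,{q3},q2" expands to the pair
- # "vtbl.8 d20,{q3},d4" / "vtbl.8 d21,{q3},d5", matching the
- # generated 32-bit code above.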
-
- sub unvdup32 {
- my $arg=shift;
-
- $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
- sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
- }
-
- sub unvmov32 {
- my $arg=shift;
-
- $arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o &&
- sprintf "vmov.32 d%d[%d],%s",2*$1+($2>>1),$2&1,$3;
- }
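- # Similarly (editorial note): "vdup.32 q10,q3[3]" becomes
- # "vdup.32 q10,d7[1]" and "vmov.32 q1[3],r10" becomes
- # "vmov.32 d3[1],r10", as in the generated code above.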
-
- foreach(split("\n",$code)) {
- s/\`([^\`]*)\`/eval($1)/geo;
-
- s/\b[wx]([0-9]+)\b/r$1/go; # new->old registers
- s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
- s/\/\/\s?/@ /o; # new->old style commentary
-
-	# fix up remaining new-style suffixes
- s/\{q([0-9]+)\},\s*\[(.+)\],#8/sprintf "{d%d},[$2]!",2*$1/eo or
- s/\],#[0-9]+/]!/o;
-
- s/[v]?(aes\w+)\s+([qv].*)/unaes($1,$2)/geo or
- s/cclr\s+([^,]+),\s*([a-z]+)/mov$2 $1,#0/o or
- s/vtbl\.8\s+(.*)/unvtbl($1)/geo or
- s/vdup\.32\s+(.*)/unvdup32($1)/geo or
- s/vmov\.32\s+(.*)/unvmov32($1)/geo or
- s/^(\s+)b\./$1b/o or
- s/^(\s+)ret/$1bx\tlr/o;
-
- print $_,"\n";
- }
-}
-
-close STDOUT;
diff --git a/app/openssl/crypto/aes/asm/bsaes-armv7.S b/app/openssl/crypto/aes/asm/bsaes-armv7.S
deleted file mode 100644
index 64205d45..00000000
--- a/app/openssl/crypto/aes/asm/bsaes-armv7.S
+++ /dev/null
@@ -1,2544 +0,0 @@
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see http://www.openssl.org/~appro/cryptogams/.
-@
-@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
-@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
-@ granted.
-@ ====================================================================
-
-@ Bit-sliced AES for ARM NEON
-@
-@ February 2012.
-@
-@ This implementation is a direct adaptation of the bsaes-x86_64
-@ module for ARM NEON, except that this module is endian-neutral
-@ [in the sense that it can be compiled for either endianness] by
-@ courtesy of vld1.8's neutrality. The initial version doesn't
-@ implement an interface to OpenSSL, only low-level primitives and
-@ unsupported entry points, just enough to collect performance
-@ results, which for the Cortex-A8 core are:
-@
-@ encrypt 19.5 cycles per byte processed with 128-bit key
-@ decrypt 22.1 cycles per byte processed with 128-bit key
-@ key conv. 440 cycles per 128-bit key/0.18 of 8x block
-@
-@ Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts in 19.7,
-@ which is [much] worse than anticipated (for further details see
-@ http://www.openssl.org/~appro/Snapdragon-S4.html).
-@
-@ Cortex-A15 manages 14.2/16.1 cycles [where integer-only code
-@ manages 20.0 cycles].
-@
-@ When comparing to x86_64 results, keep in mind that the NEON unit
-@ is [mostly] single-issue and thus can't [fully] benefit from
-@ instruction-level parallelism. And when comparing to aes-armv4
-@ results, keep in mind the key schedule conversion overhead (see
-@ bsaes-x86_64.pl for further details)...
-@
-@ <appro@openssl.org>
-
-@ April-August 2013
-@
-@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
-@
-@ <ard.biesheuvel@linaro.org>
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-
-# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
-# define VFP_ABI_POP vldmia sp!,{d8-d15}
-# define VFP_ABI_FRAME 0x40
-#else
-# define VFP_ABI_PUSH
-# define VFP_ABI_POP
-# define VFP_ABI_FRAME 0
-# define BSAES_ASM_EXTENDED_KEY
-# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-#endif
-
-#ifdef __thumb__
-# define adrl adr
-#endif
-
-#if __ARM_ARCH__>=7
-.text
-.syntax unified @ ARMv7-capable assembler is expected to handle this
-#ifdef __thumb2__
-.thumb
-#else
-.code 32
-#endif
-
-.fpu neon
-
-.type _bsaes_decrypt8,%function
-.align 4
-_bsaes_decrypt8:
- adr r6,_bsaes_decrypt8
- vldmia r4!, {q9} @ round 0 key
- add r6,r6,#.LM0ISR-_bsaes_decrypt8
-
- vldmia r6!, {q8} @ .LM0ISR
- veor q10, q0, q9 @ xor with round0 key
- veor q11, q1, q9
- vtbl.8 d0, {q10}, d16
- vtbl.8 d1, {q10}, d17
- veor q12, q2, q9
- vtbl.8 d2, {q11}, d16
- vtbl.8 d3, {q11}, d17
- veor q13, q3, q9
- vtbl.8 d4, {q12}, d16
- vtbl.8 d5, {q12}, d17
- veor q14, q4, q9
- vtbl.8 d6, {q13}, d16
- vtbl.8 d7, {q13}, d17
- veor q15, q5, q9
- vtbl.8 d8, {q14}, d16
- vtbl.8 d9, {q14}, d17
- veor q10, q6, q9
- vtbl.8 d10, {q15}, d16
- vtbl.8 d11, {q15}, d17
- veor q11, q7, q9
- vtbl.8 d12, {q10}, d16
- vtbl.8 d13, {q10}, d17
- vtbl.8 d14, {q11}, d16
- vtbl.8 d15, {q11}, d17
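-	@ Bit-slice the eight blocks: repeated swapmove steps
-	@ (t = ((a>>n)^b)&mask; b ^= t; a ^= t<<n) transpose bits
-	@ across q0-q7 at strides 1 (0x55), 2 (0x33) and 4 (0x0f).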
- vmov.i8 q8,#0x55 @ compose .LBS0
- vmov.i8 q9,#0x33 @ compose .LBS1
- vshr.u64 q10, q6, #1
- vshr.u64 q11, q4, #1
- veor q10, q10, q7
- veor q11, q11, q5
- vand q10, q10, q8
- vand q11, q11, q8
- veor q7, q7, q10
- vshl.u64 q10, q10, #1
- veor q5, q5, q11
- vshl.u64 q11, q11, #1
- veor q6, q6, q10
- veor q4, q4, q11
- vshr.u64 q10, q2, #1
- vshr.u64 q11, q0, #1
- veor q10, q10, q3
- veor q11, q11, q1
- vand q10, q10, q8
- vand q11, q11, q8
- veor q3, q3, q10
- vshl.u64 q10, q10, #1
- veor q1, q1, q11
- vshl.u64 q11, q11, #1
- veor q2, q2, q10
- veor q0, q0, q11
- vmov.i8 q8,#0x0f @ compose .LBS2
- vshr.u64 q10, q5, #2
- vshr.u64 q11, q4, #2
- veor q10, q10, q7
- veor q11, q11, q6
- vand q10, q10, q9
- vand q11, q11, q9
- veor q7, q7, q10
- vshl.u64 q10, q10, #2
- veor q6, q6, q11
- vshl.u64 q11, q11, #2
- veor q5, q5, q10
- veor q4, q4, q11
- vshr.u64 q10, q1, #2
- vshr.u64 q11, q0, #2
- veor q10, q10, q3
- veor q11, q11, q2
- vand q10, q10, q9
- vand q11, q11, q9
- veor q3, q3, q10
- vshl.u64 q10, q10, #2
- veor q2, q2, q11
- vshl.u64 q11, q11, #2
- veor q1, q1, q10
- veor q0, q0, q11
- vshr.u64 q10, q3, #4
- vshr.u64 q11, q2, #4
- veor q10, q10, q7
- veor q11, q11, q6
- vand q10, q10, q8
- vand q11, q11, q8
- veor q7, q7, q10
- vshl.u64 q10, q10, #4
- veor q6, q6, q11
- vshl.u64 q11, q11, #4
- veor q3, q3, q10
- veor q2, q2, q11
- vshr.u64 q10, q1, #4
- vshr.u64 q11, q0, #4
- veor q10, q10, q5
- veor q11, q11, q4
- vand q10, q10, q8
- vand q11, q11, q8
- veor q5, q5, q10
- vshl.u64 q10, q10, #4
- veor q4, q4, q11
- vshl.u64 q11, q11, #4
- veor q1, q1, q10
- veor q0, q0, q11
- sub r5,r5,#1
- b .Ldec_sbox
-.align 4
-.Ldec_loop:
- vldmia r4!, {q8-q11}
- veor q8, q8, q0
- veor q9, q9, q1
- vtbl.8 d0, {q8}, d24
- vtbl.8 d1, {q8}, d25
- vldmia r4!, {q8}
- veor q10, q10, q2
- vtbl.8 d2, {q9}, d24
- vtbl.8 d3, {q9}, d25
- vldmia r4!, {q9}
- veor q11, q11, q3
- vtbl.8 d4, {q10}, d24
- vtbl.8 d5, {q10}, d25
- vldmia r4!, {q10}
- vtbl.8 d6, {q11}, d24
- vtbl.8 d7, {q11}, d25
- vldmia r4!, {q11}
- veor q8, q8, q4
- veor q9, q9, q5
- vtbl.8 d8, {q8}, d24
- vtbl.8 d9, {q8}, d25
- veor q10, q10, q6
- vtbl.8 d10, {q9}, d24
- vtbl.8 d11, {q9}, d25
- veor q11, q11, q7
- vtbl.8 d12, {q10}, d24
- vtbl.8 d13, {q10}, d25
- vtbl.8 d14, {q11}, d24
- vtbl.8 d15, {q11}, d25
-.Ldec_sbox:
- veor q1, q1, q4
- veor q3, q3, q4
-
- veor q4, q4, q7
- veor q1, q1, q6
- veor q2, q2, q7
- veor q6, q6, q4
-
- veor q0, q0, q1
- veor q2, q2, q5
- veor q7, q7, q6
- veor q3, q3, q0
- veor q5, q5, q0
- veor q1, q1, q3
- veor q11, q3, q0
- veor q10, q7, q4
- veor q9, q1, q6
- veor q13, q4, q0
- vmov q8, q10
- veor q12, q5, q2
-
- vorr q10, q10, q9
- veor q15, q11, q8
- vand q14, q11, q12
- vorr q11, q11, q12
- veor q12, q12, q9
- vand q8, q8, q9
- veor q9, q6, q2
- vand q15, q15, q12
- vand q13, q13, q9
- veor q9, q3, q7
- veor q12, q1, q5
- veor q11, q11, q13
- veor q10, q10, q13
- vand q13, q9, q12
- vorr q9, q9, q12
- veor q11, q11, q15
- veor q8, q8, q13
- veor q10, q10, q14
- veor q9, q9, q15
- veor q8, q8, q14
- vand q12, q4, q6
- veor q9, q9, q14
- vand q13, q0, q2
- vand q14, q7, q1
- vorr q15, q3, q5
- veor q11, q11, q12
- veor q9, q9, q14
- veor q8, q8, q15
- veor q10, q10, q13
-
- @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
-
- @ new smaller inversion
-
- vand q14, q11, q9
- vmov q12, q8
-
- veor q13, q10, q14
- veor q15, q8, q14
- veor q14, q8, q14 @ q14=q15
-
- vbsl q13, q9, q8
- vbsl q15, q11, q10
- veor q11, q11, q10
-
- vbsl q12, q13, q14
- vbsl q8, q14, q13
-
- vand q14, q12, q15
- veor q9, q9, q8
-
- veor q14, q14, q11
- veor q12, q5, q2
- veor q8, q1, q6
- veor q10, q15, q14
- vand q10, q10, q5
- veor q5, q5, q1
- vand q11, q1, q15
- vand q5, q5, q14
- veor q1, q11, q10
- veor q5, q5, q11
- veor q15, q15, q13
- veor q14, q14, q9
- veor q11, q15, q14
- veor q10, q13, q9
- vand q11, q11, q12
- vand q10, q10, q2
- veor q12, q12, q8
- veor q2, q2, q6
- vand q8, q8, q15
- vand q6, q6, q13
- vand q12, q12, q14
- vand q2, q2, q9
- veor q8, q8, q12
- veor q2, q2, q6
- veor q12, q12, q11
- veor q6, q6, q10
- veor q5, q5, q12
- veor q2, q2, q12
- veor q1, q1, q8
- veor q6, q6, q8
-
- veor q12, q3, q0
- veor q8, q7, q4
- veor q11, q15, q14
- veor q10, q13, q9
- vand q11, q11, q12
- vand q10, q10, q0
- veor q12, q12, q8
- veor q0, q0, q4
- vand q8, q8, q15
- vand q4, q4, q13
- vand q12, q12, q14
- vand q0, q0, q9
- veor q8, q8, q12
- veor q0, q0, q4
- veor q12, q12, q11
- veor q4, q4, q10
- veor q15, q15, q13
- veor q14, q14, q9
- veor q10, q15, q14
- vand q10, q10, q3
- veor q3, q3, q7
- vand q11, q7, q15
- vand q3, q3, q14
- veor q7, q11, q10
- veor q3, q3, q11
- veor q3, q3, q12
- veor q0, q0, q12
- veor q7, q7, q8
- veor q4, q4, q8
- veor q1, q1, q7
- veor q6, q6, q5
-
- veor q4, q4, q1
- veor q2, q2, q7
- veor q5, q5, q7
- veor q4, q4, q2
- veor q7, q7, q0
- veor q4, q4, q5
- veor q3, q3, q6
- veor q6, q6, q1
- veor q3, q3, q4
-
- veor q4, q4, q0
- veor q7, q7, q3
- subs r5,r5,#1
- bcc .Ldec_done
- @ multiplication by 0x05-0x00-0x04-0x00
- vext.8 q8, q0, q0, #8
- vext.8 q14, q3, q3, #8
- vext.8 q15, q5, q5, #8
- veor q8, q8, q0
- vext.8 q9, q1, q1, #8
- veor q14, q14, q3
- vext.8 q10, q6, q6, #8
- veor q15, q15, q5
- vext.8 q11, q4, q4, #8
- veor q9, q9, q1
- vext.8 q12, q2, q2, #8
- veor q10, q10, q6
- vext.8 q13, q7, q7, #8
- veor q11, q11, q4
- veor q12, q12, q2
- veor q13, q13, q7
-
- veor q0, q0, q14
- veor q1, q1, q14
- veor q6, q6, q8
- veor q2, q2, q10
- veor q4, q4, q9
- veor q1, q1, q15
- veor q6, q6, q15
- veor q2, q2, q14
- veor q7, q7, q11
- veor q4, q4, q14
- veor q3, q3, q12
- veor q2, q2, q15
- veor q7, q7, q15
- veor q5, q5, q13
- vext.8 q8, q0, q0, #12 @ x0 <<< 32
- vext.8 q9, q1, q1, #12
- veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
- vext.8 q10, q6, q6, #12
- veor q1, q1, q9
- vext.8 q11, q4, q4, #12
- veor q6, q6, q10
- vext.8 q12, q2, q2, #12
- veor q4, q4, q11
- vext.8 q13, q7, q7, #12
- veor q2, q2, q12
- vext.8 q14, q3, q3, #12
- veor q7, q7, q13
- vext.8 q15, q5, q5, #12
- veor q3, q3, q14
-
- veor q9, q9, q0
- veor q5, q5, q15
-	vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
- veor q10, q10, q1
- veor q8, q8, q5
- veor q9, q9, q5
- vext.8 q1, q1, q1, #8
- veor q13, q13, q2
- veor q0, q0, q8
- veor q14, q14, q7
- veor q1, q1, q9
- vext.8 q8, q2, q2, #8
- veor q12, q12, q4
- vext.8 q9, q7, q7, #8
- veor q15, q15, q3
- vext.8 q2, q4, q4, #8
- veor q11, q11, q6
- vext.8 q7, q5, q5, #8
- veor q12, q12, q5
- vext.8 q4, q3, q3, #8
- veor q11, q11, q5
- vext.8 q3, q6, q6, #8
- veor q5, q9, q13
- veor q11, q11, q2
- veor q7, q7, q15
- veor q6, q4, q14
- veor q4, q8, q12
- veor q2, q3, q10
- vmov q3, q11
- @ vmov q5, q9
- vldmia r6, {q12} @ .LISR
- ite eq @ Thumb2 thing, sanity check in ARM
- addeq r6,r6,#0x10
- bne .Ldec_loop
- vldmia r6, {q12} @ .LISRM0
- b .Ldec_loop
-.align 4
-.Ldec_done:
- vmov.i8 q8,#0x55 @ compose .LBS0
- vmov.i8 q9,#0x33 @ compose .LBS1
- vshr.u64 q10, q3, #1
- vshr.u64 q11, q2, #1
- veor q10, q10, q5
- veor q11, q11, q7
- vand q10, q10, q8
- vand q11, q11, q8
- veor q5, q5, q10
- vshl.u64 q10, q10, #1
- veor q7, q7, q11
- vshl.u64 q11, q11, #1
- veor q3, q3, q10
- veor q2, q2, q11
- vshr.u64 q10, q6, #1
- vshr.u64 q11, q0, #1
- veor q10, q10, q4
- veor q11, q11, q1
- vand q10, q10, q8
- vand q11, q11, q8
- veor q4, q4, q10
- vshl.u64 q10, q10, #1
- veor q1, q1, q11
- vshl.u64 q11, q11, #1
- veor q6, q6, q10
- veor q0, q0, q11
- vmov.i8 q8,#0x0f @ compose .LBS2
- vshr.u64 q10, q7, #2
- vshr.u64 q11, q2, #2
- veor q10, q10, q5
- veor q11, q11, q3
- vand q10, q10, q9
- vand q11, q11, q9
- veor q5, q5, q10
- vshl.u64 q10, q10, #2
- veor q3, q3, q11
- vshl.u64 q11, q11, #2
- veor q7, q7, q10
- veor q2, q2, q11
- vshr.u64 q10, q1, #2
- vshr.u64 q11, q0, #2
- veor q10, q10, q4
- veor q11, q11, q6
- vand q10, q10, q9
- vand q11, q11, q9
- veor q4, q4, q10
- vshl.u64 q10, q10, #2
- veor q6, q6, q11
- vshl.u64 q11, q11, #2
- veor q1, q1, q10
- veor q0, q0, q11
- vshr.u64 q10, q4, #4
- vshr.u64 q11, q6, #4
- veor q10, q10, q5
- veor q11, q11, q3
- vand q10, q10, q8
- vand q11, q11, q8
- veor q5, q5, q10
- vshl.u64 q10, q10, #4
- veor q3, q3, q11
- vshl.u64 q11, q11, #4
- veor q4, q4, q10
- veor q6, q6, q11
- vshr.u64 q10, q1, #4
- vshr.u64 q11, q0, #4
- veor q10, q10, q7
- veor q11, q11, q2
- vand q10, q10, q8
- vand q11, q11, q8
- veor q7, q7, q10
- vshl.u64 q10, q10, #4
- veor q2, q2, q11
- vshl.u64 q11, q11, #4
- veor q1, q1, q10
- veor q0, q0, q11
- vldmia r4, {q8} @ last round key
- veor q6, q6, q8
- veor q4, q4, q8
- veor q2, q2, q8
- veor q7, q7, q8
- veor q3, q3, q8
- veor q5, q5, q8
- veor q0, q0, q8
- veor q1, q1, q8
- bx lr
-.size _bsaes_decrypt8,.-_bsaes_decrypt8
-
-.type _bsaes_const,%object
-.align 6
-_bsaes_const:
-.LM0ISR: @ InvShiftRows constants
- .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
-.LISR:
- .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
-.LISRM0:
- .quad 0x01040b0e0205080f, 0x0306090c00070a0d
-.LM0SR: @ ShiftRows constants
- .quad 0x0a0e02060f03070b, 0x0004080c05090d01
-.LSR:
- .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
-.LSRM0:
- .quad 0x0304090e00050a0f, 0x01060b0c0207080d
-.LM0:
- .quad 0x02060a0e03070b0f, 0x0004080c0105090d
-.LREVM0SR:
- .quad 0x090d01050c000408, 0x03070b0f060a0e02
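-@ each .L* quad pair above is a 16-byte vtbl.8 index table: ShiftRows,
-@ InvShiftRows and the bit-interleaving input permutations are applied
-@ as byte shuffles rather than computed arithmetically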
-.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
-.align 6
-.size _bsaes_const,.-_bsaes_const
-
-.type _bsaes_encrypt8,%function
-.align 4
-_bsaes_encrypt8:
- adr r6,_bsaes_encrypt8
- vldmia r4!, {q9} @ round 0 key
- sub r6,r6,#_bsaes_encrypt8-.LM0SR
-
- vldmia r6!, {q8} @ .LM0SR
-_bsaes_encrypt8_alt:
- veor q10, q0, q9 @ xor with round0 key
- veor q11, q1, q9
- vtbl.8 d0, {q10}, d16
- vtbl.8 d1, {q10}, d17
- veor q12, q2, q9
- vtbl.8 d2, {q11}, d16
- vtbl.8 d3, {q11}, d17
- veor q13, q3, q9
- vtbl.8 d4, {q12}, d16
- vtbl.8 d5, {q12}, d17
- veor q14, q4, q9
- vtbl.8 d6, {q13}, d16
- vtbl.8 d7, {q13}, d17
- veor q15, q5, q9
- vtbl.8 d8, {q14}, d16
- vtbl.8 d9, {q14}, d17
- veor q10, q6, q9
- vtbl.8 d10, {q15}, d16
- vtbl.8 d11, {q15}, d17
- veor q11, q7, q9
- vtbl.8 d12, {q10}, d16
- vtbl.8 d13, {q10}, d17
- vtbl.8 d14, {q11}, d16
- vtbl.8 d15, {q11}, d17
-_bsaes_encrypt8_bitslice:
- vmov.i8 q8,#0x55 @ compose .LBS0
- vmov.i8 q9,#0x33 @ compose .LBS1
- vshr.u64 q10, q6, #1
- vshr.u64 q11, q4, #1
- veor q10, q10, q7
- veor q11, q11, q5
- vand q10, q10, q8
- vand q11, q11, q8
- veor q7, q7, q10
- vshl.u64 q10, q10, #1
- veor q5, q5, q11
- vshl.u64 q11, q11, #1
- veor q6, q6, q10
- veor q4, q4, q11
- vshr.u64 q10, q2, #1
- vshr.u64 q11, q0, #1
- veor q10, q10, q3
- veor q11, q11, q1
- vand q10, q10, q8
- vand q11, q11, q8
- veor q3, q3, q10
- vshl.u64 q10, q10, #1
- veor q1, q1, q11
- vshl.u64 q11, q11, #1
- veor q2, q2, q10
- veor q0, q0, q11
- vmov.i8 q8,#0x0f @ compose .LBS2
- vshr.u64 q10, q5, #2
- vshr.u64 q11, q4, #2
- veor q10, q10, q7
- veor q11, q11, q6
- vand q10, q10, q9
- vand q11, q11, q9
- veor q7, q7, q10
- vshl.u64 q10, q10, #2
- veor q6, q6, q11
- vshl.u64 q11, q11, #2
- veor q5, q5, q10
- veor q4, q4, q11
- vshr.u64 q10, q1, #2
- vshr.u64 q11, q0, #2
- veor q10, q10, q3
- veor q11, q11, q2
- vand q10, q10, q9
- vand q11, q11, q9
- veor q3, q3, q10
- vshl.u64 q10, q10, #2
- veor q2, q2, q11
- vshl.u64 q11, q11, #2
- veor q1, q1, q10
- veor q0, q0, q11
- vshr.u64 q10, q3, #4
- vshr.u64 q11, q2, #4
- veor q10, q10, q7
- veor q11, q11, q6
- vand q10, q10, q8
- vand q11, q11, q8
- veor q7, q7, q10
- vshl.u64 q10, q10, #4
- veor q6, q6, q11
- vshl.u64 q11, q11, #4
- veor q3, q3, q10
- veor q2, q2, q11
- vshr.u64 q10, q1, #4
- vshr.u64 q11, q0, #4
- veor q10, q10, q5
- veor q11, q11, q4
- vand q10, q10, q8
- vand q11, q11, q8
- veor q5, q5, q10
- vshl.u64 q10, q10, #4
- veor q4, q4, q11
- vshl.u64 q11, q11, #4
- veor q1, q1, q10
- veor q0, q0, q11
- sub r5,r5,#1
- b .Lenc_sbox
-.align 4
-.Lenc_loop:
- vldmia r4!, {q8-q11}
- veor q8, q8, q0
- veor q9, q9, q1
- vtbl.8 d0, {q8}, d24
- vtbl.8 d1, {q8}, d25
- vldmia r4!, {q8}
- veor q10, q10, q2
- vtbl.8 d2, {q9}, d24
- vtbl.8 d3, {q9}, d25
- vldmia r4!, {q9}
- veor q11, q11, q3
- vtbl.8 d4, {q10}, d24
- vtbl.8 d5, {q10}, d25
- vldmia r4!, {q10}
- vtbl.8 d6, {q11}, d24
- vtbl.8 d7, {q11}, d25
- vldmia r4!, {q11}
- veor q8, q8, q4
- veor q9, q9, q5
- vtbl.8 d8, {q8}, d24
- vtbl.8 d9, {q8}, d25
- veor q10, q10, q6
- vtbl.8 d10, {q9}, d24
- vtbl.8 d11, {q9}, d25
- veor q11, q11, q7
- vtbl.8 d12, {q10}, d24
- vtbl.8 d13, {q10}, d25
- vtbl.8 d14, {q11}, d24
- vtbl.8 d15, {q11}, d25
-.Lenc_sbox:
- veor q2, q2, q1
- veor q5, q5, q6
- veor q3, q3, q0
- veor q6, q6, q2
- veor q5, q5, q0
-
- veor q6, q6, q3
- veor q3, q3, q7
- veor q7, q7, q5
- veor q3, q3, q4
- veor q4, q4, q5
-
- veor q2, q2, q7
- veor q3, q3, q1
- veor q1, q1, q5
- veor q11, q7, q4
- veor q10, q1, q2
- veor q9, q5, q3
- veor q13, q2, q4
- vmov q8, q10
- veor q12, q6, q0
-
- vorr q10, q10, q9
- veor q15, q11, q8
- vand q14, q11, q12
- vorr q11, q11, q12
- veor q12, q12, q9
- vand q8, q8, q9
- veor q9, q3, q0
- vand q15, q15, q12
- vand q13, q13, q9
- veor q9, q7, q1
- veor q12, q5, q6
- veor q11, q11, q13
- veor q10, q10, q13
- vand q13, q9, q12
- vorr q9, q9, q12
- veor q11, q11, q15
- veor q8, q8, q13
- veor q10, q10, q14
- veor q9, q9, q15
- veor q8, q8, q14
- vand q12, q2, q3
- veor q9, q9, q14
- vand q13, q4, q0
- vand q14, q1, q5
- vorr q15, q7, q6
- veor q11, q11, q12
- veor q9, q9, q14
- veor q8, q8, q15
- veor q10, q10, q13
-
- @ Inv_GF16 0, 1, 2, 3, s0, s1, s2, s3
-
- @ new smaller inversion
-
- vand q14, q11, q9
- vmov q12, q8
-
- veor q13, q10, q14
- veor q15, q8, q14
- veor q14, q8, q14 @ q14=q15
-
- vbsl q13, q9, q8
- vbsl q15, q11, q10
- veor q11, q11, q10
-
- vbsl q12, q13, q14
- vbsl q8, q14, q13
-
- vand q14, q12, q15
- veor q9, q9, q8
-
- veor q14, q14, q11
- veor q12, q6, q0
- veor q8, q5, q3
- veor q10, q15, q14
- vand q10, q10, q6
- veor q6, q6, q5
- vand q11, q5, q15
- vand q6, q6, q14
- veor q5, q11, q10
- veor q6, q6, q11
- veor q15, q15, q13
- veor q14, q14, q9
- veor q11, q15, q14
- veor q10, q13, q9
- vand q11, q11, q12
- vand q10, q10, q0
- veor q12, q12, q8
- veor q0, q0, q3
- vand q8, q8, q15
- vand q3, q3, q13
- vand q12, q12, q14
- vand q0, q0, q9
- veor q8, q8, q12
- veor q0, q0, q3
- veor q12, q12, q11
- veor q3, q3, q10
- veor q6, q6, q12
- veor q0, q0, q12
- veor q5, q5, q8
- veor q3, q3, q8
-
- veor q12, q7, q4
- veor q8, q1, q2
- veor q11, q15, q14
- veor q10, q13, q9
- vand q11, q11, q12
- vand q10, q10, q4
- veor q12, q12, q8
- veor q4, q4, q2
- vand q8, q8, q15
- vand q2, q2, q13
- vand q12, q12, q14
- vand q4, q4, q9
- veor q8, q8, q12
- veor q4, q4, q2
- veor q12, q12, q11
- veor q2, q2, q10
- veor q15, q15, q13
- veor q14, q14, q9
- veor q10, q15, q14
- vand q10, q10, q7
- veor q7, q7, q1
- vand q11, q1, q15
- vand q7, q7, q14
- veor q1, q11, q10
- veor q7, q7, q11
- veor q7, q7, q12
- veor q4, q4, q12
- veor q1, q1, q8
- veor q2, q2, q8
- veor q7, q7, q0
- veor q1, q1, q6
- veor q6, q6, q0
- veor q4, q4, q7
- veor q0, q0, q1
-
- veor q1, q1, q5
- veor q5, q5, q2
- veor q2, q2, q3
- veor q3, q3, q5
- veor q4, q4, q5
-
- veor q6, q6, q3
- subs r5,r5,#1
- bcc .Lenc_done
- vext.8 q8, q0, q0, #12 @ x0 <<< 32
- vext.8 q9, q1, q1, #12
- veor q0, q0, q8 @ x0 ^ (x0 <<< 32)
- vext.8 q10, q4, q4, #12
- veor q1, q1, q9
- vext.8 q11, q6, q6, #12
- veor q4, q4, q10
- vext.8 q12, q3, q3, #12
- veor q6, q6, q11
- vext.8 q13, q7, q7, #12
- veor q3, q3, q12
- vext.8 q14, q2, q2, #12
- veor q7, q7, q13
- vext.8 q15, q5, q5, #12
- veor q2, q2, q14
-
- veor q9, q9, q0
- veor q5, q5, q15
-	vext.8	q0, q0, q0, #8		@ (x0 ^ (x0 <<< 32)) <<< 64
- veor q10, q10, q1
- veor q8, q8, q5
- veor q9, q9, q5
- vext.8 q1, q1, q1, #8
- veor q13, q13, q3
- veor q0, q0, q8
- veor q14, q14, q7
- veor q1, q1, q9
- vext.8 q8, q3, q3, #8
- veor q12, q12, q6
- vext.8 q9, q7, q7, #8
- veor q15, q15, q2
- vext.8 q3, q6, q6, #8
- veor q11, q11, q4
- vext.8 q7, q5, q5, #8
- veor q12, q12, q5
- vext.8 q6, q2, q2, #8
- veor q11, q11, q5
- vext.8 q2, q4, q4, #8
- veor q5, q9, q13
- veor q4, q8, q12
- veor q3, q3, q11
- veor q7, q7, q15
- veor q6, q6, q14
- @ vmov q4, q8
- veor q2, q2, q10
- @ vmov q5, q9
- vldmia r6, {q12} @ .LSR
-	ite	eq				@ Thumb2 thing, sanity check in ARM
- addeq r6,r6,#0x10
- bne .Lenc_loop
- vldmia r6, {q12} @ .LSRM0
- b .Lenc_loop
-.align 4
-.Lenc_done:
- vmov.i8 q8,#0x55 @ compose .LBS0
- vmov.i8 q9,#0x33 @ compose .LBS1
- vshr.u64 q10, q2, #1
- vshr.u64 q11, q3, #1
- veor q10, q10, q5
- veor q11, q11, q7
- vand q10, q10, q8
- vand q11, q11, q8
- veor q5, q5, q10
- vshl.u64 q10, q10, #1
- veor q7, q7, q11
- vshl.u64 q11, q11, #1
- veor q2, q2, q10
- veor q3, q3, q11
- vshr.u64 q10, q4, #1
- vshr.u64 q11, q0, #1
- veor q10, q10, q6
- veor q11, q11, q1
- vand q10, q10, q8
- vand q11, q11, q8
- veor q6, q6, q10
- vshl.u64 q10, q10, #1
- veor q1, q1, q11
- vshl.u64 q11, q11, #1
- veor q4, q4, q10
- veor q0, q0, q11
- vmov.i8 q8,#0x0f @ compose .LBS2
- vshr.u64 q10, q7, #2
- vshr.u64 q11, q3, #2
- veor q10, q10, q5
- veor q11, q11, q2
- vand q10, q10, q9
- vand q11, q11, q9
- veor q5, q5, q10
- vshl.u64 q10, q10, #2
- veor q2, q2, q11
- vshl.u64 q11, q11, #2
- veor q7, q7, q10
- veor q3, q3, q11
- vshr.u64 q10, q1, #2
- vshr.u64 q11, q0, #2
- veor q10, q10, q6
- veor q11, q11, q4
- vand q10, q10, q9
- vand q11, q11, q9
- veor q6, q6, q10
- vshl.u64 q10, q10, #2
- veor q4, q4, q11
- vshl.u64 q11, q11, #2
- veor q1, q1, q10
- veor q0, q0, q11
- vshr.u64 q10, q6, #4
- vshr.u64 q11, q4, #4
- veor q10, q10, q5
- veor q11, q11, q2
- vand q10, q10, q8
- vand q11, q11, q8
- veor q5, q5, q10
- vshl.u64 q10, q10, #4
- veor q2, q2, q11
- vshl.u64 q11, q11, #4
- veor q6, q6, q10
- veor q4, q4, q11
- vshr.u64 q10, q1, #4
- vshr.u64 q11, q0, #4
- veor q10, q10, q7
- veor q11, q11, q3
- vand q10, q10, q8
- vand q11, q11, q8
- veor q7, q7, q10
- vshl.u64 q10, q10, #4
- veor q3, q3, q11
- vshl.u64 q11, q11, #4
- veor q1, q1, q10
- veor q0, q0, q11
- vldmia r4, {q8} @ last round key
- veor q4, q4, q8
- veor q6, q6, q8
- veor q3, q3, q8
- veor q7, q7, q8
- veor q2, q2, q8
- veor q5, q5, q8
- veor q0, q0, q8
- veor q1, q1, q8
- bx lr
-.size _bsaes_encrypt8,.-_bsaes_encrypt8
-.type _bsaes_key_convert,%function
-.align 4
-_bsaes_key_convert:
- adr r6,_bsaes_key_convert
- vld1.8 {q7}, [r4]! @ load round 0 key
- sub r6,r6,#_bsaes_key_convert-.LM0
- vld1.8 {q15}, [r4]! @ load round 1 key
-
- vmov.i8 q8, #0x01 @ bit masks
- vmov.i8 q9, #0x02
- vmov.i8 q10, #0x04
- vmov.i8 q11, #0x08
- vmov.i8 q12, #0x10
- vmov.i8 q13, #0x20
- vldmia r6, {q14} @ .LM0
-
-#ifdef __ARMEL__
- vrev32.8 q7, q7
- vrev32.8 q15, q15
-#endif
- sub r5,r5,#1
- vstmia r12!, {q7} @ save round 0 key
- b .Lkey_loop
-
-.align 4
-.Lkey_loop:
- vtbl.8 d14,{q15},d28
- vtbl.8 d15,{q15},d29
- vmov.i8 q6, #0x40
- vmov.i8 q15, #0x80
-
- vtst.8 q0, q7, q8
- vtst.8 q1, q7, q9
- vtst.8 q2, q7, q10
- vtst.8 q3, q7, q11
- vtst.8 q4, q7, q12
- vtst.8 q5, q7, q13
- vtst.8 q6, q7, q6
- vtst.8 q7, q7, q15
- vld1.8 {q15}, [r4]! @ load next round key
- vmvn q0, q0 @ "pnot"
- vmvn q1, q1
- vmvn q5, q5
- vmvn q6, q6
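-	@ the vtst.8 block above expands one key bit per register into a full
-	@ 0x00/0xff byte mask, i.e. q0-q7 now hold the eight bit-planes of the
-	@ round key; complementing planes 0,1,5,6 ("pnot") folds the S-box
-	@ affine constant 0x63 (which has exactly bits 0,1,5,6 set) into the
-	@ bit-sliced key schedule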
-#ifdef __ARMEL__
- vrev32.8 q15, q15
-#endif
- subs r5,r5,#1
- vstmia r12!,{q0-q7} @ write bit-sliced round key
- bne .Lkey_loop
-
- vmov.i8 q7,#0x63 @ compose .L63
- @ don't save last round key
- bx lr
-.size _bsaes_key_convert,.-_bsaes_key_convert
-.extern AES_cbc_encrypt
-.extern AES_decrypt
-
-.global bsaes_cbc_encrypt
-.type bsaes_cbc_encrypt,%function
-.align 5
-bsaes_cbc_encrypt:
-#ifndef __KERNEL__
- cmp r2, #128
-#ifndef __thumb__
- blo AES_cbc_encrypt
-#else
- bhs 1f
- b AES_cbc_encrypt
-1:
-#endif
-#endif
-
- @ it is up to the caller to make sure we are called with enc == 0
-
- mov ip, sp
- stmdb sp!, {r4-r10, lr}
- VFP_ABI_PUSH
- ldr r8, [ip] @ IV is 1st arg on the stack
- mov r2, r2, lsr#4 @ len in 16 byte blocks
- sub sp, #0x10 @ scratch space to carry over the IV
- mov r9, sp @ save sp
-
- ldr r10, [r3, #240] @ get # of rounds
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
-	add	r12, #96			@ size of bit-sliced key schedule
-
- @ populate the key schedule
- mov r4, r3 @ pass key
- mov r5, r10 @ pass # of rounds
-	mov	sp, r12				@ sp points at bit-sliced key schedule
- bl _bsaes_key_convert
- vldmia sp, {q6}
- vstmia r12, {q15} @ save last round key
- veor q7, q7, q6 @ fix up round 0 key
- vstmia sp, {q7}
-#else
- ldr r12, [r3, #244]
- eors r12, #1
- beq 0f
-
- @ populate the key schedule
- str r12, [r3, #244]
- mov r4, r3 @ pass key
- mov r5, r10 @ pass # of rounds
- add r12, r3, #248 @ pass key schedule
- bl _bsaes_key_convert
- add r4, r3, #248
- vldmia r4, {q6}
- vstmia r12, {q15} @ save last round key
- veor q7, q7, q6 @ fix up round 0 key
- vstmia r4, {q7}
-
-.align 2
-0:
-#endif
-
- vld1.8 {q15}, [r8] @ load IV
- b .Lcbc_dec_loop
-
-.align 4
-.Lcbc_dec_loop:
- subs r2, r2, #0x8
- bmi .Lcbc_dec_loop_finish
-
- vld1.8 {q0-q1}, [r0]! @ load input
- vld1.8 {q2-q3}, [r0]!
-#ifndef BSAES_ASM_EXTENDED_KEY
- mov r4, sp @ pass the key
-#else
- add r4, r3, #248
-#endif
- vld1.8 {q4-q5}, [r0]!
- mov r5, r10
- vld1.8 {q6-q7}, [r0]
- sub r0, r0, #0x60
- vstmia r9, {q15} @ put aside IV
-
- bl _bsaes_decrypt8
-
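-	@ _bsaes_decrypt8 returns the blocks permuted as q0,q1,q6,q4,q2,q7,q3,q5;
-	@ the XOR-with-previous-ciphertext and the stores below follow that
-	@ order so the output lands in the original block sequence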
- vldmia r9, {q14} @ reload IV
- vld1.8 {q8-q9}, [r0]! @ reload input
- veor q0, q0, q14 @ ^= IV
- vld1.8 {q10-q11}, [r0]!
- veor q1, q1, q8
- veor q6, q6, q9
- vld1.8 {q12-q13}, [r0]!
- veor q4, q4, q10
- veor q2, q2, q11
- vld1.8 {q14-q15}, [r0]!
- veor q7, q7, q12
- vst1.8 {q0-q1}, [r1]! @ write output
- veor q3, q3, q13
- vst1.8 {q6}, [r1]!
- veor q5, q5, q14
- vst1.8 {q4}, [r1]!
- vst1.8 {q2}, [r1]!
- vst1.8 {q7}, [r1]!
- vst1.8 {q3}, [r1]!
- vst1.8 {q5}, [r1]!
-
- b .Lcbc_dec_loop
-
-.Lcbc_dec_loop_finish:
- adds r2, r2, #8
- beq .Lcbc_dec_done
-
- vld1.8 {q0}, [r0]! @ load input
- cmp r2, #2
- blo .Lcbc_dec_one
- vld1.8 {q1}, [r0]!
-#ifndef BSAES_ASM_EXTENDED_KEY
- mov r4, sp @ pass the key
-#else
- add r4, r3, #248
-#endif
- mov r5, r10
- vstmia r9, {q15} @ put aside IV
- beq .Lcbc_dec_two
- vld1.8 {q2}, [r0]!
- cmp r2, #4
- blo .Lcbc_dec_three
- vld1.8 {q3}, [r0]!
- beq .Lcbc_dec_four
- vld1.8 {q4}, [r0]!
- cmp r2, #6
- blo .Lcbc_dec_five
- vld1.8 {q5}, [r0]!
- beq .Lcbc_dec_six
- vld1.8 {q6}, [r0]!
- sub r0, r0, #0x70
-
- bl _bsaes_decrypt8
-
- vldmia r9, {q14} @ reload IV
- vld1.8 {q8-q9}, [r0]! @ reload input
- veor q0, q0, q14 @ ^= IV
- vld1.8 {q10-q11}, [r0]!
- veor q1, q1, q8
- veor q6, q6, q9
- vld1.8 {q12-q13}, [r0]!
- veor q4, q4, q10
- veor q2, q2, q11
- vld1.8 {q15}, [r0]!
- veor q7, q7, q12
- vst1.8 {q0-q1}, [r1]! @ write output
- veor q3, q3, q13
- vst1.8 {q6}, [r1]!
- vst1.8 {q4}, [r1]!
- vst1.8 {q2}, [r1]!
- vst1.8 {q7}, [r1]!
- vst1.8 {q3}, [r1]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_six:
- sub r0, r0, #0x60
- bl _bsaes_decrypt8
- vldmia r9,{q14} @ reload IV
- vld1.8 {q8-q9}, [r0]! @ reload input
- veor q0, q0, q14 @ ^= IV
- vld1.8 {q10-q11}, [r0]!
- veor q1, q1, q8
- veor q6, q6, q9
- vld1.8 {q12}, [r0]!
- veor q4, q4, q10
- veor q2, q2, q11
- vld1.8 {q15}, [r0]!
- veor q7, q7, q12
- vst1.8 {q0-q1}, [r1]! @ write output
- vst1.8 {q6}, [r1]!
- vst1.8 {q4}, [r1]!
- vst1.8 {q2}, [r1]!
- vst1.8 {q7}, [r1]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_five:
- sub r0, r0, #0x50
- bl _bsaes_decrypt8
- vldmia r9, {q14} @ reload IV
- vld1.8 {q8-q9}, [r0]! @ reload input
- veor q0, q0, q14 @ ^= IV
- vld1.8 {q10-q11}, [r0]!
- veor q1, q1, q8
- veor q6, q6, q9
- vld1.8 {q15}, [r0]!
- veor q4, q4, q10
- vst1.8 {q0-q1}, [r1]! @ write output
- veor q2, q2, q11
- vst1.8 {q6}, [r1]!
- vst1.8 {q4}, [r1]!
- vst1.8 {q2}, [r1]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_four:
- sub r0, r0, #0x40
- bl _bsaes_decrypt8
- vldmia r9, {q14} @ reload IV
- vld1.8 {q8-q9}, [r0]! @ reload input
- veor q0, q0, q14 @ ^= IV
- vld1.8 {q10}, [r0]!
- veor q1, q1, q8
- veor q6, q6, q9
- vld1.8 {q15}, [r0]!
- veor q4, q4, q10
- vst1.8 {q0-q1}, [r1]! @ write output
- vst1.8 {q6}, [r1]!
- vst1.8 {q4}, [r1]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_three:
- sub r0, r0, #0x30
- bl _bsaes_decrypt8
- vldmia r9, {q14} @ reload IV
- vld1.8 {q8-q9}, [r0]! @ reload input
- veor q0, q0, q14 @ ^= IV
- vld1.8 {q15}, [r0]!
- veor q1, q1, q8
- veor q6, q6, q9
- vst1.8 {q0-q1}, [r1]! @ write output
- vst1.8 {q6}, [r1]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_two:
- sub r0, r0, #0x20
- bl _bsaes_decrypt8
- vldmia r9, {q14} @ reload IV
- vld1.8 {q8}, [r0]! @ reload input
- veor q0, q0, q14 @ ^= IV
- vld1.8 {q15}, [r0]! @ reload input
- veor q1, q1, q8
- vst1.8 {q0-q1}, [r1]! @ write output
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_one:
- sub r0, r0, #0x10
- mov r10, r1 @ save original out pointer
- mov r1, r9 @ use the iv scratch space as out buffer
- mov r2, r3
- vmov q4,q15 @ just in case ensure that IV
- vmov q5,q0 @ and input are preserved
- bl AES_decrypt
- vld1.8 {q0}, [r9,:64] @ load result
- veor q0, q0, q4 @ ^= IV
- vmov q15, q5 @ q5 holds input
- vst1.8 {q0}, [r10] @ write output
-
-.Lcbc_dec_done:
-#ifndef BSAES_ASM_EXTENDED_KEY
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-.Lcbc_dec_bzero: @ wipe key schedule [if any]
- vstmia sp!, {q0-q1}
- cmp sp, r9
- bne .Lcbc_dec_bzero
-#endif
-
- mov sp, r9
- add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
- vst1.8 {q15}, [r8] @ return IV
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc}
-.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
-.extern AES_encrypt
-.global bsaes_ctr32_encrypt_blocks
-.type bsaes_ctr32_encrypt_blocks,%function
-.align 5
-bsaes_ctr32_encrypt_blocks:
- cmp r2, #8 @ use plain AES for
- blo .Lctr_enc_short @ small sizes
-
- mov ip, sp
- stmdb sp!, {r4-r10, lr}
- VFP_ABI_PUSH
- ldr r8, [ip] @ ctr is 1st arg on the stack
- sub sp, sp, #0x10 @ scratch space to carry over the ctr
- mov r9, sp @ save sp
-
- ldr r10, [r3, #240] @ get # of rounds
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, r10, lsl#7 @ 128 bytes per inner round key
- add r12, #96 @ size of bit-sliced key schedule
-
- @ populate the key schedule
- mov r4, r3 @ pass key
- mov r5, r10 @ pass # of rounds
-	mov	sp, r12				@ sp points at bit-sliced key schedule
- bl _bsaes_key_convert
- veor q7,q7,q15 @ fix up last round key
- vstmia r12, {q7} @ save last round key
-
- vld1.8 {q0}, [r8] @ load counter
- add r8, r6, #.LREVM0SR-.LM0 @ borrow r8
- vldmia sp, {q4} @ load round0 key
-#else
- ldr r12, [r3, #244]
- eors r12, #1
- beq 0f
-
- @ populate the key schedule
- str r12, [r3, #244]
- mov r4, r3 @ pass key
- mov r5, r10 @ pass # of rounds
- add r12, r3, #248 @ pass key schedule
- bl _bsaes_key_convert
- veor q7,q7,q15 @ fix up last round key
- vstmia r12, {q7} @ save last round key
-
-.align 2
-0: add r12, r3, #248
- vld1.8 {q0}, [r8] @ load counter
- adrl r8, .LREVM0SR @ borrow r8
- vldmia r12, {q4} @ load round0 key
- sub sp, #0x10 @ place for adjusted round0 key
-#endif
-
- vmov.i32 q8,#1 @ compose 1<<96
- veor q9,q9,q9
- vrev32.8 q0,q0
- vext.8 q8,q9,q8,#4
- vrev32.8 q4,q4
- vadd.u32 q9,q8,q8 @ compose 2<<96
- vstmia sp, {q4} @ save adjusted round0 key
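-	@ note: the counter block is kept byte-reversed (vrev32.8 above), so
-	@ the big-endian 32-bit counter word can be bumped with plain vadd.u32
-	@ using the 1<<96 and 2<<96 vectors; as the function name says, only
-	@ the low 32 bits of the IV are treated as a counter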
- b .Lctr_enc_loop
-
-.align 4
-.Lctr_enc_loop:
- vadd.u32 q10, q8, q9 @ compose 3<<96
- vadd.u32 q1, q0, q8 @ +1
- vadd.u32 q2, q0, q9 @ +2
- vadd.u32 q3, q0, q10 @ +3
- vadd.u32 q4, q1, q10
- vadd.u32 q5, q2, q10
- vadd.u32 q6, q3, q10
- vadd.u32 q7, q4, q10
- vadd.u32 q10, q5, q10 @ next counter
-
- @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
- @ to flip byte order in 32-bit counter
-
- vldmia sp, {q9} @ load round0 key
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x10 @ pass next round key
-#else
- add r4, r3, #264
-#endif
- vldmia r8, {q8} @ .LREVM0SR
- mov r5, r10 @ pass rounds
- vstmia r9, {q10} @ save next counter
- sub r6, r8, #.LREVM0SR-.LSR @ pass constants
-
- bl _bsaes_encrypt8_alt
-
- subs r2, r2, #8
- blo .Lctr_enc_loop_done
-
- vld1.8 {q8-q9}, [r0]! @ load input
- vld1.8 {q10-q11}, [r0]!
- veor q0, q8
- veor q1, q9
- vld1.8 {q12-q13}, [r0]!
- veor q4, q10
- veor q6, q11
- vld1.8 {q14-q15}, [r0]!
- veor q3, q12
- vst1.8 {q0-q1}, [r1]! @ write output
- veor q7, q13
- veor q2, q14
- vst1.8 {q4}, [r1]!
- veor q5, q15
- vst1.8 {q6}, [r1]!
- vmov.i32 q8, #1 @ compose 1<<96
- vst1.8 {q3}, [r1]!
- veor q9, q9, q9
- vst1.8 {q7}, [r1]!
- vext.8 q8, q9, q8, #4
- vst1.8 {q2}, [r1]!
- vadd.u32 q9,q8,q8 @ compose 2<<96
- vst1.8 {q5}, [r1]!
- vldmia r9, {q0} @ load counter
-
- bne .Lctr_enc_loop
- b .Lctr_enc_done
-
-.align 4
-.Lctr_enc_loop_done:
- add r2, r2, #8
- vld1.8 {q8}, [r0]! @ load input
- veor q0, q8
- vst1.8 {q0}, [r1]! @ write output
- cmp r2, #2
- blo .Lctr_enc_done
- vld1.8 {q9}, [r0]!
- veor q1, q9
- vst1.8 {q1}, [r1]!
- beq .Lctr_enc_done
- vld1.8 {q10}, [r0]!
- veor q4, q10
- vst1.8 {q4}, [r1]!
- cmp r2, #4
- blo .Lctr_enc_done
- vld1.8 {q11}, [r0]!
- veor q6, q11
- vst1.8 {q6}, [r1]!
- beq .Lctr_enc_done
- vld1.8 {q12}, [r0]!
- veor q3, q12
- vst1.8 {q3}, [r1]!
- cmp r2, #6
- blo .Lctr_enc_done
- vld1.8 {q13}, [r0]!
- veor q7, q13
- vst1.8 {q7}, [r1]!
- beq .Lctr_enc_done
- vld1.8 {q14}, [r0]
- veor q2, q14
- vst1.8 {q2}, [r1]!
-
-.Lctr_enc_done:
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-#ifndef BSAES_ASM_EXTENDED_KEY
-.Lctr_enc_bzero: @ wipe key schedule [if any]
- vstmia sp!, {q0-q1}
- cmp sp, r9
- bne .Lctr_enc_bzero
-#else
- vstmia sp, {q0-q1}
-#endif
-
- mov sp, r9
- add sp, #0x10 @ add sp,r9,#0x10 is no good for thumb
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc} @ return
-
-.align 4
-.Lctr_enc_short:
- ldr ip, [sp] @ ctr pointer is passed on stack
- stmdb sp!, {r4-r8, lr}
-
- mov r4, r0 @ copy arguments
- mov r5, r1
- mov r6, r2
- mov r7, r3
- ldr r8, [ip, #12] @ load counter LSW
- vld1.8 {q1}, [ip] @ load whole counter value
-#ifdef __ARMEL__
- rev r8, r8
-#endif
- sub sp, sp, #0x10
- vst1.8 {q1}, [sp,:64] @ copy counter value
- sub sp, sp, #0x10
-
-.Lctr_enc_short_loop:
- add r0, sp, #0x10 @ input counter value
- mov r1, sp @ output on the stack
- mov r2, r7 @ key
-
- bl AES_encrypt
-
- vld1.8 {q0}, [r4]! @ load input
- vld1.8 {q1}, [sp,:64] @ load encrypted counter
- add r8, r8, #1
-#ifdef __ARMEL__
- rev r0, r8
- str r0, [sp, #0x1c] @ next counter value
-#else
- str r8, [sp, #0x1c] @ next counter value
-#endif
- veor q0,q0,q1
- vst1.8 {q0}, [r5]! @ store output
- subs r6, r6, #1
- bne .Lctr_enc_short_loop
-
- vmov.i32 q0, #0
- vmov.i32 q1, #0
- vstmia sp!, {q0-q1}
-
- ldmia sp!, {r4-r8, pc}
-.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
-.globl bsaes_xts_encrypt
-.type bsaes_xts_encrypt,%function
-.align 4
-bsaes_xts_encrypt:
- mov ip, sp
- stmdb sp!, {r4-r10, lr} @ 0x20
- VFP_ABI_PUSH
- mov r6, sp @ future r3
-
- mov r7, r0
- mov r8, r1
- mov r9, r2
- mov r10, r3
-
- sub r0, sp, #0x10 @ 0x10
- bic r0, #0xf @ align at 16 bytes
- mov sp, r0
-
-#ifdef XTS_CHAIN_TWEAK
- ldr r0, [ip] @ pointer to input tweak
-#else
- @ generate initial tweak
- ldr r0, [ip, #4] @ iv[]
- mov r1, sp
- ldr r2, [ip, #0] @ key2
- bl AES_encrypt
- mov r0,sp @ pointer to initial tweak
-#endif
-
- ldr r1, [r10, #240] @ get # of rounds
- mov r3, r6
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
- @ add r12, #96 @ size of bit-sliced key schedule
- sub r12, #48 @ place for tweak[9]
-
- @ populate the key schedule
- mov r4, r10 @ pass key
- mov r5, r1 @ pass # of rounds
- mov sp, r12
- add r12, #0x90 @ pass key schedule
- bl _bsaes_key_convert
- veor q7, q7, q15 @ fix up last round key
- vstmia r12, {q7} @ save last round key
-#else
- ldr r12, [r10, #244]
- eors r12, #1
- beq 0f
-
- str r12, [r10, #244]
- mov r4, r10 @ pass key
- mov r5, r1 @ pass # of rounds
- add r12, r10, #248 @ pass key schedule
- bl _bsaes_key_convert
- veor q7, q7, q15 @ fix up last round key
- vstmia r12, {q7}
-
-.align 2
-0: sub sp, #0x90 @ place for tweak[9]
-#endif
-
- vld1.8 {q8}, [r0] @ initial tweak
- adr r2, .Lxts_magic
-
- subs r9, #0x80
- blo .Lxts_enc_short
- b .Lxts_enc_loop
-
-.align 4
-.Lxts_enc_loop:
- vldmia r2, {q5} @ load XTS magic
- vshr.s64 q6, q8, #63
- mov r0, sp
- vand q6, q6, q5
- vadd.u64 q9, q8, q8
- vst1.64 {q8}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q9, #63
- veor q9, q9, q6
- vand q7, q7, q5
- vadd.u64 q10, q9, q9
- vst1.64 {q9}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q10, #63
- veor q10, q10, q7
- vand q6, q6, q5
- vld1.8 {q0}, [r7]!
- vadd.u64 q11, q10, q10
- vst1.64 {q10}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q11, #63
- veor q11, q11, q6
- vand q7, q7, q5
- vld1.8 {q1}, [r7]!
- veor q0, q0, q8
- vadd.u64 q12, q11, q11
- vst1.64 {q11}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q12, #63
- veor q12, q12, q7
- vand q6, q6, q5
- vld1.8 {q2}, [r7]!
- veor q1, q1, q9
- vadd.u64 q13, q12, q12
- vst1.64 {q12}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q13, #63
- veor q13, q13, q6
- vand q7, q7, q5
- vld1.8 {q3}, [r7]!
- veor q2, q2, q10
- vadd.u64 q14, q13, q13
- vst1.64 {q13}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q14, #63
- veor q14, q14, q7
- vand q6, q6, q5
- vld1.8 {q4}, [r7]!
- veor q3, q3, q11
- vadd.u64 q15, q14, q14
- vst1.64 {q14}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q15, #63
- veor q15, q15, q6
- vand q7, q7, q5
- vld1.8 {q5}, [r7]!
- veor q4, q4, q12
- vadd.u64 q8, q15, q15
- vst1.64 {q15}, [r0,:128]!
- vswp d15,d14
- veor q8, q8, q7
- vst1.64 {q8}, [r0,:128] @ next round tweak
-
- vld1.8 {q6-q7}, [r7]!
- veor q5, q5, q13
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q6, q6, q14
- mov r5, r1 @ pass rounds
- veor q7, q7, q15
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12-q13}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q4, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q6, q11
- vld1.64 {q14-q15}, [r0,:128]!
- veor q10, q3, q12
- vst1.8 {q8-q9}, [r8]!
- veor q11, q7, q13
- veor q12, q2, q14
- vst1.8 {q10-q11}, [r8]!
- veor q13, q5, q15
- vst1.8 {q12-q13}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
-
- subs r9, #0x80
- bpl .Lxts_enc_loop
-
-.Lxts_enc_short:
- adds r9, #0x70
- bmi .Lxts_enc_done
-
- vldmia r2, {q5} @ load XTS magic
- vshr.s64 q7, q8, #63
- mov r0, sp
- vand q7, q7, q5
- vadd.u64 q9, q8, q8
- vst1.64 {q8}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q9, #63
- veor q9, q9, q7
- vand q6, q6, q5
- vadd.u64 q10, q9, q9
- vst1.64 {q9}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q10, #63
- veor q10, q10, q6
- vand q7, q7, q5
- vld1.8 {q0}, [r7]!
- subs r9, #0x10
- bmi .Lxts_enc_1
- vadd.u64 q11, q10, q10
- vst1.64 {q10}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q11, #63
- veor q11, q11, q7
- vand q6, q6, q5
- vld1.8 {q1}, [r7]!
- subs r9, #0x10
- bmi .Lxts_enc_2
- veor q0, q0, q8
- vadd.u64 q12, q11, q11
- vst1.64 {q11}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q12, #63
- veor q12, q12, q6
- vand q7, q7, q5
- vld1.8 {q2}, [r7]!
- subs r9, #0x10
- bmi .Lxts_enc_3
- veor q1, q1, q9
- vadd.u64 q13, q12, q12
- vst1.64 {q12}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q13, #63
- veor q13, q13, q7
- vand q6, q6, q5
- vld1.8 {q3}, [r7]!
- subs r9, #0x10
- bmi .Lxts_enc_4
- veor q2, q2, q10
- vadd.u64 q14, q13, q13
- vst1.64 {q13}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q14, #63
- veor q14, q14, q6
- vand q7, q7, q5
- vld1.8 {q4}, [r7]!
- subs r9, #0x10
- bmi .Lxts_enc_5
- veor q3, q3, q11
- vadd.u64 q15, q14, q14
- vst1.64 {q14}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q15, #63
- veor q15, q15, q7
- vand q6, q6, q5
- vld1.8 {q5}, [r7]!
- subs r9, #0x10
- bmi .Lxts_enc_6
- veor q4, q4, q12
- sub r9, #0x10
- vst1.64 {q15}, [r0,:128] @ next round tweak
-
- vld1.8 {q6}, [r7]!
- veor q5, q5, q13
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q6, q6, q14
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12-q13}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q4, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q6, q11
- vld1.64 {q14}, [r0,:128]!
- veor q10, q3, q12
- vst1.8 {q8-q9}, [r8]!
- veor q11, q7, q13
- veor q12, q2, q14
- vst1.8 {q10-q11}, [r8]!
- vst1.8 {q12}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_6:
- vst1.64 {q14}, [r0,:128] @ next round tweak
-
- veor q4, q4, q12
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q5, q5, q13
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12-q13}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q4, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q6, q11
- veor q10, q3, q12
- vst1.8 {q8-q9}, [r8]!
- veor q11, q7, q13
- vst1.8 {q10-q11}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-
-@ put this in range for both ARM and Thumb mode adr instructions
-.align 5
-.Lxts_magic:
- .quad 1, 0x87
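-@ .Lxts_magic is the GF(2^128) doubling constant: the tweak loops shift the
-@ 128-bit tweak left by one (vadd.u64 plus the vshr/vand/vswp carry shuffle)
-@ and reduce with 0x87, i.e. x^128 = x^7 + x^2 + x + 1.  A minimal C sketch
-@ of one doubling step over a little-endian byte tweak (illustrative only,
-@ not part of this file):
-@
-@	void xts_double(unsigned char t[16]) {
-@		unsigned c = 0;			/* carry between bytes */
-@		for (int i = 0; i < 16; i++) {
-@			unsigned c2 = t[i] >> 7;
-@			t[i] = (unsigned char)((t[i] << 1) | c);
-@			c = c2;
-@		}
-@		if (c)				/* bit fell off the top: */
-@			t[0] ^= 0x87;		/* reduce mod x^128+x^7+x^2+x+1 */
-@	}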
-
-.align 5
-.Lxts_enc_5:
- vst1.64 {q13}, [r0,:128] @ next round tweak
-
- veor q3, q3, q11
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q4, q4, q12
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q4, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q6, q11
- veor q10, q3, q12
- vst1.8 {q8-q9}, [r8]!
- vst1.8 {q10}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_4:
- vst1.64 {q12}, [r0,:128] @ next round tweak
-
- veor q2, q2, q10
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q3, q3, q11
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- veor q1, q1, q9
- veor q8, q4, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q6, q11
- vst1.8 {q8-q9}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_3:
- vst1.64 {q11}, [r0,:128] @ next round tweak
-
- veor q1, q1, q9
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q2, q2, q10
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10}, [r0,:128]!
- veor q0, q0, q8
- veor q1, q1, q9
- veor q8, q4, q10
- vst1.8 {q0-q1}, [r8]!
- vst1.8 {q8}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_2:
- vst1.64 {q10}, [r0,:128] @ next round tweak
-
- veor q0, q0, q8
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q1, q1, q9
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- veor q0, q0, q8
- veor q1, q1, q9
- vst1.8 {q0-q1}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_1:
- mov r0, sp
- veor q0, q8
- mov r1, sp
- vst1.8 {q0}, [sp,:128]
- mov r2, r10
- mov r4, r3 @ preserve fp
-
- bl AES_encrypt
-
- vld1.8 {q0}, [sp,:128]
- veor q0, q0, q8
- vst1.8 {q0}, [r8]!
- mov r3, r4
-
- vmov q8, q9 @ next round tweak
-
-.Lxts_enc_done:
-#ifndef XTS_CHAIN_TWEAK
- adds r9, #0x10
- beq .Lxts_enc_ret
- sub r6, r8, #0x10
-
-.Lxts_enc_steal:
- ldrb r0, [r7], #1
- ldrb r1, [r8, #-0x10]
- strb r0, [r8, #-0x10]
- strb r1, [r8], #1
-
- subs r9, #1
- bhi .Lxts_enc_steal
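-	@ ciphertext stealing: each pass moves one tail plaintext byte into
-	@ the last full ciphertext block and emits the displaced ciphertext
-	@ byte as output; the patched block at r6 is then re-encrypted with
-	@ the final tweak below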
-
- vld1.8 {q0}, [r6]
- mov r0, sp
- veor q0, q0, q8
- mov r1, sp
- vst1.8 {q0}, [sp,:128]
- mov r2, r10
- mov r4, r3 @ preserve fp
-
- bl AES_encrypt
-
- vld1.8 {q0}, [sp,:128]
- veor q0, q0, q8
- vst1.8 {q0}, [r6]
- mov r3, r4
-#endif
-
-.Lxts_enc_ret:
- bic r0, r3, #0xf
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-#ifdef XTS_CHAIN_TWEAK
- ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
-#endif
-.Lxts_enc_bzero: @ wipe key schedule [if any]
- vstmia sp!, {q0-q1}
- cmp sp, r0
- bne .Lxts_enc_bzero
-
- mov sp, r3
-#ifdef XTS_CHAIN_TWEAK
- vst1.8 {q8}, [r1]
-#endif
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc} @ return
-
-.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
-
-.globl bsaes_xts_decrypt
-.type bsaes_xts_decrypt,%function
-.align 4
-bsaes_xts_decrypt:
- mov ip, sp
- stmdb sp!, {r4-r10, lr} @ 0x20
- VFP_ABI_PUSH
- mov r6, sp @ future r3
-
- mov r7, r0
- mov r8, r1
- mov r9, r2
- mov r10, r3
-
- sub r0, sp, #0x10 @ 0x10
- bic r0, #0xf @ align at 16 bytes
- mov sp, r0
-
-#ifdef XTS_CHAIN_TWEAK
- ldr r0, [ip] @ pointer to input tweak
-#else
- @ generate initial tweak
- ldr r0, [ip, #4] @ iv[]
- mov r1, sp
- ldr r2, [ip, #0] @ key2
- bl AES_encrypt
- mov r0, sp @ pointer to initial tweak
-#endif
-
- ldr r1, [r10, #240] @ get # of rounds
- mov r3, r6
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, r1, lsl#7 @ 128 bytes per inner round key
- @ add r12, #96 @ size of bit-sliced key schedule
- sub r12, #48 @ place for tweak[9]
-
- @ populate the key schedule
- mov r4, r10 @ pass key
- mov r5, r1 @ pass # of rounds
- mov sp, r12
- add r12, #0x90 @ pass key schedule
- bl _bsaes_key_convert
- add r4, sp, #0x90
- vldmia r4, {q6}
- vstmia r12, {q15} @ save last round key
- veor q7, q7, q6 @ fix up round 0 key
- vstmia r4, {q7}
-#else
- ldr r12, [r10, #244]
- eors r12, #1
- beq 0f
-
- str r12, [r10, #244]
- mov r4, r10 @ pass key
- mov r5, r1 @ pass # of rounds
- add r12, r10, #248 @ pass key schedule
- bl _bsaes_key_convert
- add r4, r10, #248
- vldmia r4, {q6}
- vstmia r12, {q15} @ save last round key
- veor q7, q7, q6 @ fix up round 0 key
- vstmia r4, {q7}
-
-.align 2
-0: sub sp, #0x90 @ place for tweak[9]
-#endif
- vld1.8 {q8}, [r0] @ initial tweak
- adr r2, .Lxts_magic
-
- tst r9, #0xf @ if not multiple of 16
- it ne @ Thumb2 thing, sanity check in ARM
- subne r9, #0x10 @ subtract another 16 bytes
- subs r9, #0x80
-
- blo .Lxts_dec_short
- b .Lxts_dec_loop
-
-.align 4
-.Lxts_dec_loop:
- vldmia r2, {q5} @ load XTS magic
- vshr.s64 q6, q8, #63
- mov r0, sp
- vand q6, q6, q5
- vadd.u64 q9, q8, q8
- vst1.64 {q8}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q9, #63
- veor q9, q9, q6
- vand q7, q7, q5
- vadd.u64 q10, q9, q9
- vst1.64 {q9}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q10, #63
- veor q10, q10, q7
- vand q6, q6, q5
- vld1.8 {q0}, [r7]!
- vadd.u64 q11, q10, q10
- vst1.64 {q10}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q11, #63
- veor q11, q11, q6
- vand q7, q7, q5
- vld1.8 {q1}, [r7]!
- veor q0, q0, q8
- vadd.u64 q12, q11, q11
- vst1.64 {q11}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q12, #63
- veor q12, q12, q7
- vand q6, q6, q5
- vld1.8 {q2}, [r7]!
- veor q1, q1, q9
- vadd.u64 q13, q12, q12
- vst1.64 {q12}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q13, #63
- veor q13, q13, q6
- vand q7, q7, q5
- vld1.8 {q3}, [r7]!
- veor q2, q2, q10
- vadd.u64 q14, q13, q13
- vst1.64 {q13}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q14, #63
- veor q14, q14, q7
- vand q6, q6, q5
- vld1.8 {q4}, [r7]!
- veor q3, q3, q11
- vadd.u64 q15, q14, q14
- vst1.64 {q14}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q15, #63
- veor q15, q15, q6
- vand q7, q7, q5
- vld1.8 {q5}, [r7]!
- veor q4, q4, q12
- vadd.u64 q8, q15, q15
- vst1.64 {q15}, [r0,:128]!
- vswp d15,d14
- veor q8, q8, q7
- vst1.64 {q8}, [r0,:128] @ next round tweak
-
- vld1.8 {q6-q7}, [r7]!
- veor q5, q5, q13
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q6, q6, q14
- mov r5, r1 @ pass rounds
- veor q7, q7, q15
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12-q13}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q6, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q4, q11
- vld1.64 {q14-q15}, [r0,:128]!
- veor q10, q2, q12
- vst1.8 {q8-q9}, [r8]!
- veor q11, q7, q13
- veor q12, q3, q14
- vst1.8 {q10-q11}, [r8]!
- veor q13, q5, q15
- vst1.8 {q12-q13}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
-
- subs r9, #0x80
- bpl .Lxts_dec_loop
-
-.Lxts_dec_short:
- adds r9, #0x70
- bmi .Lxts_dec_done
-
- vldmia r2, {q5} @ load XTS magic
- vshr.s64 q7, q8, #63
- mov r0, sp
- vand q7, q7, q5
- vadd.u64 q9, q8, q8
- vst1.64 {q8}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q9, #63
- veor q9, q9, q7
- vand q6, q6, q5
- vadd.u64 q10, q9, q9
- vst1.64 {q9}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q10, #63
- veor q10, q10, q6
- vand q7, q7, q5
- vld1.8 {q0}, [r7]!
- subs r9, #0x10
- bmi .Lxts_dec_1
- vadd.u64 q11, q10, q10
- vst1.64 {q10}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q11, #63
- veor q11, q11, q7
- vand q6, q6, q5
- vld1.8 {q1}, [r7]!
- subs r9, #0x10
- bmi .Lxts_dec_2
- veor q0, q0, q8
- vadd.u64 q12, q11, q11
- vst1.64 {q11}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q12, #63
- veor q12, q12, q6
- vand q7, q7, q5
- vld1.8 {q2}, [r7]!
- subs r9, #0x10
- bmi .Lxts_dec_3
- veor q1, q1, q9
- vadd.u64 q13, q12, q12
- vst1.64 {q12}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q13, #63
- veor q13, q13, q7
- vand q6, q6, q5
- vld1.8 {q3}, [r7]!
- subs r9, #0x10
- bmi .Lxts_dec_4
- veor q2, q2, q10
- vadd.u64 q14, q13, q13
- vst1.64 {q13}, [r0,:128]!
- vswp d13,d12
- vshr.s64 q7, q14, #63
- veor q14, q14, q6
- vand q7, q7, q5
- vld1.8 {q4}, [r7]!
- subs r9, #0x10
- bmi .Lxts_dec_5
- veor q3, q3, q11
- vadd.u64 q15, q14, q14
- vst1.64 {q14}, [r0,:128]!
- vswp d15,d14
- vshr.s64 q6, q15, #63
- veor q15, q15, q7
- vand q6, q6, q5
- vld1.8 {q5}, [r7]!
- subs r9, #0x10
- bmi .Lxts_dec_6
- veor q4, q4, q12
- sub r9, #0x10
- vst1.64 {q15}, [r0,:128] @ next round tweak
-
- vld1.8 {q6}, [r7]!
- veor q5, q5, q13
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q6, q6, q14
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12-q13}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q6, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q4, q11
- vld1.64 {q14}, [r0,:128]!
- veor q10, q2, q12
- vst1.8 {q8-q9}, [r8]!
- veor q11, q7, q13
- veor q12, q3, q14
- vst1.8 {q10-q11}, [r8]!
- vst1.8 {q12}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_6:
- vst1.64 {q14}, [r0,:128] @ next round tweak
-
- veor q4, q4, q12
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q5, q5, q13
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12-q13}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q6, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q4, q11
- veor q10, q2, q12
- vst1.8 {q8-q9}, [r8]!
- veor q11, q7, q13
- vst1.8 {q10-q11}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_5:
- vst1.64 {q13}, [r0,:128] @ next round tweak
-
- veor q3, q3, q11
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q4, q4, q12
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- vld1.64 {q12}, [r0,:128]!
- veor q1, q1, q9
- veor q8, q6, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q4, q11
- veor q10, q2, q12
- vst1.8 {q8-q9}, [r8]!
- vst1.8 {q10}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_4:
- vst1.64 {q12}, [r0,:128] @ next round tweak
-
- veor q2, q2, q10
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q3, q3, q11
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10-q11}, [r0,:128]!
- veor q0, q0, q8
- veor q1, q1, q9
- veor q8, q6, q10
- vst1.8 {q0-q1}, [r8]!
- veor q9, q4, q11
- vst1.8 {q8-q9}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_3:
- vst1.64 {q11}, [r0,:128] @ next round tweak
-
- veor q1, q1, q9
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q2, q2, q10
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- vld1.64 {q10}, [r0,:128]!
- veor q0, q0, q8
- veor q1, q1, q9
- veor q8, q6, q10
- vst1.8 {q0-q1}, [r8]!
- vst1.8 {q8}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_2:
- vst1.64 {q10}, [r0,:128] @ next round tweak
-
- veor q0, q0, q8
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, r10, #248 @ pass key schedule
-#endif
- veor q1, q1, q9
- mov r5, r1 @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {q8-q9}, [r0,:128]!
- veor q0, q0, q8
- veor q1, q1, q9
- vst1.8 {q0-q1}, [r8]!
-
- vld1.64 {q8}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_1:
- mov r0, sp
- veor q0, q8
- mov r1, sp
- vst1.8 {q0}, [sp,:128]
- mov r2, r10
- mov r4, r3 @ preserve fp
- mov r5, r2 @ preserve magic
-
- bl AES_decrypt
-
- vld1.8 {q0}, [sp,:128]
- veor q0, q0, q8
- vst1.8 {q0}, [r8]!
- mov r3, r4
- mov r2, r5
-
- vmov q8, q9 @ next round tweak
-
-.Lxts_dec_done:
-#ifndef XTS_CHAIN_TWEAK
- adds r9, #0x10
- beq .Lxts_dec_ret
-
- @ calculate one round of extra tweak for the stolen ciphertext
- vldmia r2, {q5}
- vshr.s64 q6, q8, #63
- vand q6, q6, q5
- vadd.u64 q9, q8, q8
- vswp d13,d12
- veor q9, q9, q6
-
- @ perform the final decryption with the last tweak value
- vld1.8 {q0}, [r7]!
- mov r0, sp
- veor q0, q0, q9
- mov r1, sp
- vst1.8 {q0}, [sp,:128]
- mov r2, r10
- mov r4, r3 @ preserve fp
-
- bl AES_decrypt
-
- vld1.8 {q0}, [sp,:128]
- veor q0, q0, q9
- vst1.8 {q0}, [r8]
-
- mov r6, r8
-.Lxts_dec_steal:
- ldrb r1, [r8]
- ldrb r0, [r7], #1
- strb r1, [r8, #0x10]
- strb r0, [r8], #1
-
- subs r9, #1
- bhi .Lxts_dec_steal
-
- vld1.8 {q0}, [r6]
- mov r0, sp
- veor q0, q8
- mov r1, sp
- vst1.8 {q0}, [sp,:128]
- mov r2, r10
-
- bl AES_decrypt
-
- vld1.8 {q0}, [sp,:128]
- veor q0, q0, q8
- vst1.8 {q0}, [r6]
- mov r3, r4
-#endif
-
-.Lxts_dec_ret:
- bic r0, r3, #0xf
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-#ifdef XTS_CHAIN_TWEAK
- ldr r1, [r3, #0x20+VFP_ABI_FRAME] @ chain tweak
-#endif
-.Lxts_dec_bzero: @ wipe key schedule [if any]
- vstmia sp!, {q0-q1}
- cmp sp, r0
- bne .Lxts_dec_bzero
-
- mov sp, r3
-#ifdef XTS_CHAIN_TWEAK
- vst1.8 {q8}, [r1]
-#endif
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc} @ return
-
-.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
-#endif
diff --git a/app/openssl/crypto/aes/asm/bsaes-armv7.pl b/app/openssl/crypto/aes/asm/bsaes-armv7.pl
deleted file mode 100644
index f3d96d93..00000000
--- a/app/openssl/crypto/aes/asm/bsaes-armv7.pl
+++ /dev/null
@@ -1,2467 +0,0 @@
-#!/usr/bin/env perl
-
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-#
-# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
-# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
-# granted.
-# ====================================================================
-
-# Bit-sliced AES for ARM NEON
-#
-# February 2012.
-#
-# This implementation is direct adaptation of bsaes-x86_64 module for
-# ARM NEON. Except that this module is endian-neutral [in sense that
-# it can be compiled for either endianness] by courtesy of vld1.8's
-# neutrality. Initial version doesn't implement interface to OpenSSL,
-# only low-level primitives and unsupported entry points, just enough
-# to collect performance results, which for Cortex-A8 core are:
-#
-# encrypt 19.5 cycles per byte processed with 128-bit key
-# decrypt 22.1 cycles per byte processed with 128-bit key
-# key conv. 440 cycles per 128-bit key/0.18 of 8x block
-#
-# Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
-# which is [much] worse than anticipated (for further details see
-# http://www.openssl.org/~appro/Snapdragon-S4.html).
-#
-# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
-# manages in 20.0 cycles].
-#
-# When comparing to x86_64 results keep in mind that NEON unit is
-# [mostly] single-issue and thus can't [fully] benefit from
-# instruction-level parallelism. And when comparing to aes-armv4
-# results keep in mind key schedule conversion overhead (see
-# bsaes-x86_64.pl for further details)...
-#
-# <appro@openssl.org>
-
-# April-August 2013
-#
-# Add CBC, CTR and XTS subroutines, adapt for kernel use.
-#
-# <ard.biesheuvel@linaro.org>
-
-while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
-open STDOUT,">$output";
-
-my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
-my @XMM=map("q$_",(0..15));
-
-{
-my ($key,$rounds,$const)=("r4","r5","r6");
-
-sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
-sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
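-# e.g. &Dlo("q1") is "d2" and &Dhi("q1") is "d3": NEON qN aliases the pair
-# d(2N):d(2N+1), and vtbl.8 needs the d halves addressed separately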
-
-sub Sbox {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
-my @b=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
- &InBasisChange (@b);
- &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s);
- &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
-}
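-# i.e. the S-box is computed as basis change -> inversion in GF(2^8) ->
-# basis change, bit-sliced across 8 blocks at a time; the affine constant
-# 0x63 is folded into the key schedule by _bsaes_key_convert rather than
-# applied here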
-
-sub InBasisChange {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
-my @b=@_[0..7];
-$code.=<<___;
- veor @b[2], @b[2], @b[1]
- veor @b[5], @b[5], @b[6]
- veor @b[3], @b[3], @b[0]
- veor @b[6], @b[6], @b[2]
- veor @b[5], @b[5], @b[0]
-
- veor @b[6], @b[6], @b[3]
- veor @b[3], @b[3], @b[7]
- veor @b[7], @b[7], @b[5]
- veor @b[3], @b[3], @b[4]
- veor @b[4], @b[4], @b[5]
-
- veor @b[2], @b[2], @b[7]
- veor @b[3], @b[3], @b[1]
- veor @b[1], @b[1], @b[5]
-___
-}
-
-sub OutBasisChange {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
-my @b=@_[0..7];
-$code.=<<___;
- veor @b[0], @b[0], @b[6]
- veor @b[1], @b[1], @b[4]
- veor @b[4], @b[4], @b[6]
- veor @b[2], @b[2], @b[0]
- veor @b[6], @b[6], @b[1]
-
- veor @b[1], @b[1], @b[5]
- veor @b[5], @b[5], @b[3]
- veor @b[3], @b[3], @b[7]
- veor @b[7], @b[7], @b[5]
- veor @b[2], @b[2], @b[5]
-
- veor @b[4], @b[4], @b[7]
-___
-}
-
-sub InvSbox {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
-my @b=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
- &InvInBasisChange (@b);
- &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s);
- &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]);
-}
-
-sub InvInBasisChange { # OutBasisChange in reverse (with twist)
-my @b=@_[5,1,2,6,3,7,0,4];
-$code.=<<___
- veor @b[1], @b[1], @b[7]
- veor @b[4], @b[4], @b[7]
-
- veor @b[7], @b[7], @b[5]
- veor @b[1], @b[1], @b[3]
- veor @b[2], @b[2], @b[5]
- veor @b[3], @b[3], @b[7]
-
- veor @b[6], @b[6], @b[1]
- veor @b[2], @b[2], @b[0]
- veor @b[5], @b[5], @b[3]
- veor @b[4], @b[4], @b[6]
- veor @b[0], @b[0], @b[6]
- veor @b[1], @b[1], @b[4]
-___
-}
-
-sub InvOutBasisChange { # InBasisChange in reverse
-my @b=@_[2,5,7,3,6,1,0,4];
-$code.=<<___;
- veor @b[1], @b[1], @b[5]
- veor @b[2], @b[2], @b[7]
-
- veor @b[3], @b[3], @b[1]
- veor @b[4], @b[4], @b[5]
- veor @b[7], @b[7], @b[5]
- veor @b[3], @b[3], @b[4]
- veor @b[5], @b[5], @b[0]
- veor @b[3], @b[3], @b[7]
- veor @b[6], @b[6], @b[2]
- veor @b[2], @b[2], @b[1]
- veor @b[6], @b[6], @b[3]
-
- veor @b[3], @b[3], @b[0]
- veor @b[5], @b[5], @b[6]
-___
-}
-
-sub Mul_GF4 {
-#;*************************************************************
-#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
-#;*************************************************************
-my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
-$code.=<<___;
- veor $t0, $y0, $y1
- vand $t0, $t0, $x0
- veor $x0, $x0, $x1
- vand $t1, $x1, $y0
- vand $x0, $x0, $y1
- veor $x1, $t1, $t0
- veor $x0, $x0, $t1
-___
-}
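-# (3 ANDs + 4 XORs per bit-sliced GF(2^2) product, sharing the term
-# $x0 & ($y0 ^ $y1) Karatsuba-style)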
-
-sub Mul_GF4_N { # not used, see next subroutine
-# multiply and scale by N
-my ($x0,$x1,$y0,$y1,$t0)=@_;
-$code.=<<___;
- veor $t0, $y0, $y1
- vand $t0, $t0, $x0
- veor $x0, $x0, $x1
- vand $x1, $x1, $y0
- vand $x0, $x0, $y1
- veor $x1, $x1, $x0
- veor $x0, $x0, $t0
-___
-}
-
-sub Mul_GF4_N_GF4 {
-# interleaved Mul_GF4_N and Mul_GF4
-my ($x0,$x1,$y0,$y1,$t0,
- $x2,$x3,$y2,$y3,$t1)=@_;
-$code.=<<___;
- veor $t0, $y0, $y1
- veor $t1, $y2, $y3
- vand $t0, $t0, $x0
- vand $t1, $t1, $x2
- veor $x0, $x0, $x1
- veor $x2, $x2, $x3
- vand $x1, $x1, $y0
- vand $x3, $x3, $y2
- vand $x0, $x0, $y1
- vand $x2, $x2, $y3
- veor $x1, $x1, $x0
- veor $x2, $x2, $x3
- veor $x0, $x0, $t0
- veor $x3, $x3, $t1
-___
-}
-sub Mul_GF16_2 {
-my @x=@_[0..7];
-my @y=@_[8..11];
-my @t=@_[12..15];
-$code.=<<___;
- veor @t[0], @x[0], @x[2]
- veor @t[1], @x[1], @x[3]
-___
- &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
-$code.=<<___;
- veor @y[0], @y[0], @y[2]
- veor @y[1], @y[1], @y[3]
-___
- Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
- @x[2], @x[3], @y[2], @y[3], @t[2]);
-$code.=<<___;
- veor @x[0], @x[0], @t[0]
- veor @x[2], @x[2], @t[0]
- veor @x[1], @x[1], @t[1]
- veor @x[3], @x[3], @t[1]
-
- veor @t[0], @x[4], @x[6]
- veor @t[1], @x[5], @x[7]
-___
- &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
- @x[6], @x[7], @y[2], @y[3], @t[2]);
-$code.=<<___;
- veor @y[0], @y[0], @y[2]
- veor @y[1], @y[1], @y[3]
-___
- &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
-$code.=<<___;
- veor @x[4], @x[4], @t[0]
- veor @x[6], @x[6], @t[0]
- veor @x[5], @x[5], @t[1]
- veor @x[7], @x[7], @t[1]
-___
-}
-sub Inv_GF256 {
-#;********************************************************************
-#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) *
-#;********************************************************************
-my @x=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
-# direct optimizations from hardware
-$code.=<<___;
- veor @t[3], @x[4], @x[6]
- veor @t[2], @x[5], @x[7]
- veor @t[1], @x[1], @x[3]
- veor @s[1], @x[7], @x[6]
- vmov @t[0], @t[2]
- veor @s[0], @x[0], @x[2]
-
- vorr @t[2], @t[2], @t[1]
- veor @s[3], @t[3], @t[0]
- vand @s[2], @t[3], @s[0]
- vorr @t[3], @t[3], @s[0]
- veor @s[0], @s[0], @t[1]
- vand @t[0], @t[0], @t[1]
- veor @t[1], @x[3], @x[2]
- vand @s[3], @s[3], @s[0]
- vand @s[1], @s[1], @t[1]
- veor @t[1], @x[4], @x[5]
- veor @s[0], @x[1], @x[0]
- veor @t[3], @t[3], @s[1]
- veor @t[2], @t[2], @s[1]
- vand @s[1], @t[1], @s[0]
- vorr @t[1], @t[1], @s[0]
- veor @t[3], @t[3], @s[3]
- veor @t[0], @t[0], @s[1]
- veor @t[2], @t[2], @s[2]
- veor @t[1], @t[1], @s[3]
- veor @t[0], @t[0], @s[2]
- vand @s[0], @x[7], @x[3]
- veor @t[1], @t[1], @s[2]
- vand @s[1], @x[6], @x[2]
- vand @s[2], @x[5], @x[1]
- vorr @s[3], @x[4], @x[0]
- veor @t[3], @t[3], @s[0]
- veor @t[1], @t[1], @s[2]
- veor @t[0], @t[0], @s[3]
- veor @t[2], @t[2], @s[1]
-
- @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
-
- @ new smaller inversion
-
- vand @s[2], @t[3], @t[1]
- vmov @s[0], @t[0]
-
- veor @s[1], @t[2], @s[2]
- veor @s[3], @t[0], @s[2]
- veor @s[2], @t[0], @s[2] @ @s[2]=@s[3]
-
- vbsl @s[1], @t[1], @t[0]
- vbsl @s[3], @t[3], @t[2]
- veor @t[3], @t[3], @t[2]
-
- vbsl @s[0], @s[1], @s[2]
- vbsl @t[0], @s[2], @s[1]
-
- vand @s[2], @s[0], @s[3]
- veor @t[1], @t[1], @t[0]
-
- veor @s[2], @s[2], @t[3]
-___
-# output in s3, s2, s1, t1
-
-# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
-
-# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
- &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
-
-### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
-}
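-# The inversion is a tower-field construction: the GF(2^4) inverse computed
-# by the "new smaller inversion" block above is mapped back up to GF(2^8)
-# by the interleaved GF(2^4) multiplications in Mul_GF16_2.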
-
-# AES linear components
-
-sub ShiftRows {
-my @x=@_[0..7];
-my @t=@_[8..11];
-my $mask=pop;
-$code.=<<___;
- vldmia $key!, {@t[0]-@t[3]}
- veor @t[0], @t[0], @x[0]
- veor @t[1], @t[1], @x[1]
- vtbl.8 `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
- vldmia $key!, {@t[0]}
- veor @t[2], @t[2], @x[2]
- vtbl.8 `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
- vldmia $key!, {@t[1]}
- veor @t[3], @t[3], @x[3]
- vtbl.8 `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
- vldmia $key!, {@t[2]}
- vtbl.8 `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
- vldmia $key!, {@t[3]}
- veor @t[0], @t[0], @x[4]
- veor @t[1], @t[1], @x[5]
- vtbl.8 `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
- veor @t[2], @t[2], @x[6]
- vtbl.8 `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
- veor @t[3], @t[3], @x[7]
- vtbl.8 `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
- vtbl.8 `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
- vtbl.8 `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
-___
-}
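-# Note this fuses AddRoundKey with ShiftRows: the round key is loaded and
-# XORed in first, then vtbl.8 applies $mask (one of the .LSR/.LISR tables
-# from _bsaes_const) as a byte permutation.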
-
-sub MixColumns {
-# modified to emit output in order suitable for feeding back to aesenc[last]
-my @x=@_[0..7];
-my @t=@_[8..15];
-my $inv=@_[16]; # optional
-$code.=<<___;
- vext.8 @t[0], @x[0], @x[0], #12 @ x0 <<< 32
- vext.8 @t[1], @x[1], @x[1], #12
- veor @x[0], @x[0], @t[0] @ x0 ^ (x0 <<< 32)
- vext.8 @t[2], @x[2], @x[2], #12
- veor @x[1], @x[1], @t[1]
- vext.8 @t[3], @x[3], @x[3], #12
- veor @x[2], @x[2], @t[2]
- vext.8 @t[4], @x[4], @x[4], #12
- veor @x[3], @x[3], @t[3]
- vext.8 @t[5], @x[5], @x[5], #12
- veor @x[4], @x[4], @t[4]
- vext.8 @t[6], @x[6], @x[6], #12
- veor @x[5], @x[5], @t[5]
- vext.8 @t[7], @x[7], @x[7], #12
- veor @x[6], @x[6], @t[6]
-
- veor @t[1], @t[1], @x[0]
- veor @x[7], @x[7], @t[7]
-	vext.8	@x[0], @x[0], @x[0], #8		@ (x0 ^ (x0 <<< 32)) <<< 64
- veor @t[2], @t[2], @x[1]
- veor @t[0], @t[0], @x[7]
- veor @t[1], @t[1], @x[7]
- vext.8 @x[1], @x[1], @x[1], #8
- veor @t[5], @t[5], @x[4]
- veor @x[0], @x[0], @t[0]
- veor @t[6], @t[6], @x[5]
- veor @x[1], @x[1], @t[1]
- vext.8 @t[0], @x[4], @x[4], #8
- veor @t[4], @t[4], @x[3]
- vext.8 @t[1], @x[5], @x[5], #8
- veor @t[7], @t[7], @x[6]
- vext.8 @x[4], @x[3], @x[3], #8
- veor @t[3], @t[3], @x[2]
- vext.8 @x[5], @x[7], @x[7], #8
- veor @t[4], @t[4], @x[7]
- vext.8 @x[3], @x[6], @x[6], #8
- veor @t[3], @t[3], @x[7]
- vext.8 @x[6], @x[2], @x[2], #8
- veor @x[7], @t[1], @t[5]
-___
-$code.=<<___ if (!$inv);
- veor @x[2], @t[0], @t[4]
- veor @x[4], @x[4], @t[3]
- veor @x[5], @x[5], @t[7]
- veor @x[3], @x[3], @t[6]
- @ vmov @x[2], @t[0]
- veor @x[6], @x[6], @t[2]
- @ vmov @x[7], @t[1]
-___
-$code.=<<___ if ($inv);
- veor @t[3], @t[3], @x[4]
- veor @x[5], @x[5], @t[7]
- veor @x[2], @x[3], @t[6]
- veor @x[3], @t[0], @t[4]
- veor @x[4], @x[6], @t[2]
- vmov @x[6], @t[3]
- @ vmov @x[7], @t[1]
-___
-}
-
-sub InvMixColumns_orig {
-my @x=@_[0..7];
-my @t=@_[8..15];
-
-$code.=<<___;
- @ multiplication by 0x0e
- vext.8 @t[7], @x[7], @x[7], #12
- vmov @t[2], @x[2]
- veor @x[2], @x[2], @x[5] @ 2 5
- veor @x[7], @x[7], @x[5] @ 7 5
- vext.8 @t[0], @x[0], @x[0], #12
- vmov @t[5], @x[5]
- veor @x[5], @x[5], @x[0] @ 5 0 [1]
- veor @x[0], @x[0], @x[1] @ 0 1
- vext.8 @t[1], @x[1], @x[1], #12
- veor @x[1], @x[1], @x[2] @ 1 25
- veor @x[0], @x[0], @x[6] @ 01 6 [2]
- vext.8 @t[3], @x[3], @x[3], #12
- veor @x[1], @x[1], @x[3] @ 125 3 [4]
- veor @x[2], @x[2], @x[0] @ 25 016 [3]
- veor @x[3], @x[3], @x[7] @ 3 75
- veor @x[7], @x[7], @x[6] @ 75 6 [0]
- vext.8 @t[6], @x[6], @x[6], #12
- vmov @t[4], @x[4]
- veor @x[6], @x[6], @x[4] @ 6 4
- veor @x[4], @x[4], @x[3] @ 4 375 [6]
- veor @x[3], @x[3], @x[7] @ 375 756=36
- veor @x[6], @x[6], @t[5] @ 64 5 [7]
- veor @x[3], @x[3], @t[2] @ 36 2
- vext.8 @t[5], @t[5], @t[5], #12
- veor @x[3], @x[3], @t[4] @ 362 4 [5]
-___
- my @y = @x[7,5,0,2,1,3,4,6];
-$code.=<<___;
- @ multiplication by 0x0b
- veor @y[1], @y[1], @y[0]
- veor @y[0], @y[0], @t[0]
- vext.8 @t[2], @t[2], @t[2], #12
- veor @y[1], @y[1], @t[1]
- veor @y[0], @y[0], @t[5]
- vext.8 @t[4], @t[4], @t[4], #12
- veor @y[1], @y[1], @t[6]
- veor @y[0], @y[0], @t[7]
- veor @t[7], @t[7], @t[6] @ clobber t[7]
-
- veor @y[3], @y[3], @t[0]
- veor @y[1], @y[1], @y[0]
- vext.8 @t[0], @t[0], @t[0], #12
- veor @y[2], @y[2], @t[1]
- veor @y[4], @y[4], @t[1]
- vext.8 @t[1], @t[1], @t[1], #12
- veor @y[2], @y[2], @t[2]
- veor @y[3], @y[3], @t[2]
- veor @y[5], @y[5], @t[2]
- veor @y[2], @y[2], @t[7]
- vext.8 @t[2], @t[2], @t[2], #12
- veor @y[3], @y[3], @t[3]
- veor @y[6], @y[6], @t[3]
- veor @y[4], @y[4], @t[3]
- veor @y[7], @y[7], @t[4]
- vext.8 @t[3], @t[3], @t[3], #12
- veor @y[5], @y[5], @t[4]
- veor @y[7], @y[7], @t[7]
- veor @t[7], @t[7], @t[5] @ clobber t[7] even more
- veor @y[3], @y[3], @t[5]
- veor @y[4], @y[4], @t[4]
-
- veor @y[5], @y[5], @t[7]
- vext.8 @t[4], @t[4], @t[4], #12
- veor @y[6], @y[6], @t[7]
- veor @y[4], @y[4], @t[7]
-
- veor @t[7], @t[7], @t[5]
- vext.8 @t[5], @t[5], @t[5], #12
-
- @ multiplication by 0x0d
- veor @y[4], @y[4], @y[7]
- veor @t[7], @t[7], @t[6] @ restore t[7]
- veor @y[7], @y[7], @t[4]
- vext.8 @t[6], @t[6], @t[6], #12
- veor @y[2], @y[2], @t[0]
- veor @y[7], @y[7], @t[5]
- vext.8 @t[7], @t[7], @t[7], #12
- veor @y[2], @y[2], @t[2]
-
- veor @y[3], @y[3], @y[1]
- veor @y[1], @y[1], @t[1]
- veor @y[0], @y[0], @t[0]
- veor @y[3], @y[3], @t[0]
- veor @y[1], @y[1], @t[5]
- veor @y[0], @y[0], @t[5]
- vext.8 @t[0], @t[0], @t[0], #12
- veor @y[1], @y[1], @t[7]
- veor @y[0], @y[0], @t[6]
- veor @y[3], @y[3], @y[1]
- veor @y[4], @y[4], @t[1]
- vext.8 @t[1], @t[1], @t[1], #12
-
- veor @y[7], @y[7], @t[7]
- veor @y[4], @y[4], @t[2]
- veor @y[5], @y[5], @t[2]
- veor @y[2], @y[2], @t[6]
- veor @t[6], @t[6], @t[3] @ clobber t[6]
- vext.8 @t[2], @t[2], @t[2], #12
- veor @y[4], @y[4], @y[7]
- veor @y[3], @y[3], @t[6]
-
- veor @y[6], @y[6], @t[6]
- veor @y[5], @y[5], @t[5]
- vext.8 @t[5], @t[5], @t[5], #12
- veor @y[6], @y[6], @t[4]
- vext.8 @t[4], @t[4], @t[4], #12
- veor @y[5], @y[5], @t[6]
- veor @y[6], @y[6], @t[7]
- vext.8 @t[7], @t[7], @t[7], #12
- veor @t[6], @t[6], @t[3] @ restore t[6]
- vext.8 @t[3], @t[3], @t[3], #12
-
- @ multiplication by 0x09
- veor @y[4], @y[4], @y[1]
- veor @t[1], @t[1], @y[1] @ t[1]=y[1]
- veor @t[0], @t[0], @t[5] @ clobber t[0]
- vext.8 @t[6], @t[6], @t[6], #12
- veor @t[1], @t[1], @t[5]
- veor @y[3], @y[3], @t[0]
- veor @t[0], @t[0], @y[0] @ t[0]=y[0]
- veor @t[1], @t[1], @t[6]
- veor @t[6], @t[6], @t[7] @ clobber t[6]
- veor @y[4], @y[4], @t[1]
- veor @y[7], @y[7], @t[4]
- veor @y[6], @y[6], @t[3]
- veor @y[5], @y[5], @t[2]
- veor @t[4], @t[4], @y[4] @ t[4]=y[4]
- veor @t[3], @t[3], @y[3] @ t[3]=y[3]
- veor @t[5], @t[5], @y[5] @ t[5]=y[5]
- veor @t[2], @t[2], @y[2] @ t[2]=y[2]
- veor @t[3], @t[3], @t[7]
- veor @XMM[5], @t[5], @t[6]
- veor @XMM[6], @t[6], @y[6] @ t[6]=y[6]
- veor @XMM[2], @t[2], @t[6]
- veor @XMM[7], @t[7], @y[7] @ t[7]=y[7]
-
- vmov @XMM[0], @t[0]
- vmov @XMM[1], @t[1]
- @ vmov @XMM[2], @t[2]
- vmov @XMM[3], @t[3]
- vmov @XMM[4], @t[4]
- @ vmov @XMM[5], @t[5]
- @ vmov @XMM[6], @t[6]
- @ vmov @XMM[7], @t[7]
-___
-}
-
-sub InvMixColumns {
-my @x=@_[0..7];
-my @t=@_[8..15];
-
-# Thanks to Jussi Kivilinna for providing pointer to
-#
-# | 0e 0b 0d 09 | | 02 03 01 01 | | 05 00 04 00 |
-# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
-# | 0d 09 0e 0b | | 01 01 02 03 | | 04 00 05 00 |
-# | 0b 0d 09 0e | | 03 01 01 02 | | 00 04 00 05 |
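-#
-# Spot-check of the first row over GF(2^8)/0x11b:
-#   0e = 02*05 ^ 01*04 = 0a ^ 04	0b = 03*05 ^ 01*04 = 0f ^ 04
-#   0d = 02*04 ^ 01*05 = 08 ^ 05	09 = 03*04 ^ 01*05 = 0c ^ 05
-# i.e. InvMixColumns is the cheap 05-00-04-00 multiply below followed by the
-# regular MixColumns network.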
-
-$code.=<<___;
- @ multiplication by 0x05-0x00-0x04-0x00
- vext.8 @t[0], @x[0], @x[0], #8
- vext.8 @t[6], @x[6], @x[6], #8
- vext.8 @t[7], @x[7], @x[7], #8
- veor @t[0], @t[0], @x[0]
- vext.8 @t[1], @x[1], @x[1], #8
- veor @t[6], @t[6], @x[6]
- vext.8 @t[2], @x[2], @x[2], #8
- veor @t[7], @t[7], @x[7]
- vext.8 @t[3], @x[3], @x[3], #8
- veor @t[1], @t[1], @x[1]
- vext.8 @t[4], @x[4], @x[4], #8
- veor @t[2], @t[2], @x[2]
- vext.8 @t[5], @x[5], @x[5], #8
- veor @t[3], @t[3], @x[3]
- veor @t[4], @t[4], @x[4]
- veor @t[5], @t[5], @x[5]
-
- veor @x[0], @x[0], @t[6]
- veor @x[1], @x[1], @t[6]
- veor @x[2], @x[2], @t[0]
- veor @x[4], @x[4], @t[2]
- veor @x[3], @x[3], @t[1]
- veor @x[1], @x[1], @t[7]
- veor @x[2], @x[2], @t[7]
- veor @x[4], @x[4], @t[6]
- veor @x[5], @x[5], @t[3]
- veor @x[3], @x[3], @t[6]
- veor @x[6], @x[6], @t[4]
- veor @x[4], @x[4], @t[7]
- veor @x[5], @x[5], @t[7]
- veor @x[7], @x[7], @t[5]
-___
- &MixColumns (@x,@t,1); # flipped 2<->3 and 4<->6
-}
-
-sub swapmove {
-my ($a,$b,$n,$mask,$t)=@_;
-$code.=<<___;
- vshr.u64 $t, $b, #$n
- veor $t, $t, $a
- vand $t, $t, $mask
- veor $a, $a, $t
- vshl.u64 $t, $t, #$n
- veor $b, $b, $t
-___
-}
-sub swapmove2x {
-my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
-$code.=<<___;
- vshr.u64 $t0, $b0, #$n
- vshr.u64 $t1, $b1, #$n
- veor $t0, $t0, $a0
- veor $t1, $t1, $a1
- vand $t0, $t0, $mask
- vand $t1, $t1, $mask
- veor $a0, $a0, $t0
- vshl.u64 $t0, $t0, #$n
- veor $a1, $a1, $t1
- vshl.u64 $t1, $t1, #$n
- veor $b0, $b0, $t0
- veor $b1, $b1, $t1
-___
-}
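-
-# Scalar model of the swapmove primitive above (hypothetical helper, unused):
-# it exchanges the bits selected by $mask between $a and $b>>$n; three passes
-# with masks 0x55/0x33/0x0f, as in bitslice() below, transpose an 8x8 bit
-# matrix and so convert byte-oriented state into bit-sliced form.
-sub _swapmove_scalar {
-my ($a,$b,$n,$mask)=@_;
-	my $t = (($b >> $n) ^ $a) & $mask;
-	return ($a ^ $t, $b ^ ($t << $n));
-}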
-
-sub bitslice {
-my @x=reverse(@_[0..7]);
-my ($t0,$t1,$t2,$t3)=@_[8..11];
-$code.=<<___;
- vmov.i8 $t0,#0x55 @ compose .LBS0
- vmov.i8 $t1,#0x33 @ compose .LBS1
-___
- &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
- &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
-$code.=<<___;
- vmov.i8 $t0,#0x0f @ compose .LBS2
-___
- &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
- &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
-
- &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
- &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
-}
-
-$code.=<<___;
-#ifndef __KERNEL__
-# include "arm_arch.h"
-
-# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
-# define VFP_ABI_POP vldmia sp!,{d8-d15}
-# define VFP_ABI_FRAME 0x40
-#else
-# define VFP_ABI_PUSH
-# define VFP_ABI_POP
-# define VFP_ABI_FRAME 0
-# define BSAES_ASM_EXTENDED_KEY
-# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-#endif
-
-#ifdef __thumb__
-# define adrl adr
-#endif
-
-#if __ARM_ARCH__>=7
-.text
-.syntax unified @ ARMv7-capable assembler is expected to handle this
-#ifdef __thumb2__
-.thumb
-#else
-.code 32
-#endif
-
-.fpu neon
-
-.type _bsaes_decrypt8,%function
-.align 4
-_bsaes_decrypt8:
- adr $const,_bsaes_decrypt8
- vldmia $key!, {@XMM[9]} @ round 0 key
- add $const,$const,#.LM0ISR-_bsaes_decrypt8
-
- vldmia $const!, {@XMM[8]} @ .LM0ISR
- veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
- veor @XMM[11], @XMM[1], @XMM[9]
- vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
- veor @XMM[12], @XMM[2], @XMM[9]
- vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
- veor @XMM[13], @XMM[3], @XMM[9]
- vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
- veor @XMM[14], @XMM[4], @XMM[9]
- vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
- veor @XMM[15], @XMM[5], @XMM[9]
- vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
- veor @XMM[10], @XMM[6], @XMM[9]
- vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
- veor @XMM[11], @XMM[7], @XMM[9]
- vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
- vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
-___
- &bitslice (@XMM[0..7, 8..11]);
-$code.=<<___;
- sub $rounds,$rounds,#1
- b .Ldec_sbox
-.align 4
-.Ldec_loop:
-___
- &ShiftRows (@XMM[0..7, 8..12]);
-$code.=".Ldec_sbox:\n";
- &InvSbox (@XMM[0..7, 8..15]);
-$code.=<<___;
- subs $rounds,$rounds,#1
- bcc .Ldec_done
-___
- &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]);
-$code.=<<___;
- vldmia $const, {@XMM[12]} @ .LISR
- ite eq @ Thumb2 thing, sanity check in ARM
- addeq $const,$const,#0x10
- bne .Ldec_loop
- vldmia $const, {@XMM[12]} @ .LISRM0
- b .Ldec_loop
-.align 4
-.Ldec_done:
-___
- &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]);
-$code.=<<___;
- vldmia $key, {@XMM[8]} @ last round key
- veor @XMM[6], @XMM[6], @XMM[8]
- veor @XMM[4], @XMM[4], @XMM[8]
- veor @XMM[2], @XMM[2], @XMM[8]
- veor @XMM[7], @XMM[7], @XMM[8]
- veor @XMM[3], @XMM[3], @XMM[8]
- veor @XMM[5], @XMM[5], @XMM[8]
- veor @XMM[0], @XMM[0], @XMM[8]
- veor @XMM[1], @XMM[1], @XMM[8]
- bx lr
-.size _bsaes_decrypt8,.-_bsaes_decrypt8
-
-.type _bsaes_const,%object
-.align 6
-_bsaes_const:
-.LM0ISR: @ InvShiftRows constants
- .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
-.LISR:
- .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
-.LISRM0:
- .quad 0x01040b0e0205080f, 0x0306090c00070a0d
-.LM0SR: @ ShiftRows constants
- .quad 0x0a0e02060f03070b, 0x0004080c05090d01
-.LSR:
- .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
-.LSRM0:
- .quad 0x0304090e00050a0f, 0x01060b0c0207080d
-.LM0:
- .quad 0x02060a0e03070b0f, 0x0004080c0105090d
-.LREVM0SR:
- .quad 0x090d01050c000408, 0x03070b0f060a0e02
-.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
-.align 6
-.size _bsaes_const,.-_bsaes_const
-
-.type _bsaes_encrypt8,%function
-.align 4
-_bsaes_encrypt8:
- adr $const,_bsaes_encrypt8
- vldmia $key!, {@XMM[9]} @ round 0 key
- sub $const,$const,#_bsaes_encrypt8-.LM0SR
-
- vldmia $const!, {@XMM[8]} @ .LM0SR
-_bsaes_encrypt8_alt:
- veor @XMM[10], @XMM[0], @XMM[9] @ xor with round0 key
- veor @XMM[11], @XMM[1], @XMM[9]
- vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
- veor @XMM[12], @XMM[2], @XMM[9]
- vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
- veor @XMM[13], @XMM[3], @XMM[9]
- vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
- veor @XMM[14], @XMM[4], @XMM[9]
- vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
- veor @XMM[15], @XMM[5], @XMM[9]
- vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
- veor @XMM[10], @XMM[6], @XMM[9]
- vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
- veor @XMM[11], @XMM[7], @XMM[9]
- vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
- vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
- vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
-_bsaes_encrypt8_bitslice:
-___
- &bitslice (@XMM[0..7, 8..11]);
-$code.=<<___;
- sub $rounds,$rounds,#1
- b .Lenc_sbox
-.align 4
-.Lenc_loop:
-___
- &ShiftRows (@XMM[0..7, 8..12]);
-$code.=".Lenc_sbox:\n";
- &Sbox (@XMM[0..7, 8..15]);
-$code.=<<___;
- subs $rounds,$rounds,#1
- bcc .Lenc_done
-___
- &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]);
-$code.=<<___;
- vldmia $const, {@XMM[12]} @ .LSR
-	ite	eq			@ Thumb2 thing, sanity check in ARM
- addeq $const,$const,#0x10
- bne .Lenc_loop
- vldmia $const, {@XMM[12]} @ .LSRM0
- b .Lenc_loop
-.align 4
-.Lenc_done:
-___
- # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
- &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]);
-$code.=<<___;
- vldmia $key, {@XMM[8]} @ last round key
- veor @XMM[4], @XMM[4], @XMM[8]
- veor @XMM[6], @XMM[6], @XMM[8]
- veor @XMM[3], @XMM[3], @XMM[8]
- veor @XMM[7], @XMM[7], @XMM[8]
- veor @XMM[2], @XMM[2], @XMM[8]
- veor @XMM[5], @XMM[5], @XMM[8]
- veor @XMM[0], @XMM[0], @XMM[8]
- veor @XMM[1], @XMM[1], @XMM[8]
- bx lr
-.size _bsaes_encrypt8,.-_bsaes_encrypt8
-___
-}
-{
-my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
-
-sub bitslice_key {
-my @x=reverse(@_[0..7]);
-my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
-
- &swapmove (@x[0,1],1,$bs0,$t2,$t3);
-$code.=<<___;
- @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
- vmov @x[2], @x[0]
- vmov @x[3], @x[1]
-___
- #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
-
- &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3);
-$code.=<<___;
- @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
- vmov @x[4], @x[0]
- vmov @x[6], @x[2]
- vmov @x[5], @x[1]
- vmov @x[7], @x[3]
-___
- &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3);
- &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3);
-}
-
-$code.=<<___;
-.type _bsaes_key_convert,%function
-.align 4
-_bsaes_key_convert:
- adr $const,_bsaes_key_convert
- vld1.8 {@XMM[7]}, [$inp]! @ load round 0 key
- sub $const,$const,#_bsaes_key_convert-.LM0
- vld1.8 {@XMM[15]}, [$inp]! @ load round 1 key
-
- vmov.i8 @XMM[8], #0x01 @ bit masks
- vmov.i8 @XMM[9], #0x02
- vmov.i8 @XMM[10], #0x04
- vmov.i8 @XMM[11], #0x08
- vmov.i8 @XMM[12], #0x10
- vmov.i8 @XMM[13], #0x20
- vldmia $const, {@XMM[14]} @ .LM0
-
-#ifdef __ARMEL__
- vrev32.8 @XMM[7], @XMM[7]
- vrev32.8 @XMM[15], @XMM[15]
-#endif
- sub $rounds,$rounds,#1
- vstmia $out!, {@XMM[7]} @ save round 0 key
- b .Lkey_loop
-
-.align 4
-.Lkey_loop:
- vtbl.8 `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
- vtbl.8 `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
- vmov.i8 @XMM[6], #0x40
- vmov.i8 @XMM[15], #0x80
-
- vtst.8 @XMM[0], @XMM[7], @XMM[8]
- vtst.8 @XMM[1], @XMM[7], @XMM[9]
- vtst.8 @XMM[2], @XMM[7], @XMM[10]
- vtst.8 @XMM[3], @XMM[7], @XMM[11]
- vtst.8 @XMM[4], @XMM[7], @XMM[12]
- vtst.8 @XMM[5], @XMM[7], @XMM[13]
- vtst.8 @XMM[6], @XMM[7], @XMM[6]
- vtst.8 @XMM[7], @XMM[7], @XMM[15]
- vld1.8 {@XMM[15]}, [$inp]! @ load next round key
- vmvn @XMM[0], @XMM[0] @ "pnot"
- vmvn @XMM[1], @XMM[1]
- vmvn @XMM[5], @XMM[5]
- vmvn @XMM[6], @XMM[6]
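-	@ in effect the complements select the set bits of the Sbox affine
-	@ constant 0x63 (bits 0,1,5,6), folding that constant into every
-	@ bit-sliced round key; callers fix up the last round key with the
-	@ .L63 value composed below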
-#ifdef __ARMEL__
- vrev32.8 @XMM[15], @XMM[15]
-#endif
- subs $rounds,$rounds,#1
- vstmia $out!,{@XMM[0]-@XMM[7]} @ write bit-sliced round key
- bne .Lkey_loop
-
- vmov.i8 @XMM[7],#0x63 @ compose .L63
- @ don't save last round key
- bx lr
-.size _bsaes_key_convert,.-_bsaes_key_convert
-___
-}
-
-if (0) {	# the following four functions are an unsupported interface
-		# used only for benchmarking...
-$code.=<<___;
-.globl bsaes_enc_key_convert
-.type bsaes_enc_key_convert,%function
-.align 4
-bsaes_enc_key_convert:
- stmdb sp!,{r4-r6,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
-
- ldr r5,[$inp,#240] @ pass rounds
- mov r4,$inp @ pass key
- mov r12,$out @ pass key schedule
- bl _bsaes_key_convert
- veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
- vstmia r12, {@XMM[7]} @ save last round key
-
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r6,pc}
-.size bsaes_enc_key_convert,.-bsaes_enc_key_convert
-
-.globl bsaes_encrypt_128
-.type bsaes_encrypt_128,%function
-.align 4
-bsaes_encrypt_128:
- stmdb sp!,{r4-r6,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
-.Lenc128_loop:
- vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
- vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
- mov r4,$key @ pass the key
- vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
- mov r5,#10 @ pass rounds
- vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
-
- bl _bsaes_encrypt8
-
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- vst1.8 {@XMM[4]}, [$out]!
- vst1.8 {@XMM[6]}, [$out]!
- vst1.8 {@XMM[3]}, [$out]!
- vst1.8 {@XMM[7]}, [$out]!
- vst1.8 {@XMM[2]}, [$out]!
- subs $len,$len,#0x80
- vst1.8 {@XMM[5]}, [$out]!
- bhi .Lenc128_loop
-
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r6,pc}
-.size bsaes_encrypt_128,.-bsaes_encrypt_128
-
-.globl bsaes_dec_key_convert
-.type bsaes_dec_key_convert,%function
-.align 4
-bsaes_dec_key_convert:
- stmdb sp!,{r4-r6,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
-
- ldr r5,[$inp,#240] @ pass rounds
- mov r4,$inp @ pass key
- mov r12,$out @ pass key schedule
- bl _bsaes_key_convert
- vldmia $out, {@XMM[6]}
- vstmia r12, {@XMM[15]} @ save last round key
- veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
- vstmia $out, {@XMM[7]}
-
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r6,pc}
-.size bsaes_dec_key_convert,.-bsaes_dec_key_convert
-
-.globl bsaes_decrypt_128
-.type bsaes_decrypt_128,%function
-.align 4
-bsaes_decrypt_128:
- stmdb sp!,{r4-r6,lr}
- vstmdb sp!,{d8-d15} @ ABI specification says so
-.Ldec128_loop:
- vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
- vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
- mov r4,$key @ pass the key
- vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
- mov r5,#10 @ pass rounds
- vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
-
- bl _bsaes_decrypt8
-
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- vst1.8 {@XMM[6]}, [$out]!
- vst1.8 {@XMM[4]}, [$out]!
- vst1.8 {@XMM[2]}, [$out]!
- vst1.8 {@XMM[7]}, [$out]!
- vst1.8 {@XMM[3]}, [$out]!
- subs $len,$len,#0x80
- vst1.8 {@XMM[5]}, [$out]!
- bhi .Ldec128_loop
-
- vldmia sp!,{d8-d15}
- ldmia sp!,{r4-r6,pc}
-.size bsaes_decrypt_128,.-bsaes_decrypt_128
-___
-}
-{
-my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
-my ($keysched)=("sp");
-
-$code.=<<___;
-.extern AES_cbc_encrypt
-.extern AES_decrypt
-
-.global bsaes_cbc_encrypt
-.type bsaes_cbc_encrypt,%function
-.align 5
-bsaes_cbc_encrypt:
-#ifndef __KERNEL__
- cmp $len, #128
-#ifndef __thumb__
- blo AES_cbc_encrypt
-#else
- bhs 1f
- b AES_cbc_encrypt
-1:
-#endif
-#endif
-
- @ it is up to the caller to make sure we are called with enc == 0
-
- mov ip, sp
- stmdb sp!, {r4-r10, lr}
- VFP_ABI_PUSH
- ldr $ivp, [ip] @ IV is 1st arg on the stack
- mov $len, $len, lsr#4 @ len in 16 byte blocks
- sub sp, #0x10 @ scratch space to carry over the IV
- mov $fp, sp @ save sp
-
- ldr $rounds, [$key, #240] @ get # of rounds
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
-	add	r12, #`128-32`		@ size of bit-sliced key schedule
-
- @ populate the key schedule
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- mov sp, r12 @ sp is $keysched
- bl _bsaes_key_convert
- vldmia $keysched, {@XMM[6]}
- vstmia r12, {@XMM[15]} @ save last round key
- veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
- vstmia $keysched, {@XMM[7]}
-#else
- ldr r12, [$key, #244]
- eors r12, #1
- beq 0f
-
- @ populate the key schedule
- str r12, [$key, #244]
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- add r12, $key, #248 @ pass key schedule
- bl _bsaes_key_convert
- add r4, $key, #248
- vldmia r4, {@XMM[6]}
- vstmia r12, {@XMM[15]} @ save last round key
- veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
- vstmia r4, {@XMM[7]}
-
-.align 2
-0:
-#endif
-
- vld1.8 {@XMM[15]}, [$ivp] @ load IV
- b .Lcbc_dec_loop
-
-.align 4
-.Lcbc_dec_loop:
- subs $len, $len, #0x8
- bmi .Lcbc_dec_loop_finish
-
- vld1.8 {@XMM[0]-@XMM[1]}, [$inp]! @ load input
- vld1.8 {@XMM[2]-@XMM[3]}, [$inp]!
-#ifndef BSAES_ASM_EXTENDED_KEY
- mov r4, $keysched @ pass the key
-#else
- add r4, $key, #248
-#endif
- vld1.8 {@XMM[4]-@XMM[5]}, [$inp]!
- mov r5, $rounds
- vld1.8 {@XMM[6]-@XMM[7]}, [$inp]
- sub $inp, $inp, #0x60
- vstmia $fp, {@XMM[15]} @ put aside IV
-
- bl _bsaes_decrypt8
-
- vldmia $fp, {@XMM[14]} @ reload IV
- vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
- veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
- vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
- veor @XMM[1], @XMM[1], @XMM[8]
- veor @XMM[6], @XMM[6], @XMM[9]
- vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
- veor @XMM[4], @XMM[4], @XMM[10]
- veor @XMM[2], @XMM[2], @XMM[11]
- vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
- veor @XMM[7], @XMM[7], @XMM[12]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- veor @XMM[3], @XMM[3], @XMM[13]
- vst1.8 {@XMM[6]}, [$out]!
- veor @XMM[5], @XMM[5], @XMM[14]
- vst1.8 {@XMM[4]}, [$out]!
- vst1.8 {@XMM[2]}, [$out]!
- vst1.8 {@XMM[7]}, [$out]!
- vst1.8 {@XMM[3]}, [$out]!
- vst1.8 {@XMM[5]}, [$out]!
-
- b .Lcbc_dec_loop
-
-.Lcbc_dec_loop_finish:
- adds $len, $len, #8
- beq .Lcbc_dec_done
-
- vld1.8 {@XMM[0]}, [$inp]! @ load input
- cmp $len, #2
- blo .Lcbc_dec_one
- vld1.8 {@XMM[1]}, [$inp]!
-#ifndef BSAES_ASM_EXTENDED_KEY
- mov r4, $keysched @ pass the key
-#else
- add r4, $key, #248
-#endif
- mov r5, $rounds
- vstmia $fp, {@XMM[15]} @ put aside IV
- beq .Lcbc_dec_two
- vld1.8 {@XMM[2]}, [$inp]!
- cmp $len, #4
- blo .Lcbc_dec_three
- vld1.8 {@XMM[3]}, [$inp]!
- beq .Lcbc_dec_four
- vld1.8 {@XMM[4]}, [$inp]!
- cmp $len, #6
- blo .Lcbc_dec_five
- vld1.8 {@XMM[5]}, [$inp]!
- beq .Lcbc_dec_six
- vld1.8 {@XMM[6]}, [$inp]!
- sub $inp, $inp, #0x70
-
- bl _bsaes_decrypt8
-
- vldmia $fp, {@XMM[14]} @ reload IV
- vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
- veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
- vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
- veor @XMM[1], @XMM[1], @XMM[8]
- veor @XMM[6], @XMM[6], @XMM[9]
- vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
- veor @XMM[4], @XMM[4], @XMM[10]
- veor @XMM[2], @XMM[2], @XMM[11]
- vld1.8 {@XMM[15]}, [$inp]!
- veor @XMM[7], @XMM[7], @XMM[12]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- veor @XMM[3], @XMM[3], @XMM[13]
- vst1.8 {@XMM[6]}, [$out]!
- vst1.8 {@XMM[4]}, [$out]!
- vst1.8 {@XMM[2]}, [$out]!
- vst1.8 {@XMM[7]}, [$out]!
- vst1.8 {@XMM[3]}, [$out]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_six:
- sub $inp, $inp, #0x60
- bl _bsaes_decrypt8
- vldmia $fp,{@XMM[14]} @ reload IV
- vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
- veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
- vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
- veor @XMM[1], @XMM[1], @XMM[8]
- veor @XMM[6], @XMM[6], @XMM[9]
- vld1.8 {@XMM[12]}, [$inp]!
- veor @XMM[4], @XMM[4], @XMM[10]
- veor @XMM[2], @XMM[2], @XMM[11]
- vld1.8 {@XMM[15]}, [$inp]!
- veor @XMM[7], @XMM[7], @XMM[12]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- vst1.8 {@XMM[6]}, [$out]!
- vst1.8 {@XMM[4]}, [$out]!
- vst1.8 {@XMM[2]}, [$out]!
- vst1.8 {@XMM[7]}, [$out]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_five:
- sub $inp, $inp, #0x50
- bl _bsaes_decrypt8
- vldmia $fp, {@XMM[14]} @ reload IV
- vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
- veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
- vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
- veor @XMM[1], @XMM[1], @XMM[8]
- veor @XMM[6], @XMM[6], @XMM[9]
- vld1.8 {@XMM[15]}, [$inp]!
- veor @XMM[4], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- veor @XMM[2], @XMM[2], @XMM[11]
- vst1.8 {@XMM[6]}, [$out]!
- vst1.8 {@XMM[4]}, [$out]!
- vst1.8 {@XMM[2]}, [$out]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_four:
- sub $inp, $inp, #0x40
- bl _bsaes_decrypt8
- vldmia $fp, {@XMM[14]} @ reload IV
- vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
- veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
- vld1.8 {@XMM[10]}, [$inp]!
- veor @XMM[1], @XMM[1], @XMM[8]
- veor @XMM[6], @XMM[6], @XMM[9]
- vld1.8 {@XMM[15]}, [$inp]!
- veor @XMM[4], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- vst1.8 {@XMM[6]}, [$out]!
- vst1.8 {@XMM[4]}, [$out]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_three:
- sub $inp, $inp, #0x30
- bl _bsaes_decrypt8
- vldmia $fp, {@XMM[14]} @ reload IV
- vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ reload input
- veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
- vld1.8 {@XMM[15]}, [$inp]!
- veor @XMM[1], @XMM[1], @XMM[8]
- veor @XMM[6], @XMM[6], @XMM[9]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- vst1.8 {@XMM[6]}, [$out]!
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_two:
- sub $inp, $inp, #0x20
- bl _bsaes_decrypt8
- vldmia $fp, {@XMM[14]} @ reload IV
- vld1.8 {@XMM[8]}, [$inp]! @ reload input
- veor @XMM[0], @XMM[0], @XMM[14] @ ^= IV
- vld1.8 {@XMM[15]}, [$inp]! @ reload input
- veor @XMM[1], @XMM[1], @XMM[8]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- b .Lcbc_dec_done
-.align 4
-.Lcbc_dec_one:
- sub $inp, $inp, #0x10
- mov $rounds, $out @ save original out pointer
- mov $out, $fp @ use the iv scratch space as out buffer
- mov r2, $key
- vmov @XMM[4],@XMM[15] @ just in case ensure that IV
- vmov @XMM[5],@XMM[0] @ and input are preserved
- bl AES_decrypt
- vld1.8 {@XMM[0]}, [$fp,:64] @ load result
- veor @XMM[0], @XMM[0], @XMM[4] @ ^= IV
- vmov @XMM[15], @XMM[5] @ @XMM[5] holds input
- vst1.8 {@XMM[0]}, [$rounds] @ write output
-
-.Lcbc_dec_done:
-#ifndef BSAES_ASM_EXTENDED_KEY
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-.Lcbc_dec_bzero: @ wipe key schedule [if any]
- vstmia $keysched!, {q0-q1}
- cmp $keysched, $fp
- bne .Lcbc_dec_bzero
-#endif
-
- mov sp, $fp
- add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
- vst1.8 {@XMM[15]}, [$ivp] @ return IV
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc}
-.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
-___
-}
-{
-my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
-my $const = "r6"; # shared with _bsaes_encrypt8_alt
-my $keysched = "sp";
-
-$code.=<<___;
-.extern AES_encrypt
-.global bsaes_ctr32_encrypt_blocks
-.type bsaes_ctr32_encrypt_blocks,%function
-.align 5
-bsaes_ctr32_encrypt_blocks:
- cmp $len, #8 @ use plain AES for
- blo .Lctr_enc_short @ small sizes
-
- mov ip, sp
- stmdb sp!, {r4-r10, lr}
- VFP_ABI_PUSH
- ldr $ctr, [ip] @ ctr is 1st arg on the stack
- sub sp, sp, #0x10 @ scratch space to carry over the ctr
- mov $fp, sp @ save sp
-
- ldr $rounds, [$key, #240] @ get # of rounds
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
- add r12, #`128-32` @ size of bit-sliced key schedule
-
- @ populate the key schedule
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- mov sp, r12 @ sp is $keysched
- bl _bsaes_key_convert
- veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
- vstmia r12, {@XMM[7]} @ save last round key
-
- vld1.8 {@XMM[0]}, [$ctr] @ load counter
- add $ctr, $const, #.LREVM0SR-.LM0 @ borrow $ctr
- vldmia $keysched, {@XMM[4]} @ load round0 key
-#else
- ldr r12, [$key, #244]
- eors r12, #1
- beq 0f
-
- @ populate the key schedule
- str r12, [$key, #244]
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- add r12, $key, #248 @ pass key schedule
- bl _bsaes_key_convert
- veor @XMM[7],@XMM[7],@XMM[15] @ fix up last round key
- vstmia r12, {@XMM[7]} @ save last round key
-
-.align 2
-0: add r12, $key, #248
- vld1.8 {@XMM[0]}, [$ctr] @ load counter
- adrl $ctr, .LREVM0SR @ borrow $ctr
- vldmia r12, {@XMM[4]} @ load round0 key
- sub sp, #0x10 @ place for adjusted round0 key
-#endif
-
- vmov.i32 @XMM[8],#1 @ compose 1<<96
- veor @XMM[9],@XMM[9],@XMM[9]
- vrev32.8 @XMM[0],@XMM[0]
- vext.8 @XMM[8],@XMM[9],@XMM[8],#4
- vrev32.8 @XMM[4],@XMM[4]
- vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
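-	@ (the counter was vrev32'd above, so its big-endian 32-bit block
-	@ counter now sits as a native u32 in the top word, and the 1<<96 /
-	@ 2<<96 values step it with plain vadd.u32)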
- vstmia $keysched, {@XMM[4]} @ save adjusted round0 key
- b .Lctr_enc_loop
-
-.align 4
-.Lctr_enc_loop:
- vadd.u32 @XMM[10], @XMM[8], @XMM[9] @ compose 3<<96
- vadd.u32 @XMM[1], @XMM[0], @XMM[8] @ +1
- vadd.u32 @XMM[2], @XMM[0], @XMM[9] @ +2
- vadd.u32 @XMM[3], @XMM[0], @XMM[10] @ +3
- vadd.u32 @XMM[4], @XMM[1], @XMM[10]
- vadd.u32 @XMM[5], @XMM[2], @XMM[10]
- vadd.u32 @XMM[6], @XMM[3], @XMM[10]
- vadd.u32 @XMM[7], @XMM[4], @XMM[10]
- vadd.u32 @XMM[10], @XMM[5], @XMM[10] @ next counter
-
-	@ Borrow the prologue from _bsaes_encrypt8, using the opportunity
-	@ to flip the byte order in the 32-bit counters
-
- vldmia $keysched, {@XMM[9]} @ load round0 key
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, $keysched, #0x10 @ pass next round key
-#else
- add r4, $key, #`248+16`
-#endif
- vldmia $ctr, {@XMM[8]} @ .LREVM0SR
- mov r5, $rounds @ pass rounds
- vstmia $fp, {@XMM[10]} @ save next counter
- sub $const, $ctr, #.LREVM0SR-.LSR @ pass constants
-
- bl _bsaes_encrypt8_alt
-
- subs $len, $len, #8
- blo .Lctr_enc_loop_done
-
- vld1.8 {@XMM[8]-@XMM[9]}, [$inp]! @ load input
- vld1.8 {@XMM[10]-@XMM[11]}, [$inp]!
- veor @XMM[0], @XMM[8]
- veor @XMM[1], @XMM[9]
- vld1.8 {@XMM[12]-@XMM[13]}, [$inp]!
- veor @XMM[4], @XMM[10]
- veor @XMM[6], @XMM[11]
- vld1.8 {@XMM[14]-@XMM[15]}, [$inp]!
- veor @XMM[3], @XMM[12]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]! @ write output
- veor @XMM[7], @XMM[13]
- veor @XMM[2], @XMM[14]
- vst1.8 {@XMM[4]}, [$out]!
- veor @XMM[5], @XMM[15]
- vst1.8 {@XMM[6]}, [$out]!
- vmov.i32 @XMM[8], #1 @ compose 1<<96
- vst1.8 {@XMM[3]}, [$out]!
- veor @XMM[9], @XMM[9], @XMM[9]
- vst1.8 {@XMM[7]}, [$out]!
- vext.8 @XMM[8], @XMM[9], @XMM[8], #4
- vst1.8 {@XMM[2]}, [$out]!
- vadd.u32 @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
- vst1.8 {@XMM[5]}, [$out]!
- vldmia $fp, {@XMM[0]} @ load counter
-
- bne .Lctr_enc_loop
- b .Lctr_enc_done
-
-.align 4
-.Lctr_enc_loop_done:
- add $len, $len, #8
- vld1.8 {@XMM[8]}, [$inp]! @ load input
- veor @XMM[0], @XMM[8]
- vst1.8 {@XMM[0]}, [$out]! @ write output
- cmp $len, #2
- blo .Lctr_enc_done
- vld1.8 {@XMM[9]}, [$inp]!
- veor @XMM[1], @XMM[9]
- vst1.8 {@XMM[1]}, [$out]!
- beq .Lctr_enc_done
- vld1.8 {@XMM[10]}, [$inp]!
- veor @XMM[4], @XMM[10]
- vst1.8 {@XMM[4]}, [$out]!
- cmp $len, #4
- blo .Lctr_enc_done
- vld1.8 {@XMM[11]}, [$inp]!
- veor @XMM[6], @XMM[11]
- vst1.8 {@XMM[6]}, [$out]!
- beq .Lctr_enc_done
- vld1.8 {@XMM[12]}, [$inp]!
- veor @XMM[3], @XMM[12]
- vst1.8 {@XMM[3]}, [$out]!
- cmp $len, #6
- blo .Lctr_enc_done
- vld1.8 {@XMM[13]}, [$inp]!
- veor @XMM[7], @XMM[13]
- vst1.8 {@XMM[7]}, [$out]!
- beq .Lctr_enc_done
- vld1.8 {@XMM[14]}, [$inp]
- veor @XMM[2], @XMM[14]
- vst1.8 {@XMM[2]}, [$out]!
-
-.Lctr_enc_done:
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-#ifndef BSAES_ASM_EXTENDED_KEY
-.Lctr_enc_bzero: @ wipe key schedule [if any]
- vstmia $keysched!, {q0-q1}
- cmp $keysched, $fp
- bne .Lctr_enc_bzero
-#else
- vstmia $keysched, {q0-q1}
-#endif
-
- mov sp, $fp
- add sp, #0x10 @ add sp,$fp,#0x10 is no good for thumb
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc} @ return
-
-.align 4
-.Lctr_enc_short:
- ldr ip, [sp] @ ctr pointer is passed on stack
- stmdb sp!, {r4-r8, lr}
-
- mov r4, $inp @ copy arguments
- mov r5, $out
- mov r6, $len
- mov r7, $key
- ldr r8, [ip, #12] @ load counter LSW
- vld1.8 {@XMM[1]}, [ip] @ load whole counter value
-#ifdef __ARMEL__
- rev r8, r8
-#endif
- sub sp, sp, #0x10
- vst1.8 {@XMM[1]}, [sp,:64] @ copy counter value
- sub sp, sp, #0x10
-
-.Lctr_enc_short_loop:
- add r0, sp, #0x10 @ input counter value
- mov r1, sp @ output on the stack
- mov r2, r7 @ key
-
- bl AES_encrypt
-
- vld1.8 {@XMM[0]}, [r4]! @ load input
- vld1.8 {@XMM[1]}, [sp,:64] @ load encrypted counter
- add r8, r8, #1
-#ifdef __ARMEL__
- rev r0, r8
- str r0, [sp, #0x1c] @ next counter value
-#else
- str r8, [sp, #0x1c] @ next counter value
-#endif
- veor @XMM[0],@XMM[0],@XMM[1]
- vst1.8 {@XMM[0]}, [r5]! @ store output
- subs r6, r6, #1
- bne .Lctr_enc_short_loop
-
- vmov.i32 q0, #0
- vmov.i32 q1, #0
- vstmia sp!, {q0-q1}
-
- ldmia sp!, {r4-r8, pc}
-.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
-___
-}
-{
-######################################################################
-# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
-# const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
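-# The tweak schedule in this section steps the 128-bit tweak by multiplying it
-# by x in GF(2^128)/(x^128+x^7+x^2+x+1) once per block; .Lxts_magic below holds
-# the {1, 0x87} constants that implement the reduction. A scalar model of one
-# step (hypothetical helper, assumes 64-bit perl, unused by the generator):
-sub _xts_tweak_double {
-my ($lo,$hi)=@_;			# tweak as two little-endian 64-bit halves
-	my $carry = ($hi>>63)&1;	# bit 127 set -> reduce by 0x87
-	$hi = (($hi<<1)|(($lo>>63)&1)) & 0xffffffffffffffff;
-	$lo = (($lo<<1) & 0xffffffffffffffff) ^ ($carry ? 0x87 : 0);
-	return ($lo,$hi);
-}
-#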
-my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
-my $const="r6"; # returned by _bsaes_key_convert
-my $twmask=@XMM[5];
-my @T=@XMM[6..7];
-
-$code.=<<___;
-.globl bsaes_xts_encrypt
-.type bsaes_xts_encrypt,%function
-.align 4
-bsaes_xts_encrypt:
- mov ip, sp
- stmdb sp!, {r4-r10, lr} @ 0x20
- VFP_ABI_PUSH
- mov r6, sp @ future $fp
-
- mov $inp, r0
- mov $out, r1
- mov $len, r2
- mov $key, r3
-
- sub r0, sp, #0x10 @ 0x10
- bic r0, #0xf @ align at 16 bytes
- mov sp, r0
-
-#ifdef XTS_CHAIN_TWEAK
- ldr r0, [ip] @ pointer to input tweak
-#else
- @ generate initial tweak
- ldr r0, [ip, #4] @ iv[]
- mov r1, sp
- ldr r2, [ip, #0] @ key2
- bl AES_encrypt
- mov r0,sp @ pointer to initial tweak
-#endif
-
- ldr $rounds, [$key, #240] @ get # of rounds
- mov $fp, r6
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
- @ add r12, #`128-32` @ size of bit-sliced key schedule
- sub r12, #`32+16` @ place for tweak[9]
-
- @ populate the key schedule
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- mov sp, r12
- add r12, #0x90 @ pass key schedule
- bl _bsaes_key_convert
- veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
- vstmia r12, {@XMM[7]} @ save last round key
-#else
- ldr r12, [$key, #244]
- eors r12, #1
- beq 0f
-
- str r12, [$key, #244]
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- add r12, $key, #248 @ pass key schedule
- bl _bsaes_key_convert
- veor @XMM[7], @XMM[7], @XMM[15] @ fix up last round key
- vstmia r12, {@XMM[7]}
-
-.align 2
-0: sub sp, #0x90 @ place for tweak[9]
-#endif
-
- vld1.8 {@XMM[8]}, [r0] @ initial tweak
- adr $magic, .Lxts_magic
-
- subs $len, #0x80
- blo .Lxts_enc_short
- b .Lxts_enc_loop
-
-.align 4
-.Lxts_enc_loop:
- vldmia $magic, {$twmask} @ load XTS magic
- vshr.s64 @T[0], @XMM[8], #63
- mov r0, sp
- vand @T[0], @T[0], $twmask
-___
-for($i=9;$i<16;$i++) {
-$code.=<<___;
- vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
- vst1.64 {@XMM[$i-1]}, [r0,:128]!
- vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
- vshr.s64 @T[1], @XMM[$i], #63
- veor @XMM[$i], @XMM[$i], @T[0]
- vand @T[1], @T[1], $twmask
-___
- @T=reverse(@T);
-
-$code.=<<___ if ($i>=10);
- vld1.8 {@XMM[$i-10]}, [$inp]!
-___
-$code.=<<___ if ($i>=11);
- veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
-___
-}
-$code.=<<___;
- vadd.u64 @XMM[8], @XMM[15], @XMM[15]
- vst1.64 {@XMM[15]}, [r0,:128]!
- vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
- veor @XMM[8], @XMM[8], @T[0]
- vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
-
- vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
- veor @XMM[5], @XMM[5], @XMM[13]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[6], @XMM[6], @XMM[14]
- mov r5, $rounds @ pass rounds
- veor @XMM[7], @XMM[7], @XMM[15]
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[6], @XMM[11]
- vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
- veor @XMM[10], @XMM[3], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- veor @XMM[11], @XMM[7], @XMM[13]
- veor @XMM[12], @XMM[2], @XMM[14]
- vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
- veor @XMM[13], @XMM[5], @XMM[15]
- vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
-
- subs $len, #0x80
- bpl .Lxts_enc_loop
-
-.Lxts_enc_short:
- adds $len, #0x70
- bmi .Lxts_enc_done
-
- vldmia $magic, {$twmask} @ load XTS magic
- vshr.s64 @T[0], @XMM[8], #63
- mov r0, sp
- vand @T[0], @T[0], $twmask
-___
-for($i=9;$i<16;$i++) {
-$code.=<<___;
- vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
- vst1.64 {@XMM[$i-1]}, [r0,:128]!
- vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
- vshr.s64 @T[1], @XMM[$i], #63
- veor @XMM[$i], @XMM[$i], @T[0]
- vand @T[1], @T[1], $twmask
-___
- @T=reverse(@T);
-
-$code.=<<___ if ($i>=10);
- vld1.8 {@XMM[$i-10]}, [$inp]!
- subs $len, #0x10
- bmi .Lxts_enc_`$i-9`
-___
-$code.=<<___ if ($i>=11);
- veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
-___
-}
-$code.=<<___;
- sub $len, #0x10
- vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
-
- vld1.8 {@XMM[6]}, [$inp]!
- veor @XMM[5], @XMM[5], @XMM[13]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[6], @XMM[6], @XMM[14]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[6], @XMM[11]
- vld1.64 {@XMM[14]}, [r0,:128]!
- veor @XMM[10], @XMM[3], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- veor @XMM[11], @XMM[7], @XMM[13]
- veor @XMM[12], @XMM[2], @XMM[14]
- vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
- vst1.8 {@XMM[12]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_6:
- vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
-
- veor @XMM[4], @XMM[4], @XMM[12]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[5], @XMM[5], @XMM[13]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[6], @XMM[11]
- veor @XMM[10], @XMM[3], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- veor @XMM[11], @XMM[7], @XMM[13]
- vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-
-@ put this in range for both ARM and Thumb mode adr instructions
-.align 5
-.Lxts_magic:
- .quad 1, 0x87
-
-.align 5
-.Lxts_enc_5:
- vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
-
- veor @XMM[3], @XMM[3], @XMM[11]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[4], @XMM[4], @XMM[12]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[6], @XMM[11]
- veor @XMM[10], @XMM[3], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- vst1.8 {@XMM[10]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_4:
- vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
-
- veor @XMM[2], @XMM[2], @XMM[10]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[3], @XMM[3], @XMM[11]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[6], @XMM[11]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_3:
- vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
-
- veor @XMM[1], @XMM[1], @XMM[9]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[2], @XMM[2], @XMM[10]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
- vld1.64 {@XMM[10]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[4], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- vst1.8 {@XMM[8]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_2:
- vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
-
- veor @XMM[0], @XMM[0], @XMM[8]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[1], @XMM[1], @XMM[9]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_encrypt8
-
- vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- veor @XMM[1], @XMM[1], @XMM[ 9]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_enc_done
-.align 4
-.Lxts_enc_1:
- mov r0, sp
- veor @XMM[0], @XMM[8]
- mov r1, sp
- vst1.8 {@XMM[0]}, [sp,:128]
- mov r2, $key
- mov r4, $fp @ preserve fp
-
- bl AES_encrypt
-
- vld1.8 {@XMM[0]}, [sp,:128]
- veor @XMM[0], @XMM[0], @XMM[8]
- vst1.8 {@XMM[0]}, [$out]!
- mov $fp, r4
-
- vmov @XMM[8], @XMM[9] @ next round tweak
-
-.Lxts_enc_done:
-#ifndef XTS_CHAIN_TWEAK
- adds $len, #0x10
- beq .Lxts_enc_ret
- sub r6, $out, #0x10
-
-.Lxts_enc_steal:
- ldrb r0, [$inp], #1
- ldrb r1, [$out, #-0x10]
- strb r0, [$out, #-0x10]
- strb r1, [$out], #1
-
- subs $len, #1
- bhi .Lxts_enc_steal
-
- vld1.8 {@XMM[0]}, [r6]
- mov r0, sp
- veor @XMM[0], @XMM[0], @XMM[8]
- mov r1, sp
- vst1.8 {@XMM[0]}, [sp,:128]
- mov r2, $key
- mov r4, $fp @ preserve fp
-
- bl AES_encrypt
-
- vld1.8 {@XMM[0]}, [sp,:128]
- veor @XMM[0], @XMM[0], @XMM[8]
- vst1.8 {@XMM[0]}, [r6]
- mov $fp, r4
-#endif
-
-.Lxts_enc_ret:
- bic r0, $fp, #0xf
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-#ifdef XTS_CHAIN_TWEAK
- ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
-#endif
-.Lxts_enc_bzero: @ wipe key schedule [if any]
- vstmia sp!, {q0-q1}
- cmp sp, r0
- bne .Lxts_enc_bzero
-
- mov sp, $fp
-#ifdef XTS_CHAIN_TWEAK
- vst1.8 {@XMM[8]}, [r1]
-#endif
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc} @ return
-
-.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
-
-.globl bsaes_xts_decrypt
-.type bsaes_xts_decrypt,%function
-.align 4
-bsaes_xts_decrypt:
- mov ip, sp
- stmdb sp!, {r4-r10, lr} @ 0x20
- VFP_ABI_PUSH
- mov r6, sp @ future $fp
-
- mov $inp, r0
- mov $out, r1
- mov $len, r2
- mov $key, r3
-
- sub r0, sp, #0x10 @ 0x10
- bic r0, #0xf @ align at 16 bytes
- mov sp, r0
-
-#ifdef XTS_CHAIN_TWEAK
- ldr r0, [ip] @ pointer to input tweak
-#else
- @ generate initial tweak
- ldr r0, [ip, #4] @ iv[]
- mov r1, sp
- ldr r2, [ip, #0] @ key2
- bl AES_encrypt
- mov r0, sp @ pointer to initial tweak
-#endif
-
- ldr $rounds, [$key, #240] @ get # of rounds
- mov $fp, r6
-#ifndef BSAES_ASM_EXTENDED_KEY
- @ allocate the key schedule on the stack
- sub r12, sp, $rounds, lsl#7 @ 128 bytes per inner round key
- @ add r12, #`128-32` @ size of bit-sliced key schedule
- sub r12, #`32+16` @ place for tweak[9]
-
- @ populate the key schedule
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- mov sp, r12
- add r12, #0x90 @ pass key schedule
- bl _bsaes_key_convert
- add r4, sp, #0x90
- vldmia r4, {@XMM[6]}
- vstmia r12, {@XMM[15]} @ save last round key
- veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
- vstmia r4, {@XMM[7]}
-#else
- ldr r12, [$key, #244]
- eors r12, #1
- beq 0f
-
- str r12, [$key, #244]
- mov r4, $key @ pass key
- mov r5, $rounds @ pass # of rounds
- add r12, $key, #248 @ pass key schedule
- bl _bsaes_key_convert
- add r4, $key, #248
- vldmia r4, {@XMM[6]}
- vstmia r12, {@XMM[15]} @ save last round key
- veor @XMM[7], @XMM[7], @XMM[6] @ fix up round 0 key
- vstmia r4, {@XMM[7]}
-
-.align 2
-0: sub sp, #0x90 @ place for tweak[9]
-#endif
- vld1.8 {@XMM[8]}, [r0] @ initial tweak
- adr $magic, .Lxts_magic
-
- tst $len, #0xf @ if not multiple of 16
- it ne @ Thumb2 thing, sanity check in ARM
- subne $len, #0x10 @ subtract another 16 bytes
- subs $len, #0x80
-
- blo .Lxts_dec_short
- b .Lxts_dec_loop
-
-.align 4
-.Lxts_dec_loop:
- vldmia $magic, {$twmask} @ load XTS magic
- vshr.s64 @T[0], @XMM[8], #63
- mov r0, sp
- vand @T[0], @T[0], $twmask
-___
-for($i=9;$i<16;$i++) {
-$code.=<<___;
- vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
- vst1.64 {@XMM[$i-1]}, [r0,:128]!
- vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
- vshr.s64 @T[1], @XMM[$i], #63
- veor @XMM[$i], @XMM[$i], @T[0]
- vand @T[1], @T[1], $twmask
-___
- @T=reverse(@T);
-
-$code.=<<___ if ($i>=10);
- vld1.8 {@XMM[$i-10]}, [$inp]!
-___
-$code.=<<___ if ($i>=11);
- veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
-___
-}
-$code.=<<___;
- vadd.u64 @XMM[8], @XMM[15], @XMM[15]
- vst1.64 {@XMM[15]}, [r0,:128]!
- vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
- veor @XMM[8], @XMM[8], @T[0]
- vst1.64 {@XMM[8]}, [r0,:128] @ next round tweak
-
- vld1.8 {@XMM[6]-@XMM[7]}, [$inp]!
- veor @XMM[5], @XMM[5], @XMM[13]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[6], @XMM[6], @XMM[14]
- mov r5, $rounds @ pass rounds
- veor @XMM[7], @XMM[7], @XMM[15]
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[6], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[4], @XMM[11]
- vld1.64 {@XMM[14]-@XMM[15]}, [r0,:128]!
- veor @XMM[10], @XMM[2], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- veor @XMM[11], @XMM[7], @XMM[13]
- veor @XMM[12], @XMM[3], @XMM[14]
- vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
- veor @XMM[13], @XMM[5], @XMM[15]
- vst1.8 {@XMM[12]-@XMM[13]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
-
- subs $len, #0x80
- bpl .Lxts_dec_loop
-
-.Lxts_dec_short:
- adds $len, #0x70
- bmi .Lxts_dec_done
-
- vldmia $magic, {$twmask} @ load XTS magic
- vshr.s64 @T[0], @XMM[8], #63
- mov r0, sp
- vand @T[0], @T[0], $twmask
-___
-for($i=9;$i<16;$i++) {
-$code.=<<___;
- vadd.u64 @XMM[$i], @XMM[$i-1], @XMM[$i-1]
- vst1.64 {@XMM[$i-1]}, [r0,:128]!
- vswp `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
- vshr.s64 @T[1], @XMM[$i], #63
- veor @XMM[$i], @XMM[$i], @T[0]
- vand @T[1], @T[1], $twmask
-___
- @T=reverse(@T);
-
-$code.=<<___ if ($i>=10);
- vld1.8 {@XMM[$i-10]}, [$inp]!
- subs $len, #0x10
- bmi .Lxts_dec_`$i-9`
-___
-$code.=<<___ if ($i>=11);
- veor @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
-___
-}
-$code.=<<___;
- sub $len, #0x10
- vst1.64 {@XMM[15]}, [r0,:128] @ next round tweak
-
- vld1.8 {@XMM[6]}, [$inp]!
- veor @XMM[5], @XMM[5], @XMM[13]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[6], @XMM[6], @XMM[14]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[6], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[4], @XMM[11]
- vld1.64 {@XMM[14]}, [r0,:128]!
- veor @XMM[10], @XMM[2], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- veor @XMM[11], @XMM[7], @XMM[13]
- veor @XMM[12], @XMM[3], @XMM[14]
- vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
- vst1.8 {@XMM[12]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_6:
- vst1.64 {@XMM[14]}, [r0,:128] @ next round tweak
-
- veor @XMM[4], @XMM[4], @XMM[12]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[5], @XMM[5], @XMM[13]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]-@XMM[13]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[6], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[4], @XMM[11]
- veor @XMM[10], @XMM[2], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- veor @XMM[11], @XMM[7], @XMM[13]
- vst1.8 {@XMM[10]-@XMM[11]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_5:
- vst1.64 {@XMM[13]}, [r0,:128] @ next round tweak
-
- veor @XMM[3], @XMM[3], @XMM[11]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[4], @XMM[4], @XMM[12]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- vld1.64 {@XMM[12]}, [r0,:128]!
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[6], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[4], @XMM[11]
- veor @XMM[10], @XMM[2], @XMM[12]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
- vst1.8 {@XMM[10]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_4:
- vst1.64 {@XMM[12]}, [r0,:128] @ next round tweak
-
- veor @XMM[2], @XMM[2], @XMM[10]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[3], @XMM[3], @XMM[11]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
- vld1.64 {@XMM[10]-@XMM[11]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[6], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- veor @XMM[9], @XMM[4], @XMM[11]
- vst1.8 {@XMM[8]-@XMM[9]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_3:
- vst1.64 {@XMM[11]}, [r0,:128] @ next round tweak
-
- veor @XMM[1], @XMM[1], @XMM[9]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[2], @XMM[2], @XMM[10]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
- vld1.64 {@XMM[10]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- veor @XMM[1], @XMM[1], @XMM[ 9]
- veor @XMM[8], @XMM[6], @XMM[10]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
- vst1.8 {@XMM[8]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_2:
- vst1.64 {@XMM[10]}, [r0,:128] @ next round tweak
-
- veor @XMM[0], @XMM[0], @XMM[8]
-#ifndef BSAES_ASM_EXTENDED_KEY
- add r4, sp, #0x90 @ pass key schedule
-#else
- add r4, $key, #248 @ pass key schedule
-#endif
- veor @XMM[1], @XMM[1], @XMM[9]
- mov r5, $rounds @ pass rounds
- mov r0, sp
-
- bl _bsaes_decrypt8
-
- vld1.64 {@XMM[8]-@XMM[9]}, [r0,:128]!
- veor @XMM[0], @XMM[0], @XMM[ 8]
- veor @XMM[1], @XMM[1], @XMM[ 9]
- vst1.8 {@XMM[0]-@XMM[1]}, [$out]!
-
- vld1.64 {@XMM[8]}, [r0,:128] @ next round tweak
- b .Lxts_dec_done
-.align 4
-.Lxts_dec_1:
- mov r0, sp
- veor @XMM[0], @XMM[8]
- mov r1, sp
- vst1.8 {@XMM[0]}, [sp,:128]
- mov r2, $key
- mov r4, $fp @ preserve fp
- mov r5, $magic @ preserve magic
-
- bl AES_decrypt
-
- vld1.8 {@XMM[0]}, [sp,:128]
- veor @XMM[0], @XMM[0], @XMM[8]
- vst1.8 {@XMM[0]}, [$out]!
- mov $fp, r4
- mov $magic, r5
-
- vmov @XMM[8], @XMM[9] @ next round tweak
-
-.Lxts_dec_done:
-#ifndef XTS_CHAIN_TWEAK
- adds $len, #0x10
- beq .Lxts_dec_ret
-
- @ calculate one round of extra tweak for the stolen ciphertext
- vldmia $magic, {$twmask}
- vshr.s64 @XMM[6], @XMM[8], #63
- vand @XMM[6], @XMM[6], $twmask
- vadd.u64 @XMM[9], @XMM[8], @XMM[8]
- vswp `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
- veor @XMM[9], @XMM[9], @XMM[6]
-
- @ perform the final decryption with the last tweak value
- vld1.8 {@XMM[0]}, [$inp]!
- mov r0, sp
- veor @XMM[0], @XMM[0], @XMM[9]
- mov r1, sp
- vst1.8 {@XMM[0]}, [sp,:128]
- mov r2, $key
- mov r4, $fp @ preserve fp
-
- bl AES_decrypt
-
- vld1.8 {@XMM[0]}, [sp,:128]
- veor @XMM[0], @XMM[0], @XMM[9]
- vst1.8 {@XMM[0]}, [$out]
-
- mov r6, $out
-.Lxts_dec_steal:
- ldrb r1, [$out]
- ldrb r0, [$inp], #1
- strb r1, [$out, #0x10]
- strb r0, [$out], #1
-
- subs $len, #1
- bhi .Lxts_dec_steal
-
- vld1.8 {@XMM[0]}, [r6]
- mov r0, sp
- veor @XMM[0], @XMM[8]
- mov r1, sp
- vst1.8 {@XMM[0]}, [sp,:128]
- mov r2, $key
-
- bl AES_decrypt
-
- vld1.8 {@XMM[0]}, [sp,:128]
- veor @XMM[0], @XMM[0], @XMM[8]
- vst1.8 {@XMM[0]}, [r6]
- mov $fp, r4
-#endif
-
-.Lxts_dec_ret:
- bic r0, $fp, #0xf
- vmov.i32 q0, #0
- vmov.i32 q1, #0
-#ifdef XTS_CHAIN_TWEAK
- ldr r1, [$fp, #0x20+VFP_ABI_FRAME] @ chain tweak
-#endif
-.Lxts_dec_bzero: @ wipe key schedule [if any]
- vstmia sp!, {q0-q1}
- cmp sp, r0
- bne .Lxts_dec_bzero
-
- mov sp, $fp
-#ifdef XTS_CHAIN_TWEAK
- vst1.8 {@XMM[8]}, [r1]
-#endif
- VFP_ABI_POP
- ldmia sp!, {r4-r10, pc} @ return
-
-.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
-___
-}
-$code.=<<___;
-#endif
-___
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-open SELF,$0;
-while(<SELF>) {
- next if (/^#!/);
- last if (!s/^#/@/ and !/^$/);
- print;
-}
-close SELF;
-
-print $code;
-
-close STDOUT;
diff --git a/app/openssl/crypto/aes/asm/bsaes-x86_64.S b/app/openssl/crypto/aes/asm/bsaes-x86_64.S
deleted file mode 100644
index dc92d4da..00000000
--- a/app/openssl/crypto/aes/asm/bsaes-x86_64.S
+++ /dev/null
@@ -1,2498 +0,0 @@
-.text
-
-
-
-
-.type _bsaes_encrypt8,@function
-.align 64
-_bsaes_encrypt8:
- leaq .LBS0(%rip),%r11
-
- movdqa (%rax),%xmm8
- leaq 16(%rax),%rax
- movdqa 80(%r11),%xmm7
- pxor %xmm8,%xmm15
- pxor %xmm8,%xmm0
-.byte 102,68,15,56,0,255
- pxor %xmm8,%xmm1
-.byte 102,15,56,0,199
- pxor %xmm8,%xmm2
-.byte 102,15,56,0,207
- pxor %xmm8,%xmm3
-.byte 102,15,56,0,215
- pxor %xmm8,%xmm4
-.byte 102,15,56,0,223
- pxor %xmm8,%xmm5
-.byte 102,15,56,0,231
- pxor %xmm8,%xmm6
-.byte 102,15,56,0,239
-.byte 102,15,56,0,247
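-
-/* The .byte runs above are hand-encoded SSSE3 pshufb instructions, e.g.
-   102,68,15,56,0,255 is pshufb %xmm7,%xmm15; emitting raw bytes keeps the
-   module buildable with pre-SSSE3 assemblers. */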
-_bsaes_encrypt8_bitslice:
- movdqa 0(%r11),%xmm7
- movdqa 16(%r11),%xmm8
- movdqa %xmm5,%xmm9
- psrlq $1,%xmm5
- movdqa %xmm3,%xmm10
- psrlq $1,%xmm3
- pxor %xmm6,%xmm5
- pxor %xmm4,%xmm3
- pand %xmm7,%xmm5
- pand %xmm7,%xmm3
- pxor %xmm5,%xmm6
- psllq $1,%xmm5
- pxor %xmm3,%xmm4
- psllq $1,%xmm3
- pxor %xmm9,%xmm5
- pxor %xmm10,%xmm3
- movdqa %xmm1,%xmm9
- psrlq $1,%xmm1
- movdqa %xmm15,%xmm10
- psrlq $1,%xmm15
- pxor %xmm2,%xmm1
- pxor %xmm0,%xmm15
- pand %xmm7,%xmm1
- pand %xmm7,%xmm15
- pxor %xmm1,%xmm2
- psllq $1,%xmm1
- pxor %xmm15,%xmm0
- psllq $1,%xmm15
- pxor %xmm9,%xmm1
- pxor %xmm10,%xmm15
- movdqa 32(%r11),%xmm7
- movdqa %xmm4,%xmm9
- psrlq $2,%xmm4
- movdqa %xmm3,%xmm10
- psrlq $2,%xmm3
- pxor %xmm6,%xmm4
- pxor %xmm5,%xmm3
- pand %xmm8,%xmm4
- pand %xmm8,%xmm3
- pxor %xmm4,%xmm6
- psllq $2,%xmm4
- pxor %xmm3,%xmm5
- psllq $2,%xmm3
- pxor %xmm9,%xmm4
- pxor %xmm10,%xmm3
- movdqa %xmm0,%xmm9
- psrlq $2,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $2,%xmm15
- pxor %xmm2,%xmm0
- pxor %xmm1,%xmm15
- pand %xmm8,%xmm0
- pand %xmm8,%xmm15
- pxor %xmm0,%xmm2
- psllq $2,%xmm0
- pxor %xmm15,%xmm1
- psllq $2,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- movdqa %xmm2,%xmm9
- psrlq $4,%xmm2
- movdqa %xmm1,%xmm10
- psrlq $4,%xmm1
- pxor %xmm6,%xmm2
- pxor %xmm5,%xmm1
- pand %xmm7,%xmm2
- pand %xmm7,%xmm1
- pxor %xmm2,%xmm6
- psllq $4,%xmm2
- pxor %xmm1,%xmm5
- psllq $4,%xmm1
- pxor %xmm9,%xmm2
- pxor %xmm10,%xmm1
- movdqa %xmm0,%xmm9
- psrlq $4,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $4,%xmm15
- pxor %xmm4,%xmm0
- pxor %xmm3,%xmm15
- pand %xmm7,%xmm0
- pand %xmm7,%xmm15
- pxor %xmm0,%xmm4
- psllq $4,%xmm0
- pxor %xmm15,%xmm3
- psllq $4,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- decl %r10d
- jmp .Lenc_sbox
-.align 16
-.Lenc_loop:
- pxor 0(%rax),%xmm15
- pxor 16(%rax),%xmm0
-.byte 102,68,15,56,0,255
- pxor 32(%rax),%xmm1
-.byte 102,15,56,0,199
- pxor 48(%rax),%xmm2
-.byte 102,15,56,0,207
- pxor 64(%rax),%xmm3
-.byte 102,15,56,0,215
- pxor 80(%rax),%xmm4
-.byte 102,15,56,0,223
- pxor 96(%rax),%xmm5
-.byte 102,15,56,0,231
- pxor 112(%rax),%xmm6
-.byte 102,15,56,0,239
- leaq 128(%rax),%rax
-.byte 102,15,56,0,247
-.Lenc_sbox:
- pxor %xmm5,%xmm4
- pxor %xmm0,%xmm1
- pxor %xmm15,%xmm2
- pxor %xmm1,%xmm5
- pxor %xmm15,%xmm4
-
- pxor %xmm2,%xmm5
- pxor %xmm6,%xmm2
- pxor %xmm4,%xmm6
- pxor %xmm3,%xmm2
- pxor %xmm4,%xmm3
- pxor %xmm0,%xmm2
-
- pxor %xmm6,%xmm1
- pxor %xmm4,%xmm0
- movdqa %xmm6,%xmm10
- movdqa %xmm0,%xmm9
- movdqa %xmm4,%xmm8
- movdqa %xmm1,%xmm12
- movdqa %xmm5,%xmm11
-
- pxor %xmm3,%xmm10
- pxor %xmm1,%xmm9
- pxor %xmm2,%xmm8
- movdqa %xmm10,%xmm13
- pxor %xmm3,%xmm12
- movdqa %xmm9,%xmm7
- pxor %xmm15,%xmm11
- movdqa %xmm10,%xmm14
-
- por %xmm8,%xmm9
- por %xmm11,%xmm10
- pxor %xmm7,%xmm14
- pand %xmm11,%xmm13
- pxor %xmm8,%xmm11
- pand %xmm8,%xmm7
- pand %xmm11,%xmm14
- movdqa %xmm2,%xmm11
- pxor %xmm15,%xmm11
- pand %xmm11,%xmm12
- pxor %xmm12,%xmm10
- pxor %xmm12,%xmm9
- movdqa %xmm6,%xmm12
- movdqa %xmm4,%xmm11
- pxor %xmm0,%xmm12
- pxor %xmm5,%xmm11
- movdqa %xmm12,%xmm8
- pand %xmm11,%xmm12
- por %xmm11,%xmm8
- pxor %xmm12,%xmm7
- pxor %xmm14,%xmm10
- pxor %xmm13,%xmm9
- pxor %xmm14,%xmm8
- movdqa %xmm1,%xmm11
- pxor %xmm13,%xmm7
- movdqa %xmm3,%xmm12
- pxor %xmm13,%xmm8
- movdqa %xmm0,%xmm13
- pand %xmm2,%xmm11
- movdqa %xmm6,%xmm14
- pand %xmm15,%xmm12
- pand %xmm4,%xmm13
- por %xmm5,%xmm14
- pxor %xmm11,%xmm10
- pxor %xmm12,%xmm9
- pxor %xmm13,%xmm8
- pxor %xmm14,%xmm7
-
-
-
-
-
- movdqa %xmm10,%xmm11
- pand %xmm8,%xmm10
- pxor %xmm9,%xmm11
-
- movdqa %xmm7,%xmm13
- movdqa %xmm11,%xmm14
- pxor %xmm10,%xmm13
- pand %xmm13,%xmm14
-
- movdqa %xmm8,%xmm12
- pxor %xmm9,%xmm14
- pxor %xmm7,%xmm12
-
- pxor %xmm9,%xmm10
-
- pand %xmm10,%xmm12
-
- movdqa %xmm13,%xmm9
- pxor %xmm7,%xmm12
-
- pxor %xmm12,%xmm9
- pxor %xmm12,%xmm8
-
- pand %xmm7,%xmm9
-
- pxor %xmm9,%xmm13
- pxor %xmm9,%xmm8
-
- pand %xmm14,%xmm13
-
- pxor %xmm11,%xmm13
- movdqa %xmm5,%xmm11
- movdqa %xmm4,%xmm7
- movdqa %xmm14,%xmm9
- pxor %xmm13,%xmm9
- pand %xmm5,%xmm9
- pxor %xmm4,%xmm5
- pand %xmm14,%xmm4
- pand %xmm13,%xmm5
- pxor %xmm4,%xmm5
- pxor %xmm9,%xmm4
- pxor %xmm15,%xmm11
- pxor %xmm2,%xmm7
- pxor %xmm12,%xmm14
- pxor %xmm8,%xmm13
- movdqa %xmm14,%xmm10
- movdqa %xmm12,%xmm9
- pxor %xmm13,%xmm10
- pxor %xmm8,%xmm9
- pand %xmm11,%xmm10
- pand %xmm15,%xmm9
- pxor %xmm7,%xmm11
- pxor %xmm2,%xmm15
- pand %xmm14,%xmm7
- pand %xmm12,%xmm2
- pand %xmm13,%xmm11
- pand %xmm8,%xmm15
- pxor %xmm11,%xmm7
- pxor %xmm2,%xmm15
- pxor %xmm10,%xmm11
- pxor %xmm9,%xmm2
- pxor %xmm11,%xmm5
- pxor %xmm11,%xmm15
- pxor %xmm7,%xmm4
- pxor %xmm7,%xmm2
-
- movdqa %xmm6,%xmm11
- movdqa %xmm0,%xmm7
- pxor %xmm3,%xmm11
- pxor %xmm1,%xmm7
- movdqa %xmm14,%xmm10
- movdqa %xmm12,%xmm9
- pxor %xmm13,%xmm10
- pxor %xmm8,%xmm9
- pand %xmm11,%xmm10
- pand %xmm3,%xmm9
- pxor %xmm7,%xmm11
- pxor %xmm1,%xmm3
- pand %xmm14,%xmm7
- pand %xmm12,%xmm1
- pand %xmm13,%xmm11
- pand %xmm8,%xmm3
- pxor %xmm11,%xmm7
- pxor %xmm1,%xmm3
- pxor %xmm10,%xmm11
- pxor %xmm9,%xmm1
- pxor %xmm12,%xmm14
- pxor %xmm8,%xmm13
- movdqa %xmm14,%xmm10
- pxor %xmm13,%xmm10
- pand %xmm6,%xmm10
- pxor %xmm0,%xmm6
- pand %xmm14,%xmm0
- pand %xmm13,%xmm6
- pxor %xmm0,%xmm6
- pxor %xmm10,%xmm0
- pxor %xmm11,%xmm6
- pxor %xmm11,%xmm3
- pxor %xmm7,%xmm0
- pxor %xmm7,%xmm1
- pxor %xmm15,%xmm6
- pxor %xmm5,%xmm0
- pxor %xmm6,%xmm3
- pxor %xmm15,%xmm5
- pxor %xmm0,%xmm15
-
- pxor %xmm4,%xmm0
- pxor %xmm1,%xmm4
- pxor %xmm2,%xmm1
- pxor %xmm4,%xmm2
- pxor %xmm4,%xmm3
-
- pxor %xmm2,%xmm5
- decl %r10d
- jl .Lenc_done
- pshufd $147,%xmm15,%xmm7
- pshufd $147,%xmm0,%xmm8
- pxor %xmm7,%xmm15
- pshufd $147,%xmm3,%xmm9
- pxor %xmm8,%xmm0
- pshufd $147,%xmm5,%xmm10
- pxor %xmm9,%xmm3
- pshufd $147,%xmm2,%xmm11
- pxor %xmm10,%xmm5
- pshufd $147,%xmm6,%xmm12
- pxor %xmm11,%xmm2
- pshufd $147,%xmm1,%xmm13
- pxor %xmm12,%xmm6
- pshufd $147,%xmm4,%xmm14
- pxor %xmm13,%xmm1
- pxor %xmm14,%xmm4
-
- pxor %xmm15,%xmm8
- pxor %xmm4,%xmm7
- pxor %xmm4,%xmm8
- pshufd $78,%xmm15,%xmm15
- pxor %xmm0,%xmm9
- pshufd $78,%xmm0,%xmm0
- pxor %xmm2,%xmm12
- pxor %xmm7,%xmm15
- pxor %xmm6,%xmm13
- pxor %xmm8,%xmm0
- pxor %xmm5,%xmm11
- pshufd $78,%xmm2,%xmm7
- pxor %xmm1,%xmm14
- pshufd $78,%xmm6,%xmm8
- pxor %xmm3,%xmm10
- pshufd $78,%xmm5,%xmm2
- pxor %xmm4,%xmm10
- pshufd $78,%xmm4,%xmm6
- pxor %xmm4,%xmm11
- pshufd $78,%xmm1,%xmm5
- pxor %xmm11,%xmm7
- pshufd $78,%xmm3,%xmm1
- pxor %xmm12,%xmm8
- pxor %xmm10,%xmm2
- pxor %xmm14,%xmm6
- pxor %xmm13,%xmm5
- movdqa %xmm7,%xmm3
- pxor %xmm9,%xmm1
- movdqa %xmm8,%xmm4
- movdqa 48(%r11),%xmm7
- jnz .Lenc_loop
- movdqa 64(%r11),%xmm7
- jmp .Lenc_loop
-.align 16
-.Lenc_done:
- movdqa 0(%r11),%xmm7
- movdqa 16(%r11),%xmm8
- movdqa %xmm1,%xmm9
- psrlq $1,%xmm1
- movdqa %xmm2,%xmm10
- psrlq $1,%xmm2
- pxor %xmm4,%xmm1
- pxor %xmm6,%xmm2
- pand %xmm7,%xmm1
- pand %xmm7,%xmm2
- pxor %xmm1,%xmm4
- psllq $1,%xmm1
- pxor %xmm2,%xmm6
- psllq $1,%xmm2
- pxor %xmm9,%xmm1
- pxor %xmm10,%xmm2
- movdqa %xmm3,%xmm9
- psrlq $1,%xmm3
- movdqa %xmm15,%xmm10
- psrlq $1,%xmm15
- pxor %xmm5,%xmm3
- pxor %xmm0,%xmm15
- pand %xmm7,%xmm3
- pand %xmm7,%xmm15
- pxor %xmm3,%xmm5
- psllq $1,%xmm3
- pxor %xmm15,%xmm0
- psllq $1,%xmm15
- pxor %xmm9,%xmm3
- pxor %xmm10,%xmm15
- movdqa 32(%r11),%xmm7
- movdqa %xmm6,%xmm9
- psrlq $2,%xmm6
- movdqa %xmm2,%xmm10
- psrlq $2,%xmm2
- pxor %xmm4,%xmm6
- pxor %xmm1,%xmm2
- pand %xmm8,%xmm6
- pand %xmm8,%xmm2
- pxor %xmm6,%xmm4
- psllq $2,%xmm6
- pxor %xmm2,%xmm1
- psllq $2,%xmm2
- pxor %xmm9,%xmm6
- pxor %xmm10,%xmm2
- movdqa %xmm0,%xmm9
- psrlq $2,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $2,%xmm15
- pxor %xmm5,%xmm0
- pxor %xmm3,%xmm15
- pand %xmm8,%xmm0
- pand %xmm8,%xmm15
- pxor %xmm0,%xmm5
- psllq $2,%xmm0
- pxor %xmm15,%xmm3
- psllq $2,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- movdqa %xmm5,%xmm9
- psrlq $4,%xmm5
- movdqa %xmm3,%xmm10
- psrlq $4,%xmm3
- pxor %xmm4,%xmm5
- pxor %xmm1,%xmm3
- pand %xmm7,%xmm5
- pand %xmm7,%xmm3
- pxor %xmm5,%xmm4
- psllq $4,%xmm5
- pxor %xmm3,%xmm1
- psllq $4,%xmm3
- pxor %xmm9,%xmm5
- pxor %xmm10,%xmm3
- movdqa %xmm0,%xmm9
- psrlq $4,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $4,%xmm15
- pxor %xmm6,%xmm0
- pxor %xmm2,%xmm15
- pand %xmm7,%xmm0
- pand %xmm7,%xmm15
- pxor %xmm0,%xmm6
- psllq $4,%xmm0
- pxor %xmm15,%xmm2
- psllq $4,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- movdqa (%rax),%xmm7
- pxor %xmm7,%xmm3
- pxor %xmm7,%xmm5
- pxor %xmm7,%xmm2
- pxor %xmm7,%xmm6
- pxor %xmm7,%xmm1
- pxor %xmm7,%xmm4
- pxor %xmm7,%xmm15
- pxor %xmm7,%xmm0
- .byte 0xf3,0xc3
-.size _bsaes_encrypt8,.-_bsaes_encrypt8
-
-.type _bsaes_decrypt8,@function
-.align 64
-_bsaes_decrypt8:
- leaq .LBS0(%rip),%r11
-
- movdqa (%rax),%xmm8
- leaq 16(%rax),%rax
- movdqa -48(%r11),%xmm7
- pxor %xmm8,%xmm15
- pxor %xmm8,%xmm0
-.byte 102,68,15,56,0,255
- pxor %xmm8,%xmm1
-.byte 102,15,56,0,199
- pxor %xmm8,%xmm2
-.byte 102,15,56,0,207
- pxor %xmm8,%xmm3
-.byte 102,15,56,0,215
- pxor %xmm8,%xmm4
-.byte 102,15,56,0,223
- pxor %xmm8,%xmm5
-.byte 102,15,56,0,231
- pxor %xmm8,%xmm6
-.byte 102,15,56,0,239
-.byte 102,15,56,0,247
- movdqa 0(%r11),%xmm7
- movdqa 16(%r11),%xmm8
- movdqa %xmm5,%xmm9
- psrlq $1,%xmm5
- movdqa %xmm3,%xmm10
- psrlq $1,%xmm3
- pxor %xmm6,%xmm5
- pxor %xmm4,%xmm3
- pand %xmm7,%xmm5
- pand %xmm7,%xmm3
- pxor %xmm5,%xmm6
- psllq $1,%xmm5
- pxor %xmm3,%xmm4
- psllq $1,%xmm3
- pxor %xmm9,%xmm5
- pxor %xmm10,%xmm3
- movdqa %xmm1,%xmm9
- psrlq $1,%xmm1
- movdqa %xmm15,%xmm10
- psrlq $1,%xmm15
- pxor %xmm2,%xmm1
- pxor %xmm0,%xmm15
- pand %xmm7,%xmm1
- pand %xmm7,%xmm15
- pxor %xmm1,%xmm2
- psllq $1,%xmm1
- pxor %xmm15,%xmm0
- psllq $1,%xmm15
- pxor %xmm9,%xmm1
- pxor %xmm10,%xmm15
- movdqa 32(%r11),%xmm7
- movdqa %xmm4,%xmm9
- psrlq $2,%xmm4
- movdqa %xmm3,%xmm10
- psrlq $2,%xmm3
- pxor %xmm6,%xmm4
- pxor %xmm5,%xmm3
- pand %xmm8,%xmm4
- pand %xmm8,%xmm3
- pxor %xmm4,%xmm6
- psllq $2,%xmm4
- pxor %xmm3,%xmm5
- psllq $2,%xmm3
- pxor %xmm9,%xmm4
- pxor %xmm10,%xmm3
- movdqa %xmm0,%xmm9
- psrlq $2,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $2,%xmm15
- pxor %xmm2,%xmm0
- pxor %xmm1,%xmm15
- pand %xmm8,%xmm0
- pand %xmm8,%xmm15
- pxor %xmm0,%xmm2
- psllq $2,%xmm0
- pxor %xmm15,%xmm1
- psllq $2,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- movdqa %xmm2,%xmm9
- psrlq $4,%xmm2
- movdqa %xmm1,%xmm10
- psrlq $4,%xmm1
- pxor %xmm6,%xmm2
- pxor %xmm5,%xmm1
- pand %xmm7,%xmm2
- pand %xmm7,%xmm1
- pxor %xmm2,%xmm6
- psllq $4,%xmm2
- pxor %xmm1,%xmm5
- psllq $4,%xmm1
- pxor %xmm9,%xmm2
- pxor %xmm10,%xmm1
- movdqa %xmm0,%xmm9
- psrlq $4,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $4,%xmm15
- pxor %xmm4,%xmm0
- pxor %xmm3,%xmm15
- pand %xmm7,%xmm0
- pand %xmm7,%xmm15
- pxor %xmm0,%xmm4
- psllq $4,%xmm0
- pxor %xmm15,%xmm3
- psllq $4,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- decl %r10d
- jmp .Ldec_sbox
-.align 16
-.Ldec_loop:
- pxor 0(%rax),%xmm15
- pxor 16(%rax),%xmm0
-.byte 102,68,15,56,0,255
- pxor 32(%rax),%xmm1
-.byte 102,15,56,0,199
- pxor 48(%rax),%xmm2
-.byte 102,15,56,0,207
- pxor 64(%rax),%xmm3
-.byte 102,15,56,0,215
- pxor 80(%rax),%xmm4
-.byte 102,15,56,0,223
- pxor 96(%rax),%xmm5
-.byte 102,15,56,0,231
- pxor 112(%rax),%xmm6
-.byte 102,15,56,0,239
- leaq 128(%rax),%rax
-.byte 102,15,56,0,247
-.Ldec_sbox:
- pxor %xmm3,%xmm2
-
- pxor %xmm6,%xmm3
- pxor %xmm6,%xmm1
- pxor %xmm3,%xmm5
- pxor %xmm5,%xmm6
- pxor %xmm6,%xmm0
-
- pxor %xmm0,%xmm15
- pxor %xmm4,%xmm1
- pxor %xmm15,%xmm2
- pxor %xmm15,%xmm4
- pxor %xmm2,%xmm0
- movdqa %xmm2,%xmm10
- movdqa %xmm6,%xmm9
- movdqa %xmm0,%xmm8
- movdqa %xmm3,%xmm12
- movdqa %xmm4,%xmm11
-
- pxor %xmm15,%xmm10
- pxor %xmm3,%xmm9
- pxor %xmm5,%xmm8
- movdqa %xmm10,%xmm13
- pxor %xmm15,%xmm12
- movdqa %xmm9,%xmm7
- pxor %xmm1,%xmm11
- movdqa %xmm10,%xmm14
-
- por %xmm8,%xmm9
- por %xmm11,%xmm10
- pxor %xmm7,%xmm14
- pand %xmm11,%xmm13
- pxor %xmm8,%xmm11
- pand %xmm8,%xmm7
- pand %xmm11,%xmm14
- movdqa %xmm5,%xmm11
- pxor %xmm1,%xmm11
- pand %xmm11,%xmm12
- pxor %xmm12,%xmm10
- pxor %xmm12,%xmm9
- movdqa %xmm2,%xmm12
- movdqa %xmm0,%xmm11
- pxor %xmm6,%xmm12
- pxor %xmm4,%xmm11
- movdqa %xmm12,%xmm8
- pand %xmm11,%xmm12
- por %xmm11,%xmm8
- pxor %xmm12,%xmm7
- pxor %xmm14,%xmm10
- pxor %xmm13,%xmm9
- pxor %xmm14,%xmm8
- movdqa %xmm3,%xmm11
- pxor %xmm13,%xmm7
- movdqa %xmm15,%xmm12
- pxor %xmm13,%xmm8
- movdqa %xmm6,%xmm13
- pand %xmm5,%xmm11
- movdqa %xmm2,%xmm14
- pand %xmm1,%xmm12
- pand %xmm0,%xmm13
- por %xmm4,%xmm14
- pxor %xmm11,%xmm10
- pxor %xmm12,%xmm9
- pxor %xmm13,%xmm8
- pxor %xmm14,%xmm7
-
-
-
-
-
- movdqa %xmm10,%xmm11
- pand %xmm8,%xmm10
- pxor %xmm9,%xmm11
-
- movdqa %xmm7,%xmm13
- movdqa %xmm11,%xmm14
- pxor %xmm10,%xmm13
- pand %xmm13,%xmm14
-
- movdqa %xmm8,%xmm12
- pxor %xmm9,%xmm14
- pxor %xmm7,%xmm12
-
- pxor %xmm9,%xmm10
-
- pand %xmm10,%xmm12
-
- movdqa %xmm13,%xmm9
- pxor %xmm7,%xmm12
-
- pxor %xmm12,%xmm9
- pxor %xmm12,%xmm8
-
- pand %xmm7,%xmm9
-
- pxor %xmm9,%xmm13
- pxor %xmm9,%xmm8
-
- pand %xmm14,%xmm13
-
- pxor %xmm11,%xmm13
- movdqa %xmm4,%xmm11
- movdqa %xmm0,%xmm7
- movdqa %xmm14,%xmm9
- pxor %xmm13,%xmm9
- pand %xmm4,%xmm9
- pxor %xmm0,%xmm4
- pand %xmm14,%xmm0
- pand %xmm13,%xmm4
- pxor %xmm0,%xmm4
- pxor %xmm9,%xmm0
- pxor %xmm1,%xmm11
- pxor %xmm5,%xmm7
- pxor %xmm12,%xmm14
- pxor %xmm8,%xmm13
- movdqa %xmm14,%xmm10
- movdqa %xmm12,%xmm9
- pxor %xmm13,%xmm10
- pxor %xmm8,%xmm9
- pand %xmm11,%xmm10
- pand %xmm1,%xmm9
- pxor %xmm7,%xmm11
- pxor %xmm5,%xmm1
- pand %xmm14,%xmm7
- pand %xmm12,%xmm5
- pand %xmm13,%xmm11
- pand %xmm8,%xmm1
- pxor %xmm11,%xmm7
- pxor %xmm5,%xmm1
- pxor %xmm10,%xmm11
- pxor %xmm9,%xmm5
- pxor %xmm11,%xmm4
- pxor %xmm11,%xmm1
- pxor %xmm7,%xmm0
- pxor %xmm7,%xmm5
-
- movdqa %xmm2,%xmm11
- movdqa %xmm6,%xmm7
- pxor %xmm15,%xmm11
- pxor %xmm3,%xmm7
- movdqa %xmm14,%xmm10
- movdqa %xmm12,%xmm9
- pxor %xmm13,%xmm10
- pxor %xmm8,%xmm9
- pand %xmm11,%xmm10
- pand %xmm15,%xmm9
- pxor %xmm7,%xmm11
- pxor %xmm3,%xmm15
- pand %xmm14,%xmm7
- pand %xmm12,%xmm3
- pand %xmm13,%xmm11
- pand %xmm8,%xmm15
- pxor %xmm11,%xmm7
- pxor %xmm3,%xmm15
- pxor %xmm10,%xmm11
- pxor %xmm9,%xmm3
- pxor %xmm12,%xmm14
- pxor %xmm8,%xmm13
- movdqa %xmm14,%xmm10
- pxor %xmm13,%xmm10
- pand %xmm2,%xmm10
- pxor %xmm6,%xmm2
- pand %xmm14,%xmm6
- pand %xmm13,%xmm2
- pxor %xmm6,%xmm2
- pxor %xmm10,%xmm6
- pxor %xmm11,%xmm2
- pxor %xmm11,%xmm15
- pxor %xmm7,%xmm6
- pxor %xmm7,%xmm3
- pxor %xmm6,%xmm0
- pxor %xmm4,%xmm5
-
- pxor %xmm0,%xmm3
- pxor %xmm6,%xmm1
- pxor %xmm6,%xmm4
- pxor %xmm1,%xmm3
- pxor %xmm15,%xmm6
- pxor %xmm4,%xmm3
- pxor %xmm5,%xmm2
- pxor %xmm0,%xmm5
- pxor %xmm3,%xmm2
-
- pxor %xmm15,%xmm3
- pxor %xmm2,%xmm6
- decl %r10d
- jl .Ldec_done
-
- pshufd $78,%xmm15,%xmm7
- pshufd $78,%xmm2,%xmm13
- pxor %xmm15,%xmm7
- pshufd $78,%xmm4,%xmm14
- pxor %xmm2,%xmm13
- pshufd $78,%xmm0,%xmm8
- pxor %xmm4,%xmm14
- pshufd $78,%xmm5,%xmm9
- pxor %xmm0,%xmm8
- pshufd $78,%xmm3,%xmm10
- pxor %xmm5,%xmm9
- pxor %xmm13,%xmm15
- pxor %xmm13,%xmm0
- pshufd $78,%xmm1,%xmm11
- pxor %xmm3,%xmm10
- pxor %xmm7,%xmm5
- pxor %xmm8,%xmm3
- pshufd $78,%xmm6,%xmm12
- pxor %xmm1,%xmm11
- pxor %xmm14,%xmm0
- pxor %xmm9,%xmm1
- pxor %xmm6,%xmm12
-
- pxor %xmm14,%xmm5
- pxor %xmm13,%xmm3
- pxor %xmm13,%xmm1
- pxor %xmm10,%xmm6
- pxor %xmm11,%xmm2
- pxor %xmm14,%xmm1
- pxor %xmm14,%xmm6
- pxor %xmm12,%xmm4
- pshufd $147,%xmm15,%xmm7
- pshufd $147,%xmm0,%xmm8
- pxor %xmm7,%xmm15
- pshufd $147,%xmm5,%xmm9
- pxor %xmm8,%xmm0
- pshufd $147,%xmm3,%xmm10
- pxor %xmm9,%xmm5
- pshufd $147,%xmm1,%xmm11
- pxor %xmm10,%xmm3
- pshufd $147,%xmm6,%xmm12
- pxor %xmm11,%xmm1
- pshufd $147,%xmm2,%xmm13
- pxor %xmm12,%xmm6
- pshufd $147,%xmm4,%xmm14
- pxor %xmm13,%xmm2
- pxor %xmm14,%xmm4
-
- pxor %xmm15,%xmm8
- pxor %xmm4,%xmm7
- pxor %xmm4,%xmm8
- pshufd $78,%xmm15,%xmm15
- pxor %xmm0,%xmm9
- pshufd $78,%xmm0,%xmm0
- pxor %xmm1,%xmm12
- pxor %xmm7,%xmm15
- pxor %xmm6,%xmm13
- pxor %xmm8,%xmm0
- pxor %xmm3,%xmm11
- pshufd $78,%xmm1,%xmm7
- pxor %xmm2,%xmm14
- pshufd $78,%xmm6,%xmm8
- pxor %xmm5,%xmm10
- pshufd $78,%xmm3,%xmm1
- pxor %xmm4,%xmm10
- pshufd $78,%xmm4,%xmm6
- pxor %xmm4,%xmm11
- pshufd $78,%xmm2,%xmm3
- pxor %xmm11,%xmm7
- pshufd $78,%xmm5,%xmm2
- pxor %xmm12,%xmm8
- pxor %xmm1,%xmm10
- pxor %xmm14,%xmm6
- pxor %xmm3,%xmm13
- movdqa %xmm7,%xmm3
- pxor %xmm9,%xmm2
- movdqa %xmm13,%xmm5
- movdqa %xmm8,%xmm4
- movdqa %xmm2,%xmm1
- movdqa %xmm10,%xmm2
- movdqa -16(%r11),%xmm7
- jnz .Ldec_loop
- movdqa -32(%r11),%xmm7
- jmp .Ldec_loop
-.align 16
-.Ldec_done:
- movdqa 0(%r11),%xmm7
- movdqa 16(%r11),%xmm8
- movdqa %xmm2,%xmm9
- psrlq $1,%xmm2
- movdqa %xmm1,%xmm10
- psrlq $1,%xmm1
- pxor %xmm4,%xmm2
- pxor %xmm6,%xmm1
- pand %xmm7,%xmm2
- pand %xmm7,%xmm1
- pxor %xmm2,%xmm4
- psllq $1,%xmm2
- pxor %xmm1,%xmm6
- psllq $1,%xmm1
- pxor %xmm9,%xmm2
- pxor %xmm10,%xmm1
- movdqa %xmm5,%xmm9
- psrlq $1,%xmm5
- movdqa %xmm15,%xmm10
- psrlq $1,%xmm15
- pxor %xmm3,%xmm5
- pxor %xmm0,%xmm15
- pand %xmm7,%xmm5
- pand %xmm7,%xmm15
- pxor %xmm5,%xmm3
- psllq $1,%xmm5
- pxor %xmm15,%xmm0
- psllq $1,%xmm15
- pxor %xmm9,%xmm5
- pxor %xmm10,%xmm15
- movdqa 32(%r11),%xmm7
- movdqa %xmm6,%xmm9
- psrlq $2,%xmm6
- movdqa %xmm1,%xmm10
- psrlq $2,%xmm1
- pxor %xmm4,%xmm6
- pxor %xmm2,%xmm1
- pand %xmm8,%xmm6
- pand %xmm8,%xmm1
- pxor %xmm6,%xmm4
- psllq $2,%xmm6
- pxor %xmm1,%xmm2
- psllq $2,%xmm1
- pxor %xmm9,%xmm6
- pxor %xmm10,%xmm1
- movdqa %xmm0,%xmm9
- psrlq $2,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $2,%xmm15
- pxor %xmm3,%xmm0
- pxor %xmm5,%xmm15
- pand %xmm8,%xmm0
- pand %xmm8,%xmm15
- pxor %xmm0,%xmm3
- psllq $2,%xmm0
- pxor %xmm15,%xmm5
- psllq $2,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- movdqa %xmm3,%xmm9
- psrlq $4,%xmm3
- movdqa %xmm5,%xmm10
- psrlq $4,%xmm5
- pxor %xmm4,%xmm3
- pxor %xmm2,%xmm5
- pand %xmm7,%xmm3
- pand %xmm7,%xmm5
- pxor %xmm3,%xmm4
- psllq $4,%xmm3
- pxor %xmm5,%xmm2
- psllq $4,%xmm5
- pxor %xmm9,%xmm3
- pxor %xmm10,%xmm5
- movdqa %xmm0,%xmm9
- psrlq $4,%xmm0
- movdqa %xmm15,%xmm10
- psrlq $4,%xmm15
- pxor %xmm6,%xmm0
- pxor %xmm1,%xmm15
- pand %xmm7,%xmm0
- pand %xmm7,%xmm15
- pxor %xmm0,%xmm6
- psllq $4,%xmm0
- pxor %xmm15,%xmm1
- psllq $4,%xmm15
- pxor %xmm9,%xmm0
- pxor %xmm10,%xmm15
- movdqa (%rax),%xmm7
- pxor %xmm7,%xmm5
- pxor %xmm7,%xmm3
- pxor %xmm7,%xmm1
- pxor %xmm7,%xmm6
- pxor %xmm7,%xmm2
- pxor %xmm7,%xmm4
- pxor %xmm7,%xmm15
- pxor %xmm7,%xmm0
- .byte 0xf3,0xc3
-.size _bsaes_decrypt8,.-_bsaes_decrypt8
-.type _bsaes_key_convert,@function
-.align 16
-_bsaes_key_convert:
- leaq .Lmasks(%rip),%r11
- movdqu (%rcx),%xmm7
- leaq 16(%rcx),%rcx
- movdqa 0(%r11),%xmm0
- movdqa 16(%r11),%xmm1
- movdqa 32(%r11),%xmm2
- movdqa 48(%r11),%xmm3
- movdqa 64(%r11),%xmm4
- pcmpeqd %xmm5,%xmm5
-
- movdqu (%rcx),%xmm6
- movdqa %xmm7,(%rax)
- leaq 16(%rax),%rax
- decl %r10d
- jmp .Lkey_loop
-.align 16
-.Lkey_loop:
-.byte 102,15,56,0,244
-
- movdqa %xmm0,%xmm8
- movdqa %xmm1,%xmm9
-
- pand %xmm6,%xmm8
- pand %xmm6,%xmm9
- movdqa %xmm2,%xmm10
- pcmpeqb %xmm0,%xmm8
- psllq $4,%xmm0
- movdqa %xmm3,%xmm11
- pcmpeqb %xmm1,%xmm9
- psllq $4,%xmm1
-
- pand %xmm6,%xmm10
- pand %xmm6,%xmm11
- movdqa %xmm0,%xmm12
- pcmpeqb %xmm2,%xmm10
- psllq $4,%xmm2
- movdqa %xmm1,%xmm13
- pcmpeqb %xmm3,%xmm11
- psllq $4,%xmm3
-
- movdqa %xmm2,%xmm14
- movdqa %xmm3,%xmm15
- pxor %xmm5,%xmm8
- pxor %xmm5,%xmm9
-
- pand %xmm6,%xmm12
- pand %xmm6,%xmm13
- movdqa %xmm8,0(%rax)
- pcmpeqb %xmm0,%xmm12
- psrlq $4,%xmm0
- movdqa %xmm9,16(%rax)
- pcmpeqb %xmm1,%xmm13
- psrlq $4,%xmm1
- leaq 16(%rcx),%rcx
-
- pand %xmm6,%xmm14
- pand %xmm6,%xmm15
- movdqa %xmm10,32(%rax)
- pcmpeqb %xmm2,%xmm14
- psrlq $4,%xmm2
- movdqa %xmm11,48(%rax)
- pcmpeqb %xmm3,%xmm15
- psrlq $4,%xmm3
- movdqu (%rcx),%xmm6
-
- pxor %xmm5,%xmm13
- pxor %xmm5,%xmm14
- movdqa %xmm12,64(%rax)
- movdqa %xmm13,80(%rax)
- movdqa %xmm14,96(%rax)
- movdqa %xmm15,112(%rax)
- leaq 128(%rax),%rax
- decl %r10d
- jnz .Lkey_loop
-
- movdqa 80(%r11),%xmm7
-
- .byte 0xf3,0xc3
-.size _bsaes_key_convert,.-_bsaes_key_convert
-
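[Editor's note, not part of the patch] _bsaes_key_convert above re-encodes each 16-byte round key for the bit-sliced domain: the pcmpeqb tests against the 0x01/0x02/0x04/0x08 masks from .Lmasks (doubled to 0x10..0x80 by psllq $4) expand every key bit into a full 0x00/0xff byte, so a round key can be XORed straight into the eight bit-sliced state registers. A plain-Perl sketch of that bit-plane expansion, for illustration only:

    # Expand a 16-byte round key into eight 16-byte bit planes:
    # byte i of plane b is 0xff iff bit b of key byte i is set.
    sub bitplane_expand {
        my ($key) = @_;                   # 16-byte round key string
        my @planes;
        for my $b (0 .. 7) {
            $planes[$b] = join '', map {
                (ord($_) >> $b) & 1 ? "\xff" : "\x00"
            } split //, $key;
        }
        return @planes;
    }
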
-.globl bsaes_cbc_encrypt
-.type bsaes_cbc_encrypt,@function
-.align 16
-bsaes_cbc_encrypt:
- cmpl $0,%r9d
- jne asm_AES_cbc_encrypt
- cmpq $128,%rdx
- jb asm_AES_cbc_encrypt
-
- movq %rsp,%rax
-.Lcbc_dec_prologue:
- pushq %rbp
- pushq %rbx
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
- leaq -72(%rsp),%rsp
- movq %rsp,%rbp
- movl 240(%rcx),%eax
- movq %rdi,%r12
- movq %rsi,%r13
- movq %rdx,%r14
- movq %rcx,%r15
- movq %r8,%rbx
- shrq $4,%r14
-
- movl %eax,%edx
- shlq $7,%rax
- subq $96,%rax
- subq %rax,%rsp
-
- movq %rsp,%rax
- movq %r15,%rcx
- movl %edx,%r10d
- call _bsaes_key_convert
- pxor (%rsp),%xmm7
- movdqa %xmm6,(%rax)
- movdqa %xmm7,(%rsp)
-
- movdqu (%rbx),%xmm14
- subq $8,%r14
-.Lcbc_dec_loop:
- movdqu 0(%r12),%xmm15
- movdqu 16(%r12),%xmm0
- movdqu 32(%r12),%xmm1
- movdqu 48(%r12),%xmm2
- movdqu 64(%r12),%xmm3
- movdqu 80(%r12),%xmm4
- movq %rsp,%rax
- movdqu 96(%r12),%xmm5
- movl %edx,%r10d
- movdqu 112(%r12),%xmm6
- movdqa %xmm14,32(%rbp)
-
- call _bsaes_decrypt8
-
- pxor 32(%rbp),%xmm15
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm0
- movdqu 32(%r12),%xmm9
- pxor %xmm8,%xmm5
- movdqu 48(%r12),%xmm10
- pxor %xmm9,%xmm3
- movdqu 64(%r12),%xmm11
- pxor %xmm10,%xmm1
- movdqu 80(%r12),%xmm12
- pxor %xmm11,%xmm6
- movdqu 96(%r12),%xmm13
- pxor %xmm12,%xmm2
- movdqu 112(%r12),%xmm14
- pxor %xmm13,%xmm4
- movdqu %xmm15,0(%r13)
- leaq 128(%r12),%r12
- movdqu %xmm0,16(%r13)
- movdqu %xmm5,32(%r13)
- movdqu %xmm3,48(%r13)
- movdqu %xmm1,64(%r13)
- movdqu %xmm6,80(%r13)
- movdqu %xmm2,96(%r13)
- movdqu %xmm4,112(%r13)
- leaq 128(%r13),%r13
- subq $8,%r14
- jnc .Lcbc_dec_loop
-
- addq $8,%r14
- jz .Lcbc_dec_done
-
- movdqu 0(%r12),%xmm15
- movq %rsp,%rax
- movl %edx,%r10d
- cmpq $2,%r14
- jb .Lcbc_dec_one
- movdqu 16(%r12),%xmm0
- je .Lcbc_dec_two
- movdqu 32(%r12),%xmm1
- cmpq $4,%r14
- jb .Lcbc_dec_three
- movdqu 48(%r12),%xmm2
- je .Lcbc_dec_four
- movdqu 64(%r12),%xmm3
- cmpq $6,%r14
- jb .Lcbc_dec_five
- movdqu 80(%r12),%xmm4
- je .Lcbc_dec_six
- movdqu 96(%r12),%xmm5
- movdqa %xmm14,32(%rbp)
- call _bsaes_decrypt8
- pxor 32(%rbp),%xmm15
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm0
- movdqu 32(%r12),%xmm9
- pxor %xmm8,%xmm5
- movdqu 48(%r12),%xmm10
- pxor %xmm9,%xmm3
- movdqu 64(%r12),%xmm11
- pxor %xmm10,%xmm1
- movdqu 80(%r12),%xmm12
- pxor %xmm11,%xmm6
- movdqu 96(%r12),%xmm14
- pxor %xmm12,%xmm2
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- movdqu %xmm5,32(%r13)
- movdqu %xmm3,48(%r13)
- movdqu %xmm1,64(%r13)
- movdqu %xmm6,80(%r13)
- movdqu %xmm2,96(%r13)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_six:
- movdqa %xmm14,32(%rbp)
- call _bsaes_decrypt8
- pxor 32(%rbp),%xmm15
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm0
- movdqu 32(%r12),%xmm9
- pxor %xmm8,%xmm5
- movdqu 48(%r12),%xmm10
- pxor %xmm9,%xmm3
- movdqu 64(%r12),%xmm11
- pxor %xmm10,%xmm1
- movdqu 80(%r12),%xmm14
- pxor %xmm11,%xmm6
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- movdqu %xmm5,32(%r13)
- movdqu %xmm3,48(%r13)
- movdqu %xmm1,64(%r13)
- movdqu %xmm6,80(%r13)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_five:
- movdqa %xmm14,32(%rbp)
- call _bsaes_decrypt8
- pxor 32(%rbp),%xmm15
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm0
- movdqu 32(%r12),%xmm9
- pxor %xmm8,%xmm5
- movdqu 48(%r12),%xmm10
- pxor %xmm9,%xmm3
- movdqu 64(%r12),%xmm14
- pxor %xmm10,%xmm1
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- movdqu %xmm5,32(%r13)
- movdqu %xmm3,48(%r13)
- movdqu %xmm1,64(%r13)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_four:
- movdqa %xmm14,32(%rbp)
- call _bsaes_decrypt8
- pxor 32(%rbp),%xmm15
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm0
- movdqu 32(%r12),%xmm9
- pxor %xmm8,%xmm5
- movdqu 48(%r12),%xmm14
- pxor %xmm9,%xmm3
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- movdqu %xmm5,32(%r13)
- movdqu %xmm3,48(%r13)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_three:
- movdqa %xmm14,32(%rbp)
- call _bsaes_decrypt8
- pxor 32(%rbp),%xmm15
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm0
- movdqu 32(%r12),%xmm14
- pxor %xmm8,%xmm5
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- movdqu %xmm5,32(%r13)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_two:
- movdqa %xmm14,32(%rbp)
- call _bsaes_decrypt8
- pxor 32(%rbp),%xmm15
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm14
- pxor %xmm7,%xmm0
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_one:
- leaq (%r12),%rdi
- leaq 32(%rbp),%rsi
- leaq (%r15),%rdx
- call asm_AES_decrypt
- pxor 32(%rbp),%xmm14
- movdqu %xmm14,(%r13)
- movdqa %xmm15,%xmm14
-
-.Lcbc_dec_done:
- movdqu %xmm14,(%rbx)
- leaq (%rsp),%rax
- pxor %xmm0,%xmm0
-.Lcbc_dec_bzero:
- movdqa %xmm0,0(%rax)
- movdqa %xmm0,16(%rax)
- leaq 32(%rax),%rax
- cmpq %rax,%rbp
- ja .Lcbc_dec_bzero
-
- leaq (%rbp),%rsp
- movq 72(%rsp),%r15
- movq 80(%rsp),%r14
- movq 88(%rsp),%r13
- movq 96(%rsp),%r12
- movq 104(%rsp),%rbx
- movq 112(%rsp),%rax
- leaq 120(%rsp),%rsp
- movq %rax,%rbp
-.Lcbc_dec_epilogue:
- .byte 0xf3,0xc3
-.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
-
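[Editor's note, not part of the patch] bsaes_cbc_encrypt takes the bit-sliced path only for decryption of 128 bytes or more; encryption and short inputs are delegated to asm_AES_cbc_encrypt at the top. The eight-block loop then applies the usual CBC chaining rule, with the saved IV standing in for block zero. A sketch of that rule, assuming a hypothetical one-block decrypt callback:

    # CBC decryption: P[i] = D_K(C[i]) xor C[i-1], with C[-1] = IV.
    # Perl's string '^' XORs byte strings of equal length.
    sub cbc_decrypt {
        my ($decrypt_block, $iv, @ct) = @_;   # all 16-byte strings
        my @pt;
        for my $c (@ct) {
            push @pt, $decrypt_block->($c) ^ $iv;
            $iv = $c;                         # ciphertext feeds forward
        }
        return ($iv, @pt);                    # updated IV, then plaintext
    }
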
-.globl bsaes_ctr32_encrypt_blocks
-.type bsaes_ctr32_encrypt_blocks,@function
-.align 16
-bsaes_ctr32_encrypt_blocks:
- movq %rsp,%rax
-.Lctr_enc_prologue:
- pushq %rbp
- pushq %rbx
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
- leaq -72(%rsp),%rsp
- movq %rsp,%rbp
- movdqu (%r8),%xmm0
- movl 240(%rcx),%eax
- movq %rdi,%r12
- movq %rsi,%r13
- movq %rdx,%r14
- movq %rcx,%r15
- movdqa %xmm0,32(%rbp)
- cmpq $8,%rdx
- jb .Lctr_enc_short
-
- movl %eax,%ebx
- shlq $7,%rax
- subq $96,%rax
- subq %rax,%rsp
-
- movq %rsp,%rax
- movq %r15,%rcx
- movl %ebx,%r10d
- call _bsaes_key_convert
- pxor %xmm6,%xmm7
- movdqa %xmm7,(%rax)
-
- movdqa (%rsp),%xmm8
- leaq .LADD1(%rip),%r11
- movdqa 32(%rbp),%xmm15
- movdqa -32(%r11),%xmm7
-.byte 102,68,15,56,0,199
-.byte 102,68,15,56,0,255
- movdqa %xmm8,(%rsp)
- jmp .Lctr_enc_loop
-.align 16
-.Lctr_enc_loop:
- movdqa %xmm15,32(%rbp)
- movdqa %xmm15,%xmm0
- movdqa %xmm15,%xmm1
- paddd 0(%r11),%xmm0
- movdqa %xmm15,%xmm2
- paddd 16(%r11),%xmm1
- movdqa %xmm15,%xmm3
- paddd 32(%r11),%xmm2
- movdqa %xmm15,%xmm4
- paddd 48(%r11),%xmm3
- movdqa %xmm15,%xmm5
- paddd 64(%r11),%xmm4
- movdqa %xmm15,%xmm6
- paddd 80(%r11),%xmm5
- paddd 96(%r11),%xmm6
-
-
-
- movdqa (%rsp),%xmm8
- leaq 16(%rsp),%rax
- movdqa -16(%r11),%xmm7
- pxor %xmm8,%xmm15
- pxor %xmm8,%xmm0
-.byte 102,68,15,56,0,255
- pxor %xmm8,%xmm1
-.byte 102,15,56,0,199
- pxor %xmm8,%xmm2
-.byte 102,15,56,0,207
- pxor %xmm8,%xmm3
-.byte 102,15,56,0,215
- pxor %xmm8,%xmm4
-.byte 102,15,56,0,223
- pxor %xmm8,%xmm5
-.byte 102,15,56,0,231
- pxor %xmm8,%xmm6
-.byte 102,15,56,0,239
- leaq .LBS0(%rip),%r11
-.byte 102,15,56,0,247
- movl %ebx,%r10d
-
- call _bsaes_encrypt8_bitslice
-
- subq $8,%r14
- jc .Lctr_enc_loop_done
-
- movdqu 0(%r12),%xmm7
- movdqu 16(%r12),%xmm8
- movdqu 32(%r12),%xmm9
- movdqu 48(%r12),%xmm10
- movdqu 64(%r12),%xmm11
- movdqu 80(%r12),%xmm12
- movdqu 96(%r12),%xmm13
- movdqu 112(%r12),%xmm14
- leaq 128(%r12),%r12
- pxor %xmm15,%xmm7
- movdqa 32(%rbp),%xmm15
- pxor %xmm8,%xmm0
- movdqu %xmm7,0(%r13)
- pxor %xmm9,%xmm3
- movdqu %xmm0,16(%r13)
- pxor %xmm10,%xmm5
- movdqu %xmm3,32(%r13)
- pxor %xmm11,%xmm2
- movdqu %xmm5,48(%r13)
- pxor %xmm12,%xmm6
- movdqu %xmm2,64(%r13)
- pxor %xmm13,%xmm1
- movdqu %xmm6,80(%r13)
- pxor %xmm14,%xmm4
- movdqu %xmm1,96(%r13)
- leaq .LADD1(%rip),%r11
- movdqu %xmm4,112(%r13)
- leaq 128(%r13),%r13
- paddd 112(%r11),%xmm15
- jnz .Lctr_enc_loop
-
- jmp .Lctr_enc_done
-.align 16
-.Lctr_enc_loop_done:
- addq $8,%r14
- movdqu 0(%r12),%xmm7
- pxor %xmm7,%xmm15
- movdqu %xmm15,0(%r13)
- cmpq $2,%r14
- jb .Lctr_enc_done
- movdqu 16(%r12),%xmm8
- pxor %xmm8,%xmm0
- movdqu %xmm0,16(%r13)
- je .Lctr_enc_done
- movdqu 32(%r12),%xmm9
- pxor %xmm9,%xmm3
- movdqu %xmm3,32(%r13)
- cmpq $4,%r14
- jb .Lctr_enc_done
- movdqu 48(%r12),%xmm10
- pxor %xmm10,%xmm5
- movdqu %xmm5,48(%r13)
- je .Lctr_enc_done
- movdqu 64(%r12),%xmm11
- pxor %xmm11,%xmm2
- movdqu %xmm2,64(%r13)
- cmpq $6,%r14
- jb .Lctr_enc_done
- movdqu 80(%r12),%xmm12
- pxor %xmm12,%xmm6
- movdqu %xmm6,80(%r13)
- je .Lctr_enc_done
- movdqu 96(%r12),%xmm13
- pxor %xmm13,%xmm1
- movdqu %xmm1,96(%r13)
- jmp .Lctr_enc_done
-
-.align 16
-.Lctr_enc_short:
- leaq 32(%rbp),%rdi
- leaq 48(%rbp),%rsi
- leaq (%r15),%rdx
- call asm_AES_encrypt
- movdqu (%r12),%xmm0
- leaq 16(%r12),%r12
- movl 44(%rbp),%eax
- bswapl %eax
- pxor 48(%rbp),%xmm0
- incl %eax
- movdqu %xmm0,(%r13)
- bswapl %eax
- leaq 16(%r13),%r13
- movl %eax,44(%rsp)
- decq %r14
- jnz .Lctr_enc_short
-
-.Lctr_enc_done:
- leaq (%rsp),%rax
- pxor %xmm0,%xmm0
-.Lctr_enc_bzero:
- movdqa %xmm0,0(%rax)
- movdqa %xmm0,16(%rax)
- leaq 32(%rax),%rax
- cmpq %rax,%rbp
- ja .Lctr_enc_bzero
-
- leaq (%rbp),%rsp
- movq 72(%rsp),%r15
- movq 80(%rsp),%r14
- movq 88(%rsp),%r13
- movq 96(%rsp),%r12
- movq 104(%rsp),%rbx
- movq 112(%rsp),%rax
- leaq 120(%rsp),%rsp
- movq %rax,%rbp
-.Lctr_enc_epilogue:
- .byte 0xf3,0xc3
-.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
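
[Editor's note, not part of the patch] The "32" in bsaes_ctr32_encrypt_blocks is visible in the .Lctr_enc_short path above (bswapl/incl/bswapl on the dword at offset 44) and in the .LADD1-.LADD8 constants: only the last 32 bits of the counter block are incremented, big-endian, without carrying into the rest of the block. A sketch of those semantics with a hypothetical one-block encrypt callback:

    # ctr32: bump only the final dword of the counter (big-endian);
    # keystream block = E_K(counter). Assumes full 16-byte blocks.
    sub ctr32_encrypt {
        my ($encrypt_block, $counter, @blocks) = @_;
        my @out;
        for my $b (@blocks) {
            push @out, $encrypt_block->($counter) ^ $b;
            my $n = unpack 'N', substr($counter, 12, 4);
            substr($counter, 12, 4) = pack 'N', ($n + 1) & 0xffffffff;
        }
        return @out;
    }
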
-.globl bsaes_xts_encrypt
-.type bsaes_xts_encrypt,@function
-.align 16
-bsaes_xts_encrypt:
- movq %rsp,%rax
-.Lxts_enc_prologue:
- pushq %rbp
- pushq %rbx
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
- leaq -72(%rsp),%rsp
- movq %rsp,%rbp
- movq %rdi,%r12
- movq %rsi,%r13
- movq %rdx,%r14
- movq %rcx,%r15
-
- leaq (%r9),%rdi
- leaq 32(%rbp),%rsi
- leaq (%r8),%rdx
- call asm_AES_encrypt
-
- movl 240(%r15),%eax
- movq %r14,%rbx
-
- movl %eax,%edx
- shlq $7,%rax
- subq $96,%rax
- subq %rax,%rsp
-
- movq %rsp,%rax
- movq %r15,%rcx
- movl %edx,%r10d
- call _bsaes_key_convert
- pxor %xmm6,%xmm7
- movdqa %xmm7,(%rax)
-
- andq $-16,%r14
- subq $128,%rsp
- movdqa 32(%rbp),%xmm6
-
- pxor %xmm14,%xmm14
- movdqa .Lxts_magic(%rip),%xmm12
- pcmpgtd %xmm6,%xmm14
-
- subq $128,%r14
- jc .Lxts_enc_short
- jmp .Lxts_enc_loop
-
-.align 16
-.Lxts_enc_loop:
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm15
- movdqa %xmm6,0(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm0
- movdqa %xmm6,16(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 0(%r12),%xmm7
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm1
- movdqa %xmm6,32(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm15
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm2
- movdqa %xmm6,48(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 32(%r12),%xmm9
- pxor %xmm8,%xmm0
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm3
- movdqa %xmm6,64(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 48(%r12),%xmm10
- pxor %xmm9,%xmm1
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm4
- movdqa %xmm6,80(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 64(%r12),%xmm11
- pxor %xmm10,%xmm2
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm5
- movdqa %xmm6,96(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 80(%r12),%xmm12
- pxor %xmm11,%xmm3
- movdqu 96(%r12),%xmm13
- pxor %xmm12,%xmm4
- movdqu 112(%r12),%xmm14
- leaq 128(%r12),%r12
- movdqa %xmm6,112(%rsp)
- pxor %xmm13,%xmm5
- leaq 128(%rsp),%rax
- pxor %xmm14,%xmm6
- movl %edx,%r10d
-
- call _bsaes_encrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm3
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm5
- movdqu %xmm3,32(%r13)
- pxor 64(%rsp),%xmm2
- movdqu %xmm5,48(%r13)
- pxor 80(%rsp),%xmm6
- movdqu %xmm2,64(%r13)
- pxor 96(%rsp),%xmm1
- movdqu %xmm6,80(%r13)
- pxor 112(%rsp),%xmm4
- movdqu %xmm1,96(%r13)
- movdqu %xmm4,112(%r13)
- leaq 128(%r13),%r13
-
- movdqa 112(%rsp),%xmm6
- pxor %xmm14,%xmm14
- movdqa .Lxts_magic(%rip),%xmm12
- pcmpgtd %xmm6,%xmm14
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
-
- subq $128,%r14
- jnc .Lxts_enc_loop
-
-.Lxts_enc_short:
- addq $128,%r14
- jz .Lxts_enc_done
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm15
- movdqa %xmm6,0(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm0
- movdqa %xmm6,16(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 0(%r12),%xmm7
- cmpq $16,%r14
- je .Lxts_enc_1
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm1
- movdqa %xmm6,32(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 16(%r12),%xmm8
- cmpq $32,%r14
- je .Lxts_enc_2
- pxor %xmm7,%xmm15
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm2
- movdqa %xmm6,48(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 32(%r12),%xmm9
- cmpq $48,%r14
- je .Lxts_enc_3
- pxor %xmm8,%xmm0
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm3
- movdqa %xmm6,64(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 48(%r12),%xmm10
- cmpq $64,%r14
- je .Lxts_enc_4
- pxor %xmm9,%xmm1
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm4
- movdqa %xmm6,80(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 64(%r12),%xmm11
- cmpq $80,%r14
- je .Lxts_enc_5
- pxor %xmm10,%xmm2
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm5
- movdqa %xmm6,96(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 80(%r12),%xmm12
- cmpq $96,%r14
- je .Lxts_enc_6
- pxor %xmm11,%xmm3
- movdqu 96(%r12),%xmm13
- pxor %xmm12,%xmm4
- movdqa %xmm6,112(%rsp)
- leaq 112(%r12),%r12
- pxor %xmm13,%xmm5
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_encrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm3
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm5
- movdqu %xmm3,32(%r13)
- pxor 64(%rsp),%xmm2
- movdqu %xmm5,48(%r13)
- pxor 80(%rsp),%xmm6
- movdqu %xmm2,64(%r13)
- pxor 96(%rsp),%xmm1
- movdqu %xmm6,80(%r13)
- movdqu %xmm1,96(%r13)
- leaq 112(%r13),%r13
-
- movdqa 112(%rsp),%xmm6
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_6:
- pxor %xmm11,%xmm3
- leaq 96(%r12),%r12
- pxor %xmm12,%xmm4
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_encrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm3
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm5
- movdqu %xmm3,32(%r13)
- pxor 64(%rsp),%xmm2
- movdqu %xmm5,48(%r13)
- pxor 80(%rsp),%xmm6
- movdqu %xmm2,64(%r13)
- movdqu %xmm6,80(%r13)
- leaq 96(%r13),%r13
-
- movdqa 96(%rsp),%xmm6
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_5:
- pxor %xmm10,%xmm2
- leaq 80(%r12),%r12
- pxor %xmm11,%xmm3
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_encrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm3
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm5
- movdqu %xmm3,32(%r13)
- pxor 64(%rsp),%xmm2
- movdqu %xmm5,48(%r13)
- movdqu %xmm2,64(%r13)
- leaq 80(%r13),%r13
-
- movdqa 80(%rsp),%xmm6
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_4:
- pxor %xmm9,%xmm1
- leaq 64(%r12),%r12
- pxor %xmm10,%xmm2
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_encrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm3
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm5
- movdqu %xmm3,32(%r13)
- movdqu %xmm5,48(%r13)
- leaq 64(%r13),%r13
-
- movdqa 64(%rsp),%xmm6
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_3:
- pxor %xmm8,%xmm0
- leaq 48(%r12),%r12
- pxor %xmm9,%xmm1
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_encrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm3
- movdqu %xmm0,16(%r13)
- movdqu %xmm3,32(%r13)
- leaq 48(%r13),%r13
-
- movdqa 48(%rsp),%xmm6
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_2:
- pxor %xmm7,%xmm15
- leaq 32(%r12),%r12
- pxor %xmm8,%xmm0
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_encrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- leaq 32(%r13),%r13
-
- movdqa 32(%rsp),%xmm6
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_1:
- pxor %xmm15,%xmm7
- leaq 16(%r12),%r12
- movdqa %xmm7,32(%rbp)
- leaq 32(%rbp),%rdi
- leaq 32(%rbp),%rsi
- leaq (%r15),%rdx
- call asm_AES_encrypt
- pxor 32(%rbp),%xmm15
-
-
-
-
-
- movdqu %xmm15,0(%r13)
- leaq 16(%r13),%r13
-
- movdqa 16(%rsp),%xmm6
-
-.Lxts_enc_done:
- andl $15,%ebx
- jz .Lxts_enc_ret
- movq %r13,%rdx
-
-.Lxts_enc_steal:
- movzbl (%r12),%eax
- movzbl -16(%rdx),%ecx
- leaq 1(%r12),%r12
- movb %al,-16(%rdx)
- movb %cl,0(%rdx)
- leaq 1(%rdx),%rdx
- subl $1,%ebx
- jnz .Lxts_enc_steal
-
- movdqu -16(%r13),%xmm15
- leaq 32(%rbp),%rdi
- pxor %xmm6,%xmm15
- leaq 32(%rbp),%rsi
- movdqa %xmm15,32(%rbp)
- leaq (%r15),%rdx
- call asm_AES_encrypt
- pxor 32(%rbp),%xmm6
- movdqu %xmm6,-16(%r13)
-
-.Lxts_enc_ret:
- leaq (%rsp),%rax
- pxor %xmm0,%xmm0
-.Lxts_enc_bzero:
- movdqa %xmm0,0(%rax)
- movdqa %xmm0,16(%rax)
- leaq 32(%rax),%rax
- cmpq %rax,%rbp
- ja .Lxts_enc_bzero
-
- leaq (%rbp),%rsp
- movq 72(%rsp),%r15
- movq 80(%rsp),%r14
- movq 88(%rsp),%r13
- movq 96(%rsp),%r12
- movq 104(%rsp),%rbx
- movq 112(%rsp),%rax
- leaq 120(%rsp),%rsp
- movq %rax,%rbp
-.Lxts_enc_epilogue:
- .byte 0xf3,0xc3
-.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
-
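[Editor's note, not part of the patch] The recurring pshufd $19 / pcmpgtd / pand .Lxts_magic / paddq / pxor sequence above is a branch-free multiplication of the 128-bit XTS tweak by x in GF(2^128): paddq doubles both 64-bit halves, the compare masks propagate the carry between halves, and .Lxts_magic folds the bit shifted off the top back in as 0x87 (reduction modulo x^128+x^7+x^2+x+1). The same update written on two little-endian 64-bit halves, as a sketch:

    # Multiply the XTS tweak by x in GF(2^128); ($lo, $hi) are the
    # little-endian 64-bit halves of the tweak.
    sub xts_tweak_double {
        my ($lo, $hi) = @_;
        my $carry = ($hi >> 63) & 1;               # bit falling off the top
        $hi = (($hi & 0x7fffffffffffffff) << 1) | (($lo >> 63) & 1);
        $lo = (($lo & 0x7fffffffffffffff) << 1) ^ ($carry ? 0x87 : 0);
        return ($lo, $hi);
    }
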
-.globl bsaes_xts_decrypt
-.type bsaes_xts_decrypt,@function
-.align 16
-bsaes_xts_decrypt:
- movq %rsp,%rax
-.Lxts_dec_prologue:
- pushq %rbp
- pushq %rbx
- pushq %r12
- pushq %r13
- pushq %r14
- pushq %r15
- leaq -72(%rsp),%rsp
- movq %rsp,%rbp
- movq %rdi,%r12
- movq %rsi,%r13
- movq %rdx,%r14
- movq %rcx,%r15
-
- leaq (%r9),%rdi
- leaq 32(%rbp),%rsi
- leaq (%r8),%rdx
- call asm_AES_encrypt
-
- movl 240(%r15),%eax
- movq %r14,%rbx
-
- movl %eax,%edx
- shlq $7,%rax
- subq $96,%rax
- subq %rax,%rsp
-
- movq %rsp,%rax
- movq %r15,%rcx
- movl %edx,%r10d
- call _bsaes_key_convert
- pxor (%rsp),%xmm7
- movdqa %xmm6,(%rax)
- movdqa %xmm7,(%rsp)
-
- xorl %eax,%eax
- andq $-16,%r14
- testl $15,%ebx
- setnz %al
- shlq $4,%rax
- subq %rax,%r14
-
- subq $128,%rsp
- movdqa 32(%rbp),%xmm6
-
- pxor %xmm14,%xmm14
- movdqa .Lxts_magic(%rip),%xmm12
- pcmpgtd %xmm6,%xmm14
-
- subq $128,%r14
- jc .Lxts_dec_short
- jmp .Lxts_dec_loop
-
-.align 16
-.Lxts_dec_loop:
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm15
- movdqa %xmm6,0(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm0
- movdqa %xmm6,16(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 0(%r12),%xmm7
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm1
- movdqa %xmm6,32(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 16(%r12),%xmm8
- pxor %xmm7,%xmm15
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm2
- movdqa %xmm6,48(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 32(%r12),%xmm9
- pxor %xmm8,%xmm0
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm3
- movdqa %xmm6,64(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 48(%r12),%xmm10
- pxor %xmm9,%xmm1
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm4
- movdqa %xmm6,80(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 64(%r12),%xmm11
- pxor %xmm10,%xmm2
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm5
- movdqa %xmm6,96(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 80(%r12),%xmm12
- pxor %xmm11,%xmm3
- movdqu 96(%r12),%xmm13
- pxor %xmm12,%xmm4
- movdqu 112(%r12),%xmm14
- leaq 128(%r12),%r12
- movdqa %xmm6,112(%rsp)
- pxor %xmm13,%xmm5
- leaq 128(%rsp),%rax
- pxor %xmm14,%xmm6
- movl %edx,%r10d
-
- call _bsaes_decrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm5
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm3
- movdqu %xmm5,32(%r13)
- pxor 64(%rsp),%xmm1
- movdqu %xmm3,48(%r13)
- pxor 80(%rsp),%xmm6
- movdqu %xmm1,64(%r13)
- pxor 96(%rsp),%xmm2
- movdqu %xmm6,80(%r13)
- pxor 112(%rsp),%xmm4
- movdqu %xmm2,96(%r13)
- movdqu %xmm4,112(%r13)
- leaq 128(%r13),%r13
-
- movdqa 112(%rsp),%xmm6
- pxor %xmm14,%xmm14
- movdqa .Lxts_magic(%rip),%xmm12
- pcmpgtd %xmm6,%xmm14
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
-
- subq $128,%r14
- jnc .Lxts_dec_loop
-
-.Lxts_dec_short:
- addq $128,%r14
- jz .Lxts_dec_done
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm15
- movdqa %xmm6,0(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm0
- movdqa %xmm6,16(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 0(%r12),%xmm7
- cmpq $16,%r14
- je .Lxts_dec_1
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm1
- movdqa %xmm6,32(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 16(%r12),%xmm8
- cmpq $32,%r14
- je .Lxts_dec_2
- pxor %xmm7,%xmm15
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm2
- movdqa %xmm6,48(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 32(%r12),%xmm9
- cmpq $48,%r14
- je .Lxts_dec_3
- pxor %xmm8,%xmm0
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm3
- movdqa %xmm6,64(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 48(%r12),%xmm10
- cmpq $64,%r14
- je .Lxts_dec_4
- pxor %xmm9,%xmm1
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm4
- movdqa %xmm6,80(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 64(%r12),%xmm11
- cmpq $80,%r14
- je .Lxts_dec_5
- pxor %xmm10,%xmm2
- pshufd $19,%xmm14,%xmm13
- pxor %xmm14,%xmm14
- movdqa %xmm6,%xmm5
- movdqa %xmm6,96(%rsp)
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- pcmpgtd %xmm6,%xmm14
- pxor %xmm13,%xmm6
- movdqu 80(%r12),%xmm12
- cmpq $96,%r14
- je .Lxts_dec_6
- pxor %xmm11,%xmm3
- movdqu 96(%r12),%xmm13
- pxor %xmm12,%xmm4
- movdqa %xmm6,112(%rsp)
- leaq 112(%r12),%r12
- pxor %xmm13,%xmm5
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_decrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm5
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm3
- movdqu %xmm5,32(%r13)
- pxor 64(%rsp),%xmm1
- movdqu %xmm3,48(%r13)
- pxor 80(%rsp),%xmm6
- movdqu %xmm1,64(%r13)
- pxor 96(%rsp),%xmm2
- movdqu %xmm6,80(%r13)
- movdqu %xmm2,96(%r13)
- leaq 112(%r13),%r13
-
- movdqa 112(%rsp),%xmm6
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_6:
- pxor %xmm11,%xmm3
- leaq 96(%r12),%r12
- pxor %xmm12,%xmm4
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_decrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm5
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm3
- movdqu %xmm5,32(%r13)
- pxor 64(%rsp),%xmm1
- movdqu %xmm3,48(%r13)
- pxor 80(%rsp),%xmm6
- movdqu %xmm1,64(%r13)
- movdqu %xmm6,80(%r13)
- leaq 96(%r13),%r13
-
- movdqa 96(%rsp),%xmm6
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_5:
- pxor %xmm10,%xmm2
- leaq 80(%r12),%r12
- pxor %xmm11,%xmm3
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_decrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm5
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm3
- movdqu %xmm5,32(%r13)
- pxor 64(%rsp),%xmm1
- movdqu %xmm3,48(%r13)
- movdqu %xmm1,64(%r13)
- leaq 80(%r13),%r13
-
- movdqa 80(%rsp),%xmm6
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_4:
- pxor %xmm9,%xmm1
- leaq 64(%r12),%r12
- pxor %xmm10,%xmm2
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_decrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm5
- movdqu %xmm0,16(%r13)
- pxor 48(%rsp),%xmm3
- movdqu %xmm5,32(%r13)
- movdqu %xmm3,48(%r13)
- leaq 64(%r13),%r13
-
- movdqa 64(%rsp),%xmm6
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_3:
- pxor %xmm8,%xmm0
- leaq 48(%r12),%r12
- pxor %xmm9,%xmm1
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_decrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- pxor 32(%rsp),%xmm5
- movdqu %xmm0,16(%r13)
- movdqu %xmm5,32(%r13)
- leaq 48(%r13),%r13
-
- movdqa 48(%rsp),%xmm6
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_2:
- pxor %xmm7,%xmm15
- leaq 32(%r12),%r12
- pxor %xmm8,%xmm0
- leaq 128(%rsp),%rax
- movl %edx,%r10d
-
- call _bsaes_decrypt8
-
- pxor 0(%rsp),%xmm15
- pxor 16(%rsp),%xmm0
- movdqu %xmm15,0(%r13)
- movdqu %xmm0,16(%r13)
- leaq 32(%r13),%r13
-
- movdqa 32(%rsp),%xmm6
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_1:
- pxor %xmm15,%xmm7
- leaq 16(%r12),%r12
- movdqa %xmm7,32(%rbp)
- leaq 32(%rbp),%rdi
- leaq 32(%rbp),%rsi
- leaq (%r15),%rdx
- call asm_AES_decrypt
- pxor 32(%rbp),%xmm15
-
-
-
-
-
- movdqu %xmm15,0(%r13)
- leaq 16(%r13),%r13
-
- movdqa 16(%rsp),%xmm6
-
-.Lxts_dec_done:
- andl $15,%ebx
- jz .Lxts_dec_ret
-
- pxor %xmm14,%xmm14
- movdqa .Lxts_magic(%rip),%xmm12
- pcmpgtd %xmm6,%xmm14
- pshufd $19,%xmm14,%xmm13
- movdqa %xmm6,%xmm5
- paddq %xmm6,%xmm6
- pand %xmm12,%xmm13
- movdqu (%r12),%xmm15
- pxor %xmm13,%xmm6
-
- leaq 32(%rbp),%rdi
- pxor %xmm6,%xmm15
- leaq 32(%rbp),%rsi
- movdqa %xmm15,32(%rbp)
- leaq (%r15),%rdx
- call asm_AES_decrypt
- pxor 32(%rbp),%xmm6
- movq %r13,%rdx
- movdqu %xmm6,(%r13)
-
-.Lxts_dec_steal:
- movzbl 16(%r12),%eax
- movzbl (%rdx),%ecx
- leaq 1(%r12),%r12
- movb %al,(%rdx)
- movb %cl,16(%rdx)
- leaq 1(%rdx),%rdx
- subl $1,%ebx
- jnz .Lxts_dec_steal
-
- movdqu (%r13),%xmm15
- leaq 32(%rbp),%rdi
- pxor %xmm5,%xmm15
- leaq 32(%rbp),%rsi
- movdqa %xmm15,32(%rbp)
- leaq (%r15),%rdx
- call asm_AES_decrypt
- pxor 32(%rbp),%xmm5
- movdqu %xmm5,(%r13)
-
-.Lxts_dec_ret:
- leaq (%rsp),%rax
- pxor %xmm0,%xmm0
-.Lxts_dec_bzero:
- movdqa %xmm0,0(%rax)
- movdqa %xmm0,16(%rax)
- leaq 32(%rax),%rax
- cmpq %rax,%rbp
- ja .Lxts_dec_bzero
-
- leaq (%rbp),%rsp
- movq 72(%rsp),%r15
- movq 80(%rsp),%r14
- movq 88(%rsp),%r13
- movq 96(%rsp),%r12
- movq 104(%rsp),%rbx
- movq 112(%rsp),%rax
- leaq 120(%rsp),%rsp
- movq %rax,%rbp
-.Lxts_dec_epilogue:
- .byte 0xf3,0xc3
-.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
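
[Editor's note, not part of the patch] The .Lxts_enc_steal / .Lxts_dec_steal byte loops implement XTS ciphertext stealing for inputs that are not a multiple of 16 bytes: the final partial block borrows its missing bytes from the previous block's output, and the displaced bytes become the truncated last ciphertext. For the encrypt direction, the data shuffle looks roughly like this (a sketch under the standard XTS-CTS convention, not lifted from the patch):

    # $last_full_ct: ciphertext of the last full block (16 bytes);
    # $tail_pt: the n-byte plaintext tail, n < 16.
    sub xts_steal_layout {
        my ($last_full_ct, $tail_pt) = @_;
        my $n = length $tail_pt;
        my $final_ct  = substr($last_full_ct, 0, $n);         # emitted last, truncated
        my $new_block = $tail_pt . substr($last_full_ct, $n); # re-encrypted with next tweak
        return ($new_block, $final_ct);
    }
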
-.type _bsaes_const,@object
-.align 64
-_bsaes_const:
-.LM0ISR:
-.quad 0x0a0e0206070b0f03, 0x0004080c0d010509
-.LISRM0:
-.quad 0x01040b0e0205080f, 0x0306090c00070a0d
-.LISR:
-.quad 0x0504070602010003, 0x0f0e0d0c080b0a09
-.LBS0:
-.quad 0x5555555555555555, 0x5555555555555555
-.LBS1:
-.quad 0x3333333333333333, 0x3333333333333333
-.LBS2:
-.quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
-.LSR:
-.quad 0x0504070600030201, 0x0f0e0d0c0a09080b
-.LSRM0:
-.quad 0x0304090e00050a0f, 0x01060b0c0207080d
-.LM0SR:
-.quad 0x0a0e02060f03070b, 0x0004080c05090d01
-.LSWPUP:
-.quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
-.LSWPUPM0SR:
-.quad 0x0a0d02060c03070b, 0x0004080f05090e01
-.LADD1:
-.quad 0x0000000000000000, 0x0000000100000000
-.LADD2:
-.quad 0x0000000000000000, 0x0000000200000000
-.LADD3:
-.quad 0x0000000000000000, 0x0000000300000000
-.LADD4:
-.quad 0x0000000000000000, 0x0000000400000000
-.LADD5:
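[Editor's note, not part of the patch] The long psrlq/pxor/pand/pxor/psllq/pxor runs at the entry and exit of _bsaes_encrypt8 are a bit-matrix transpose built from a single primitive, commonly called swapmove: exchange the bit groups selected by a mask between two registers at a given shift distance. With masks 0x55…, 0x33…, 0x0f… (.LBS0-.LBS2) and shifts 1, 2, 4, it converts eight packed AES states into eight bit planes and back. The primitive on plain integers, as a sketch:

    # swapmove: swap the bits of $b selected by $mask with the bits of
    # $a selected by ($mask << $n); one layer of the bitslice transpose.
    sub swapmove {
        my ($a, $b, $n, $mask) = @_;
        my $t = (($a >> $n) ^ $b) & $mask;
        return ($a ^ ($t << $n), $b ^ $t);
    }
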
-.quad 0x0000000000000000, 0x0000000500000000
-.LADD6:
-.quad 0x0000000000000000, 0x0000000600000000
-.LADD7:
-.quad 0x0000000000000000, 0x0000000700000000
-.LADD8:
-.quad 0x0000000000000000, 0x0000000800000000
-.Lxts_magic:
-.long 0x87,0,1,0
-.Lmasks:
-.quad 0x0101010101010101, 0x0101010101010101
-.quad 0x0202020202020202, 0x0202020202020202
-.quad 0x0404040404040404, 0x0404040404040404
-.quad 0x0808080808080808, 0x0808080808080808
-.LM0:
-.quad 0x02060a0e03070b0f, 0x0004080c0105090d
-.L63:
-.quad 0x6363636363636363, 0x6363636363636363
-.byte 66,105,116,45,115,108,105,99,101,100,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,69,109,105,108,105,97,32,75,195,164,115,112,101,114,44,32,80,101,116,101,114,32,83,99,104,119,97,98,101,44,32,65,110,100,121,32,80,111,108,121,97,107,111,118,0
-.align 64
-.size _bsaes_const,.-_bsaes_const
diff --git a/app/openssl/crypto/aes/asm/bsaes-x86_64.pl b/app/openssl/crypto/aes/asm/bsaes-x86_64.pl
deleted file mode 100644
index 41b90f08..00000000
--- a/app/openssl/crypto/aes/asm/bsaes-x86_64.pl
+++ /dev/null
@@ -1,3108 +0,0 @@
-#!/usr/bin/env perl
-
-###################################################################
-### AES-128 [originally in CTR mode] ###
-### bitsliced implementation for Intel Core 2 processors ###
-### requires support of SSE extensions up to SSSE3 ###
-### Author: Emilia Käsper and Peter Schwabe ###
-### Date: 2009-03-19 ###
-### Public domain ###
-### ###
-### See http://homes.esat.kuleuven.be/~ekasper/#software for ###
-### further information. ###
-###################################################################
-#
-# September 2011.
-#
-# Started as a transliteration of the original code to "perlasm", it
-# has since undergone the following changes:
-#
-# - code was made position-independent;
-# - rounds were folded into a loop, resulting in a >5x size reduction,
-#	from 12.5KB to 2.2KB;
-# - the above was possible thanks to a mixcolumns() modification that
-#	allowed its output to be fed back to aesenc[last]; this was
-#	achieved at the cost of two additional inter-register moves;
-# - some instruction reordering and interleaving;
-# - this module doesn't implement a key setup subroutine; instead it
-#	relies on conversion of the "conventional" key schedule as
-#	returned by AES_set_encrypt_key (see discussion below);
-# - first and last round keys are treated differently, which made it
-#	possible to skip one shiftrows(), shrink the bit-sliced key
-#	schedule and speed up conversion by 22%;
-# - support for 192- and 256-bit keys was added;
-#
-# Resulting performance in CPU cycles spent to encrypt one byte out
-# of 4096-byte buffer with 128-bit key is:
-#
-# Emilia's this(*) difference
-#
-# Core 2 9.30 8.69 +7%
-# Nehalem(**) 7.63 6.98 +9%
-# Atom 17.1 17.4 -2%(***)
-#
-# (*)	The comparison is not completely fair, because "this" is ECB,
-#	i.e. no extra processing such as counter value calculation and
-#	xor-ing of the input, as in Emilia's CTR implementation, is
-#	performed. However, the CTR calculations account for no more
-#	than 1% of total time, so the comparison is *rather* fair.
-#
-# (**) Results were collected on Westmere, which is considered to
-# be equivalent to Nehalem for this code.
-#
-# (***) The slowdown on Atom is rather strange per se, because the
-#	original implementation has a number of 9+-byte instructions,
-#	which are bad for the Atom front-end, and which I eliminated
-#	completely. In an attempt to address the deterioration, sbox()
-#	was tested in the FP SIMD "domain" (movaps instead of movdqa,
-#	xorps instead of pxor, etc.). While it resulted in a nominal 4%
-#	improvement on Atom, it hurt Westmere by more than a 2x factor.
-#
-# As for the key schedule conversion subroutine: the OpenSSL interface
-# relies on per-invocation, on-the-fly conversion. This naturally has
-# an impact on performance, especially for short inputs. Conversion
-# time in CPU cycles and its ratio to CPU cycles spent in 8x block
-# function is:
-#
-# conversion conversion/8x block
-# Core 2 240 0.22
-# Nehalem 180 0.20
-# Atom 430 0.19
-#
-# The ratio values mean that one-shot 128-byte inputs are processed
-# 16-18% slower, 256-byte inputs 9-10%, 384-byte inputs 6-7%, etc.
-# Keep in mind as well that input sizes not divisible by 128 are
-# *effectively* slower, especially the shortest ones: consecutive
-# 144-byte blocks are processed 44% slower than one would expect,
-# 272-byte ones 29%, 400-byte ones 22%, etc. Yet, despite all these
-# "shortcomings", it's still faster than the ["hyper-threading-safe"
-# code path in] aes-x86_64.pl on all lengths above 64 bytes...
-#
-# October 2011.
-#
-# Add decryption procedure. Performance in CPU cycles spent to decrypt
-# one byte out of 4096-byte buffer with 128-bit key is:
-#
-# Core 2 9.83
-# Nehalem 7.74
-# Atom 19.0
-#
-# November 2011.
-#
-# Add bsaes_xts_[en|de]crypt. Performance on blocks shorter than 80
-# bytes is suboptimal, but XTS is meant to be used with larger blocks...
-#
-# <appro@openssl.org>
-
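[Editor's note, not part of the patch] The conversion/8x-block ratios in the table above translate directly into the "16-18% slower" figure quoted for one-shot 128-byte inputs: with ratio r, the fraction of total time spent in key conversion is r/(1+r). A two-line check:

    # conversion/8x-block ratio r  =>  one-shot overhead r/(1+r)
    printf "r=%.2f -> %4.1f%% of time in key conversion\n", $_, 100*$_/(1+$_)
        for (0.19, 0.20, 0.22);    # Atom, Nehalem, Core 2 ratios from the table
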
-$flavour = shift;
-$output = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-my ($inp,$out,$len,$key,$ivp)=("%rdi","%rsi","%rdx","%rcx","%r8");	# %r8 = IV/counter pointer, per the generated code above
-my @XMM=map("%xmm$_",(15,0..14)); # best on Atom, +10% over (0..15)
-my $ecb=0; # suppress unreferenced ECB subroutines, spare some space...
-
-{
-my ($key,$rounds,$const)=("%rax","%r10d","%r11");
-
-sub Sbox {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
-my @b=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
- &InBasisChange (@b);
- &Inv_GF256 (@b[6,5,0,3,7,1,4,2],@t,@s);
- &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
-}
-
-sub InBasisChange {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb
-my @b=@_[0..7];
-$code.=<<___;
- pxor @b[6], @b[5]
- pxor @b[1], @b[2]
- pxor @b[0], @b[3]
- pxor @b[2], @b[6]
- pxor @b[0], @b[5]
-
- pxor @b[3], @b[6]
- pxor @b[7], @b[3]
- pxor @b[5], @b[7]
- pxor @b[4], @b[3]
- pxor @b[5], @b[4]
- pxor @b[1], @b[3]
-
- pxor @b[7], @b[2]
- pxor @b[5], @b[1]
-___
-}
-
-sub OutBasisChange {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
-my @b=@_[0..7];
-$code.=<<___;
- pxor @b[6], @b[0]
- pxor @b[4], @b[1]
- pxor @b[0], @b[2]
- pxor @b[6], @b[4]
- pxor @b[1], @b[6]
-
- pxor @b[5], @b[1]
- pxor @b[3], @b[5]
- pxor @b[7], @b[3]
- pxor @b[5], @b[7]
- pxor @b[5], @b[2]
-
- pxor @b[7], @b[4]
-___
-}
-
-sub InvSbox {
-# input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
-# output in lsb > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
-my @b=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
- &InvInBasisChange (@b);
- &Inv_GF256 (@b[5,1,2,6,3,7,0,4],@t,@s);
- &InvOutBasisChange (@b[3,7,0,4,5,1,2,6]);
-}
-
-sub InvInBasisChange { # OutBasisChange in reverse
-my @b=@_[5,1,2,6,3,7,0,4];
-$code.=<<___
- pxor @b[7], @b[4]
-
- pxor @b[5], @b[7]
- pxor @b[5], @b[2]
- pxor @b[7], @b[3]
- pxor @b[3], @b[5]
- pxor @b[5], @b[1]
-
- pxor @b[1], @b[6]
- pxor @b[0], @b[2]
- pxor @b[6], @b[4]
- pxor @b[6], @b[0]
- pxor @b[4], @b[1]
-___
-}
-
-sub InvOutBasisChange { # InBasisChange in reverse
-my @b=@_[2,5,7,3,6,1,0,4];
-$code.=<<___;
- pxor @b[5], @b[1]
- pxor @b[7], @b[2]
-
- pxor @b[1], @b[3]
- pxor @b[5], @b[4]
- pxor @b[5], @b[7]
- pxor @b[4], @b[3]
- pxor @b[0], @b[5]
- pxor @b[7], @b[3]
- pxor @b[2], @b[6]
- pxor @b[1], @b[2]
- pxor @b[3], @b[6]
-
- pxor @b[0], @b[3]
- pxor @b[6], @b[5]
-___
-}
-
-sub Mul_GF4 {
-#;*************************************************************
-#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
-#;*************************************************************
-my ($x0,$x1,$y0,$y1,$t0)=@_;
-$code.=<<___;
- movdqa $y0, $t0
- pxor $y1, $t0
- pand $x0, $t0
- pxor $x1, $x0
- pand $y0, $x1
- pand $y1, $x0
- pxor $x1, $x0
- pxor $t0, $x1
-___
-}
-
-sub Mul_GF4_N { # not used, see next subroutine
-# multiply and scale by N
-my ($x0,$x1,$y0,$y1,$t0)=@_;
-$code.=<<___;
- movdqa $y0, $t0
- pxor $y1, $t0
- pand $x0, $t0
- pxor $x1, $x0
- pand $y0, $x1
- pand $y1, $x0
- pxor $x0, $x1
- pxor $t0, $x0
-___
-}
-
-sub Mul_GF4_N_GF4 {
-# interleaved Mul_GF4_N and Mul_GF4
-my ($x0,$x1,$y0,$y1,$t0,
- $x2,$x3,$y2,$y3,$t1)=@_;
-$code.=<<___;
- movdqa $y0, $t0
- movdqa $y2, $t1
- pxor $y1, $t0
- pxor $y3, $t1
- pand $x0, $t0
- pand $x2, $t1
- pxor $x1, $x0
- pxor $x3, $x2
- pand $y0, $x1
- pand $y2, $x3
- pand $y1, $x0
- pand $y3, $x2
- pxor $x0, $x1
- pxor $x3, $x2
- pxor $t0, $x0
- pxor $t1, $x3
-___
-}
-sub Mul_GF16_2 {
-my @x=@_[0..7];
-my @y=@_[8..11];
-my @t=@_[12..15];
-$code.=<<___;
- movdqa @x[0], @t[0]
- movdqa @x[1], @t[1]
-___
- &Mul_GF4 (@x[0], @x[1], @y[0], @y[1], @t[2]);
-$code.=<<___;
- pxor @x[2], @t[0]
- pxor @x[3], @t[1]
- pxor @y[2], @y[0]
- pxor @y[3], @y[1]
-___
- Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
- @x[2], @x[3], @y[2], @y[3], @t[2]);
-$code.=<<___;
- pxor @t[0], @x[0]
- pxor @t[0], @x[2]
- pxor @t[1], @x[1]
- pxor @t[1], @x[3]
-
- movdqa @x[4], @t[0]
- movdqa @x[5], @t[1]
- pxor @x[6], @t[0]
- pxor @x[7], @t[1]
-___
- &Mul_GF4_N_GF4 (@t[0], @t[1], @y[0], @y[1], @t[3],
- @x[6], @x[7], @y[2], @y[3], @t[2]);
-$code.=<<___;
- pxor @y[2], @y[0]
- pxor @y[3], @y[1]
-___
- &Mul_GF4 (@x[4], @x[5], @y[0], @y[1], @t[3]);
-$code.=<<___;
- pxor @t[0], @x[4]
- pxor @t[0], @x[6]
- pxor @t[1], @x[5]
- pxor @t[1], @x[7]
-___
-}
-sub Inv_GF256 {
-#;********************************************************************
-#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144) *
-#;********************************************************************
-my @x=@_[0..7];
-my @t=@_[8..11];
-my @s=@_[12..15];
-# direct optimizations from hardware
-$code.=<<___;
- movdqa @x[4], @t[3]
- movdqa @x[5], @t[2]
- movdqa @x[1], @t[1]
- movdqa @x[7], @s[1]
- movdqa @x[0], @s[0]
-
- pxor @x[6], @t[3]
- pxor @x[7], @t[2]
- pxor @x[3], @t[1]
- movdqa @t[3], @s[2]
- pxor @x[6], @s[1]
- movdqa @t[2], @t[0]
- pxor @x[2], @s[0]
- movdqa @t[3], @s[3]
-
- por @t[1], @t[2]
- por @s[0], @t[3]
- pxor @t[0], @s[3]
- pand @s[0], @s[2]
- pxor @t[1], @s[0]
- pand @t[1], @t[0]
- pand @s[0], @s[3]
- movdqa @x[3], @s[0]
- pxor @x[2], @s[0]
- pand @s[0], @s[1]
- pxor @s[1], @t[3]
- pxor @s[1], @t[2]
- movdqa @x[4], @s[1]
- movdqa @x[1], @s[0]
- pxor @x[5], @s[1]
- pxor @x[0], @s[0]
- movdqa @s[1], @t[1]
- pand @s[0], @s[1]
- por @s[0], @t[1]
- pxor @s[1], @t[0]
- pxor @s[3], @t[3]
- pxor @s[2], @t[2]
- pxor @s[3], @t[1]
- movdqa @x[7], @s[0]
- pxor @s[2], @t[0]
- movdqa @x[6], @s[1]
- pxor @s[2], @t[1]
- movdqa @x[5], @s[2]
- pand @x[3], @s[0]
- movdqa @x[4], @s[3]
- pand @x[2], @s[1]
- pand @x[1], @s[2]
- por @x[0], @s[3]
- pxor @s[0], @t[3]
- pxor @s[1], @t[2]
- pxor @s[2], @t[1]
- pxor @s[3], @t[0]
-
- #Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
-
- # new smaller inversion
-
- movdqa @t[3], @s[0]
- pand @t[1], @t[3]
- pxor @t[2], @s[0]
-
- movdqa @t[0], @s[2]
- movdqa @s[0], @s[3]
- pxor @t[3], @s[2]
- pand @s[2], @s[3]
-
- movdqa @t[1], @s[1]
- pxor @t[2], @s[3]
- pxor @t[0], @s[1]
-
- pxor @t[2], @t[3]
-
- pand @t[3], @s[1]
-
- movdqa @s[2], @t[2]
- pxor @t[0], @s[1]
-
- pxor @s[1], @t[2]
- pxor @s[1], @t[1]
-
- pand @t[0], @t[2]
-
- pxor @t[2], @s[2]
- pxor @t[2], @t[1]
-
- pand @s[3], @s[2]
-
- pxor @s[0], @s[2]
-___
-# output in s3, s2, s1, t1
-
-# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
-
-# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
- &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
-
-### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
-}
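
Inv_GF256 is the cipher's only non-linear step: it inverts all 128 S-box inputs at once through the GF(((2^2)^2)^2) tower, sharing terms as the comments note. Per byte it computes nothing more exotic than the GF(2^8) inverse; a scalar reference in C, taking the slow but obviously correct exponentiation route (names illustrative, not this file's interface):

#include <stdint.h>

/* GF(2^8) multiply modulo the AES polynomial x^8 + x^4 + x^3 + x + 1. */
static uint8_t gmul(uint8_t a, uint8_t b)
{
    uint8_t r = 0;
    while (b) {
        if (b & 1)
            r ^= a;
        a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
        b >>= 1;
    }
    return r;
}

/* Inverse via Fermat: x^254 = x^-1 in GF(2^8); 0 maps to 0 as AES requires. */
static uint8_t ginv(uint8_t x)
{
    uint8_t r = x;
    for (int i = 0; i < 253; i++)    /* r = x^254 after 253 multiplies */
        r = gmul(r, x);
    return r;
}
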
-
-# AES linear components
-
-sub ShiftRows {
-my @x=@_[0..7];
-my $mask=pop;
-$code.=<<___;
- pxor 0x00($key),@x[0]
- pxor 0x10($key),@x[1]
- pshufb $mask,@x[0]
- pxor 0x20($key),@x[2]
- pshufb $mask,@x[1]
- pxor 0x30($key),@x[3]
- pshufb $mask,@x[2]
- pxor 0x40($key),@x[4]
- pshufb $mask,@x[3]
- pxor 0x50($key),@x[5]
- pshufb $mask,@x[4]
- pxor 0x60($key),@x[6]
- pshufb $mask,@x[5]
- pxor 0x70($key),@x[7]
- pshufb $mask,@x[6]
- lea 0x80($key),$key
- pshufb $mask,@x[7]
-___
-}
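
Despite its name, the subroutine above fuses AddRoundKey (the eight pxor's against $key) with ShiftRows, which in bit-sliced form is the same pshufb byte permutation applied to every bit plane. The permutation itself is the textbook one; a scalar sketch on AES's column-major state (a hypothetical helper, not this file's interface):

#include <stdint.h>
#include <string.h>

/* ShiftRows on the column-major 16-byte AES state: row r rotates left
 * by r bytes, i.e. new[r][c] = old[r][(c + r) % 4]. */
static void shift_rows(uint8_t s[16])
{
    uint8_t t[16];
    for (int c = 0; c < 4; c++)
        for (int r = 0; r < 4; r++)
            t[4*c + r] = s[4*((c + r) % 4) + r];
    memcpy(s, t, 16);
}
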
-
-sub MixColumns {
-# modified to emit output in order suitable for feeding back to aesenc[last]
-my @x=@_[0..7];
-my @t=@_[8..15];
-my $inv=@_[16]; # optional
-$code.=<<___;
- pshufd \$0x93, @x[0], @t[0] # x0 <<< 32
- pshufd \$0x93, @x[1], @t[1]
- pxor @t[0], @x[0] # x0 ^ (x0 <<< 32)
- pshufd \$0x93, @x[2], @t[2]
- pxor @t[1], @x[1]
- pshufd \$0x93, @x[3], @t[3]
- pxor @t[2], @x[2]
- pshufd \$0x93, @x[4], @t[4]
- pxor @t[3], @x[3]
- pshufd \$0x93, @x[5], @t[5]
- pxor @t[4], @x[4]
- pshufd \$0x93, @x[6], @t[6]
- pxor @t[5], @x[5]
- pshufd \$0x93, @x[7], @t[7]
- pxor @t[6], @x[6]
- pxor @t[7], @x[7]
-
- pxor @x[0], @t[1]
- pxor @x[7], @t[0]
- pxor @x[7], @t[1]
- pshufd \$0x4E, @x[0], @x[0] # (x0 ^ (x0 <<< 32)) <<< 64
- pxor @x[1], @t[2]
- pshufd \$0x4E, @x[1], @x[1]
- pxor @x[4], @t[5]
- pxor @t[0], @x[0]
- pxor @x[5], @t[6]
- pxor @t[1], @x[1]
- pxor @x[3], @t[4]
- pshufd \$0x4E, @x[4], @t[0]
- pxor @x[6], @t[7]
- pshufd \$0x4E, @x[5], @t[1]
- pxor @x[2], @t[3]
- pshufd \$0x4E, @x[3], @x[4]
- pxor @x[7], @t[3]
- pshufd \$0x4E, @x[7], @x[5]
- pxor @x[7], @t[4]
- pshufd \$0x4E, @x[6], @x[3]
- pxor @t[4], @t[0]
- pshufd \$0x4E, @x[2], @x[6]
- pxor @t[5], @t[1]
-___
-$code.=<<___ if (!$inv);
- pxor @t[3], @x[4]
- pxor @t[7], @x[5]
- pxor @t[6], @x[3]
- movdqa @t[0], @x[2]
- pxor @t[2], @x[6]
- movdqa @t[1], @x[7]
-___
-$code.=<<___ if ($inv);
- pxor @x[4], @t[3]
- pxor @t[7], @x[5]
- pxor @x[3], @t[6]
- movdqa @t[0], @x[3]
- pxor @t[2], @x[6]
- movdqa @t[6], @x[2]
- movdqa @t[1], @x[7]
- movdqa @x[6], @x[4]
- movdqa @t[3], @x[6]
-___
-}
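
The pshufd \$0x93 / \$0x4E pairs rotate each bit plane by 32 and 64 bits, which is how the circulant MixColumns matrix is applied to all eight planes at once (the "x0 ^ (x0 <<< 32)" comments spell this out). Per column the map is the familiar multiply by (02,03,01,01); a scalar C reference using the xtime trick (helper names are illustrative):

#include <stdint.h>

static uint8_t xtime(uint8_t x)        /* multiply by 0x02 in GF(2^8) */
{
    return (uint8_t)((x << 1) ^ ((x >> 7) * 0x1b));
}

/* One MixColumns column: a'[i] = a[i] ^ t ^ xtime(a[i] ^ a[i+1]),
 * where t is the XOR of the whole column. */
static void mix_column(uint8_t a[4])
{
    uint8_t t  = a[0] ^ a[1] ^ a[2] ^ a[3];
    uint8_t a0 = a[0];                 /* a[0] is overwritten first */
    for (int i = 0; i < 3; i++)
        a[i] ^= t ^ xtime(a[i] ^ a[i + 1]);
    a[3] ^= t ^ xtime(a[3] ^ a0);
}
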
-
-sub InvMixColumns_orig {
-my @x=@_[0..7];
-my @t=@_[8..15];
-
-$code.=<<___;
- # multiplication by 0x0e
- pshufd \$0x93, @x[7], @t[7]
- movdqa @x[2], @t[2]
- pxor @x[5], @x[7] # 7 5
- pxor @x[5], @x[2] # 2 5
- pshufd \$0x93, @x[0], @t[0]
- movdqa @x[5], @t[5]
- pxor @x[0], @x[5] # 5 0 [1]
- pxor @x[1], @x[0] # 0 1
- pshufd \$0x93, @x[1], @t[1]
- pxor @x[2], @x[1] # 1 25
- pxor @x[6], @x[0] # 01 6 [2]
- pxor @x[3], @x[1] # 125 3 [4]
- pshufd \$0x93, @x[3], @t[3]
- pxor @x[0], @x[2] # 25 016 [3]
- pxor @x[7], @x[3] # 3 75
- pxor @x[6], @x[7] # 75 6 [0]
- pshufd \$0x93, @x[6], @t[6]
- movdqa @x[4], @t[4]
- pxor @x[4], @x[6] # 6 4
- pxor @x[3], @x[4] # 4 375 [6]
- pxor @x[7], @x[3] # 375 756=36
- pxor @t[5], @x[6] # 64 5 [7]
- pxor @t[2], @x[3] # 36 2
- pxor @t[4], @x[3] # 362 4 [5]
- pshufd \$0x93, @t[5], @t[5]
-___
- my @y = @x[7,5,0,2,1,3,4,6];
-$code.=<<___;
- # multiplication by 0x0b
- pxor @y[0], @y[1]
- pxor @t[0], @y[0]
- pxor @t[1], @y[1]
- pshufd \$0x93, @t[2], @t[2]
- pxor @t[5], @y[0]
- pxor @t[6], @y[1]
- pxor @t[7], @y[0]
- pshufd \$0x93, @t[4], @t[4]
- pxor @t[6], @t[7] # clobber t[7]
- pxor @y[0], @y[1]
-
- pxor @t[0], @y[3]
- pshufd \$0x93, @t[0], @t[0]
- pxor @t[1], @y[2]
- pxor @t[1], @y[4]
- pxor @t[2], @y[2]
- pshufd \$0x93, @t[1], @t[1]
- pxor @t[2], @y[3]
- pxor @t[2], @y[5]
- pxor @t[7], @y[2]
- pshufd \$0x93, @t[2], @t[2]
- pxor @t[3], @y[3]
- pxor @t[3], @y[6]
- pxor @t[3], @y[4]
- pshufd \$0x93, @t[3], @t[3]
- pxor @t[4], @y[7]
- pxor @t[4], @y[5]
- pxor @t[7], @y[7]
- pxor @t[5], @y[3]
- pxor @t[4], @y[4]
- pxor @t[5], @t[7] # clobber t[7] even more
-
- pxor @t[7], @y[5]
- pshufd \$0x93, @t[4], @t[4]
- pxor @t[7], @y[6]
- pxor @t[7], @y[4]
-
- pxor @t[5], @t[7]
- pshufd \$0x93, @t[5], @t[5]
- pxor @t[6], @t[7] # restore t[7]
-
- # multiplication by 0x0d
- pxor @y[7], @y[4]
- pxor @t[4], @y[7]
- pshufd \$0x93, @t[6], @t[6]
- pxor @t[0], @y[2]
- pxor @t[5], @y[7]
- pxor @t[2], @y[2]
- pshufd \$0x93, @t[7], @t[7]
-
- pxor @y[1], @y[3]
- pxor @t[1], @y[1]
- pxor @t[0], @y[0]
- pxor @t[0], @y[3]
- pxor @t[5], @y[1]
- pxor @t[5], @y[0]
- pxor @t[7], @y[1]
- pshufd \$0x93, @t[0], @t[0]
- pxor @t[6], @y[0]
- pxor @y[1], @y[3]
- pxor @t[1], @y[4]
- pshufd \$0x93, @t[1], @t[1]
-
- pxor @t[7], @y[7]
- pxor @t[2], @y[4]
- pxor @t[2], @y[5]
- pshufd \$0x93, @t[2], @t[2]
- pxor @t[6], @y[2]
- pxor @t[3], @t[6] # clobber t[6]
- pxor @y[7], @y[4]
- pxor @t[6], @y[3]
-
- pxor @t[6], @y[6]
- pxor @t[5], @y[5]
- pxor @t[4], @y[6]
- pshufd \$0x93, @t[4], @t[4]
- pxor @t[6], @y[5]
- pxor @t[7], @y[6]
- pxor @t[3], @t[6] # restore t[6]
-
- pshufd \$0x93, @t[5], @t[5]
- pshufd \$0x93, @t[6], @t[6]
- pshufd \$0x93, @t[7], @t[7]
- pshufd \$0x93, @t[3], @t[3]
-
- # multiplication by 0x09
- pxor @y[1], @y[4]
- pxor @y[1], @t[1] # t[1]=y[1]
- pxor @t[5], @t[0] # clobber t[0]
- pxor @t[5], @t[1]
- pxor @t[0], @y[3]
- pxor @y[0], @t[0] # t[0]=y[0]
- pxor @t[6], @t[1]
- pxor @t[7], @t[6] # clobber t[6]
- pxor @t[1], @y[4]
- pxor @t[4], @y[7]
- pxor @y[4], @t[4] # t[4]=y[4]
- pxor @t[3], @y[6]
- pxor @y[3], @t[3] # t[3]=y[3]
- pxor @t[2], @y[5]
- pxor @y[2], @t[2] # t[2]=y[2]
- pxor @t[7], @t[3]
- pxor @y[5], @t[5] # t[5]=y[5]
- pxor @t[6], @t[2]
- pxor @t[6], @t[5]
- pxor @y[6], @t[6] # t[6]=y[6]
- pxor @y[7], @t[7] # t[7]=y[7]
-
- movdqa @t[0],@XMM[0]
- movdqa @t[1],@XMM[1]
- movdqa @t[2],@XMM[2]
- movdqa @t[3],@XMM[3]
- movdqa @t[4],@XMM[4]
- movdqa @t[5],@XMM[5]
- movdqa @t[6],@XMM[6]
- movdqa @t[7],@XMM[7]
-___
-}
-
-sub InvMixColumns {
-my @x=@_[0..7];
-my @t=@_[8..15];
-
-# Thanks to Jussi Kivilinna for providing a pointer to
-#
-# | 0e 0b 0d 09 | | 02 03 01 01 | | 05 00 04 00 |
-# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
-# | 0d 09 0e 0b | | 01 01 02 03 | | 04 00 05 00 |
-# | 0b 0d 09 0e | | 03 01 01 02 | | 00 04 00 05 |
-
-$code.=<<___;
- # multiplication by 0x05-0x00-0x04-0x00
- pshufd \$0x4E, @x[0], @t[0]
- pshufd \$0x4E, @x[6], @t[6]
- pxor @x[0], @t[0]
- pshufd \$0x4E, @x[7], @t[7]
- pxor @x[6], @t[6]
- pshufd \$0x4E, @x[1], @t[1]
- pxor @x[7], @t[7]
- pshufd \$0x4E, @x[2], @t[2]
- pxor @x[1], @t[1]
- pshufd \$0x4E, @x[3], @t[3]
- pxor @x[2], @t[2]
- pxor @t[6], @x[0]
- pxor @t[6], @x[1]
- pshufd \$0x4E, @x[4], @t[4]
- pxor @x[3], @t[3]
- pxor @t[0], @x[2]
- pxor @t[1], @x[3]
- pshufd \$0x4E, @x[5], @t[5]
- pxor @x[4], @t[4]
- pxor @t[7], @x[1]
- pxor @t[2], @x[4]
- pxor @x[5], @t[5]
-
- pxor @t[7], @x[2]
- pxor @t[6], @x[3]
- pxor @t[6], @x[4]
- pxor @t[3], @x[5]
- pxor @t[4], @x[6]
- pxor @t[7], @x[4]
- pxor @t[7], @x[5]
- pxor @t[5], @x[7]
-___
- &MixColumns (@x,@t,1); # flipped 2<->3 and 4<->6
-}
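
The factorization above is easy to machine-check: the (0e,0b,0d,09) circulant equals the MixColumns circulant times the (05,00,04,00) one over GF(2^8), which is why InvMixColumns can reuse the cheaper forward code after the 05/04 pre-multiply. A self-contained C check, not part of this file:

#include <assert.h>
#include <stdint.h>

static uint8_t gmul(uint8_t a, uint8_t b)   /* GF(2^8), AES polynomial */
{
    uint8_t r = 0;
    while (b) {
        if (b & 1)
            r ^= a;
        a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
        b >>= 1;
    }
    return r;
}

int main(void)
{
    const uint8_t inv[4] = {0x0e, 0x0b, 0x0d, 0x09};  /* InvMixColumns row */
    const uint8_t mc [4] = {0x02, 0x03, 0x01, 0x01};  /* MixColumns row    */
    const uint8_t pre[4] = {0x05, 0x00, 0x04, 0x00};  /* pre-multiply row  */

    /* A circulant has entry C[i][j] = c[(j-i) & 3]; the product of two
     * circulants is again circulant, so check all 16 entries. */
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++) {
            uint8_t s = 0;
            for (int k = 0; k < 4; k++)
                s ^= gmul(mc[(k - i) & 3], pre[(j - k) & 3]);
            assert(s == inv[(j - i) & 3]);
        }
    return 0;
}
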
-
-sub aesenc { # not used
-my @b=@_[0..7];
-my @t=@_[8..15];
-$code.=<<___;
- movdqa 0x30($const),@t[0] # .LSR
-___
- &ShiftRows (@b,@t[0]);
- &Sbox (@b,@t);
- &MixColumns (@b[0,1,4,6,3,7,2,5],@t);
-}
-
-sub aesenclast { # not used
-my @b=@_[0..7];
-my @t=@_[8..15];
-$code.=<<___;
- movdqa 0x40($const),@t[0] # .LSRM0
-___
- &ShiftRows (@b,@t[0]);
- &Sbox (@b,@t);
-$code.=<<___;
- pxor 0x00($key),@b[0]
- pxor 0x10($key),@b[1]
- pxor 0x20($key),@b[4]
- pxor 0x30($key),@b[6]
- pxor 0x40($key),@b[3]
- pxor 0x50($key),@b[7]
- pxor 0x60($key),@b[2]
- pxor 0x70($key),@b[5]
-___
-}
-
-sub swapmove {
-my ($a,$b,$n,$mask,$t)=@_;
-$code.=<<___;
- movdqa $b,$t
- psrlq \$$n,$b
- pxor $a,$b
- pand $mask,$b
- pxor $b,$a
- psllq \$$n,$b
- pxor $t,$b
-___
-}
-sub swapmove2x {
-my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
-$code.=<<___;
- movdqa $b0,$t0
- psrlq \$$n,$b0
- movdqa $b1,$t1
- psrlq \$$n,$b1
- pxor $a0,$b0
- pxor $a1,$b1
- pand $mask,$b0
- pand $mask,$b1
- pxor $b0,$a0
- psllq \$$n,$b0
- pxor $b1,$a1
- psllq \$$n,$b1
- pxor $t0,$b0
- pxor $t1,$b1
-___
-}
-
-sub bitslice {
-my @x=reverse(@_[0..7]);
-my ($t0,$t1,$t2,$t3)=@_[8..11];
-$code.=<<___;
- movdqa 0x00($const),$t0 # .LBS0
- movdqa 0x10($const),$t1 # .LBS1
-___
- &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
- &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
-$code.=<<___;
- movdqa 0x20($const),$t0 # .LBS2
-___
- &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
- &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
-
- &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
- &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
-}
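
swapmove is the classic delta swap, and bitslice is three rounds of it: an 8x8 bit-matrix transpose that leaves bit i of every state byte in register i. A C sketch over 64-bit words, assuming the usual 0x55/0x33/0x0f values for the .LBS0/.LBS1/.LBS2 masks and omitting the register-order reversal the Perl applies with reverse():

#include <stdint.h>

/* Delta swap: exchange the bits of *a selected by mask<<n with the
 * bits of *b selected by mask; line for line the swapmove sequence. */
static void swapmove(uint64_t *a, uint64_t *b, unsigned n, uint64_t mask)
{
    uint64_t t = *b;        /* movdqa b,t   */
    *b >>= n;               /* psrlq n,b    */
    *b ^= *a;               /* pxor  a,b    */
    *b &= mask;             /* pand  mask,b */
    *a ^= *b;               /* pxor  b,a    */
    *b <<= n;               /* psllq n,b    */
    *b ^= t;                /* pxor  t,b    */
}

/* Transpose 8 bit planes: afterwards x[i] holds bit i of every byte. */
static void bitslice8(uint64_t x[8])
{
    const uint64_t bs0 = 0x5555555555555555ULL;   /* .LBS0 */
    const uint64_t bs1 = 0x3333333333333333ULL;   /* .LBS1 */
    const uint64_t bs2 = 0x0f0f0f0f0f0f0f0fULL;   /* .LBS2 */

    swapmove(&x[0], &x[1], 1, bs0); swapmove(&x[2], &x[3], 1, bs0);
    swapmove(&x[4], &x[5], 1, bs0); swapmove(&x[6], &x[7], 1, bs0);
    swapmove(&x[0], &x[2], 2, bs1); swapmove(&x[1], &x[3], 2, bs1);
    swapmove(&x[4], &x[6], 2, bs1); swapmove(&x[5], &x[7], 2, bs1);
    swapmove(&x[0], &x[4], 4, bs2); swapmove(&x[1], &x[5], 4, bs2);
    swapmove(&x[2], &x[6], 4, bs2); swapmove(&x[3], &x[7], 4, bs2);
}
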
-
-$code.=<<___;
-.text
-
-.extern asm_AES_encrypt
-.extern asm_AES_decrypt
-
-.type _bsaes_encrypt8,\@abi-omnipotent
-.align 64
-_bsaes_encrypt8:
- lea .LBS0(%rip), $const # constants table
-
- movdqa ($key), @XMM[9] # round 0 key
- lea 0x10($key), $key
- movdqa 0x50($const), @XMM[8] # .LM0SR
- pxor @XMM[9], @XMM[0] # xor with round0 key
- pxor @XMM[9], @XMM[1]
- pshufb @XMM[8], @XMM[0]
- pxor @XMM[9], @XMM[2]
- pshufb @XMM[8], @XMM[1]
- pxor @XMM[9], @XMM[3]
- pshufb @XMM[8], @XMM[2]
- pxor @XMM[9], @XMM[4]
- pshufb @XMM[8], @XMM[3]
- pxor @XMM[9], @XMM[5]
- pshufb @XMM[8], @XMM[4]
- pxor @XMM[9], @XMM[6]
- pshufb @XMM[8], @XMM[5]
- pxor @XMM[9], @XMM[7]
- pshufb @XMM[8], @XMM[6]
- pshufb @XMM[8], @XMM[7]
-_bsaes_encrypt8_bitslice:
-___
- &bitslice (@XMM[0..7, 8..11]);
-$code.=<<___;
- dec $rounds
- jmp .Lenc_sbox
-.align 16
-.Lenc_loop:
-___
- &ShiftRows (@XMM[0..7, 8]);
-$code.=".Lenc_sbox:\n";
- &Sbox (@XMM[0..7, 8..15]);
-$code.=<<___;
- dec $rounds
- jl .Lenc_done
-___
- &MixColumns (@XMM[0,1,4,6,3,7,2,5, 8..15]);
-$code.=<<___;
- movdqa 0x30($const), @XMM[8] # .LSR
- jnz .Lenc_loop
- movdqa 0x40($const), @XMM[8] # .LSRM0
- jmp .Lenc_loop
-.align 16
-.Lenc_done:
-___
- # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
- &bitslice (@XMM[0,1,4,6,3,7,2,5, 8..11]);
-$code.=<<___;
- movdqa ($key), @XMM[8] # last round key
- pxor @XMM[8], @XMM[4]
- pxor @XMM[8], @XMM[6]
- pxor @XMM[8], @XMM[3]
- pxor @XMM[8], @XMM[7]
- pxor @XMM[8], @XMM[2]
- pxor @XMM[8], @XMM[5]
- pxor @XMM[8], @XMM[0]
- pxor @XMM[8], @XMM[1]
- ret
-.size _bsaes_encrypt8,.-_bsaes_encrypt8
-
-.type _bsaes_decrypt8,\@abi-omnipotent
-.align 64
-_bsaes_decrypt8:
- lea .LBS0(%rip), $const # constants table
-
- movdqa ($key), @XMM[9] # round 0 key
- lea 0x10($key), $key
- movdqa -0x30($const), @XMM[8] # .LM0ISR
- pxor @XMM[9], @XMM[0] # xor with round0 key
- pxor @XMM[9], @XMM[1]
- pshufb @XMM[8], @XMM[0]
- pxor @XMM[9], @XMM[2]
- pshufb @XMM[8], @XMM[1]
- pxor @XMM[9], @XMM[3]
- pshufb @XMM[8], @XMM[2]
- pxor @XMM[9], @XMM[4]
- pshufb @XMM[8], @XMM[3]
- pxor @XMM[9], @XMM[5]
- pshufb @XMM[8], @XMM[4]
- pxor @XMM[9], @XMM[6]
- pshufb @XMM[8], @XMM[5]
- pxor @XMM[9], @XMM[7]
- pshufb @XMM[8], @XMM[6]
- pshufb @XMM[8], @XMM[7]
-___
- &bitslice (@XMM[0..7, 8..11]);
-$code.=<<___;
- dec $rounds
- jmp .Ldec_sbox
-.align 16
-.Ldec_loop:
-___
- &ShiftRows (@XMM[0..7, 8]);
-$code.=".Ldec_sbox:\n";
- &InvSbox (@XMM[0..7, 8..15]);
-$code.=<<___;
- dec $rounds
- jl .Ldec_done
-___
- &InvMixColumns (@XMM[0,1,6,4,2,7,3,5, 8..15]);
-$code.=<<___;
- movdqa -0x10($const), @XMM[8] # .LISR
- jnz .Ldec_loop
- movdqa -0x20($const), @XMM[8] # .LISRM0
- jmp .Ldec_loop
-.align 16
-.Ldec_done:
-___
- &bitslice (@XMM[0,1,6,4,2,7,3,5, 8..11]);
-$code.=<<___;
- movdqa ($key), @XMM[8] # last round key
- pxor @XMM[8], @XMM[6]
- pxor @XMM[8], @XMM[4]
- pxor @XMM[8], @XMM[2]
- pxor @XMM[8], @XMM[7]
- pxor @XMM[8], @XMM[3]
- pxor @XMM[8], @XMM[5]
- pxor @XMM[8], @XMM[0]
- pxor @XMM[8], @XMM[1]
- ret
-.size _bsaes_decrypt8,.-_bsaes_decrypt8
-___
-}
-{
-my ($out,$inp,$rounds,$const)=("%rax","%rcx","%r10d","%r11");
-
-sub bitslice_key {
-my @x=reverse(@_[0..7]);
-my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
-
- &swapmove (@x[0,1],1,$bs0,$t2,$t3);
-$code.=<<___;
- #&swapmove(@x[2,3],1,$t0,$t2,$t3);
- movdqa @x[0], @x[2]
- movdqa @x[1], @x[3]
-___
- #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
-
- &swapmove2x (@x[0,2,1,3],2,$bs1,$t2,$t3);
-$code.=<<___;
- #&swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
- movdqa @x[0], @x[4]
- movdqa @x[2], @x[6]
- movdqa @x[1], @x[5]
- movdqa @x[3], @x[7]
-___
- &swapmove2x (@x[0,4,1,5],4,$bs2,$t2,$t3);
- &swapmove2x (@x[2,6,3,7],4,$bs2,$t2,$t3);
-}
-
-$code.=<<___;
-.type _bsaes_key_convert,\@abi-omnipotent
-.align 16
-_bsaes_key_convert:
- lea .Lmasks(%rip), $const
- movdqu ($inp), %xmm7 # load round 0 key
- lea 0x10($inp), $inp
- movdqa 0x00($const), %xmm0 # 0x01...
- movdqa 0x10($const), %xmm1 # 0x02...
- movdqa 0x20($const), %xmm2 # 0x04...
- movdqa 0x30($const), %xmm3 # 0x08...
- movdqa 0x40($const), %xmm4 # .LM0
- pcmpeqd %xmm5, %xmm5 # .LNOT
-
- movdqu ($inp), %xmm6 # load round 1 key
- movdqa %xmm7, ($out) # save round 0 key
- lea 0x10($out), $out
- dec $rounds
- jmp .Lkey_loop
-.align 16
-.Lkey_loop:
- pshufb %xmm4, %xmm6 # .LM0
-
- movdqa %xmm0, %xmm8
- movdqa %xmm1, %xmm9
-
- pand %xmm6, %xmm8
- pand %xmm6, %xmm9
- movdqa %xmm2, %xmm10
- pcmpeqb %xmm0, %xmm8
- psllq \$4, %xmm0 # 0x10...
- movdqa %xmm3, %xmm11
- pcmpeqb %xmm1, %xmm9
- psllq \$4, %xmm1 # 0x20...
-
- pand %xmm6, %xmm10
- pand %xmm6, %xmm11
- movdqa %xmm0, %xmm12
- pcmpeqb %xmm2, %xmm10
- psllq \$4, %xmm2 # 0x40...
- movdqa %xmm1, %xmm13
- pcmpeqb %xmm3, %xmm11
- psllq \$4, %xmm3 # 0x80...
-
- movdqa %xmm2, %xmm14
- movdqa %xmm3, %xmm15
- pxor %xmm5, %xmm8 # "pnot"
- pxor %xmm5, %xmm9
-
- pand %xmm6, %xmm12
- pand %xmm6, %xmm13
- movdqa %xmm8, 0x00($out) # write bit-sliced round key
- pcmpeqb %xmm0, %xmm12
- psrlq \$4, %xmm0 # 0x01...
- movdqa %xmm9, 0x10($out)
- pcmpeqb %xmm1, %xmm13
- psrlq \$4, %xmm1 # 0x02...
- lea 0x10($inp), $inp
-
- pand %xmm6, %xmm14
- pand %xmm6, %xmm15
- movdqa %xmm10, 0x20($out)
- pcmpeqb %xmm2, %xmm14
- psrlq \$4, %xmm2 # 0x04...
- movdqa %xmm11, 0x30($out)
- pcmpeqb %xmm3, %xmm15
- psrlq \$4, %xmm3 # 0x08...
- movdqu ($inp), %xmm6 # load next round key
-
- pxor %xmm5, %xmm13 # "pnot"
- pxor %xmm5, %xmm14
- movdqa %xmm12, 0x40($out)
- movdqa %xmm13, 0x50($out)
- movdqa %xmm14, 0x60($out)
- movdqa %xmm15, 0x70($out)
- lea 0x80($out),$out
- dec $rounds
- jnz .Lkey_loop
-
- movdqa 0x50($const), %xmm7 # .L63
- #movdqa %xmm6, ($out) # don't save last round key
- ret
-.size _bsaes_key_convert,.-_bsaes_key_convert
-___
-}
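
The pand/pcmpeqb ladder in _bsaes_key_convert turns each round key into eight bit-plane masks: byte j of plane i becomes 0xff exactly when bit i of key byte j is set, so AddRoundKey reduces to eight pxor's in sliced form. (The assembly also reorders bytes with .LM0 first and complements four of the planes with the .LNOT mask to suit the S-box circuit's input convention.) A scalar sketch of the expansion only, names illustrative:

#include <stdint.h>

/* Expand a 16-byte round key into eight 16-byte bit-plane masks.
 * The .LM0 byte reordering and plane complementation done by the
 * assembly are omitted for clarity. */
static void key_to_bitplanes(const uint8_t key[16], uint8_t planes[8][16])
{
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 16; j++)
            planes[i][j] = (uint8_t)(((key[j] >> i) & 1) ? 0xff : 0x00);
}
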
-
-if (0 && !$win64) { # the following four functions are an unsupported interface
- # used for benchmarking...
-$code.=<<___;
-.globl bsaes_enc_key_convert
-.type bsaes_enc_key_convert,\@function,2
-.align 16
-bsaes_enc_key_convert:
- mov 240($inp),%r10d # pass rounds
- mov $inp,%rcx # pass key
- mov $out,%rax # pass key schedule
- call _bsaes_key_convert
- pxor %xmm6,%xmm7 # fix up last round key
- movdqa %xmm7,(%rax) # save last round key
- ret
-.size bsaes_enc_key_convert,.-bsaes_enc_key_convert
-
-.globl bsaes_encrypt_128
-.type bsaes_encrypt_128,\@function,4
-.align 16
-bsaes_encrypt_128:
-.Lenc128_loop:
- movdqu 0x00($inp), @XMM[0] # load input
- movdqu 0x10($inp), @XMM[1]
- movdqu 0x20($inp), @XMM[2]
- movdqu 0x30($inp), @XMM[3]
- movdqu 0x40($inp), @XMM[4]
- movdqu 0x50($inp), @XMM[5]
- movdqu 0x60($inp), @XMM[6]
- movdqu 0x70($inp), @XMM[7]
- mov $key, %rax # pass the $key
- lea 0x80($inp), $inp
- mov \$10,%r10d
-
- call _bsaes_encrypt8
-
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- movdqu @XMM[6], 0x30($out)
- movdqu @XMM[3], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[2], 0x60($out)
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
- sub \$0x80,$len
- ja .Lenc128_loop
- ret
-.size bsaes_encrypt_128,.-bsaes_encrypt_128
-
-.globl bsaes_dec_key_convert
-.type bsaes_dec_key_convert,\@function,2
-.align 16
-bsaes_dec_key_convert:
- mov 240($inp),%r10d # pass rounds
- mov $inp,%rcx # pass key
- mov $out,%rax # pass key schedule
- call _bsaes_key_convert
- pxor ($out),%xmm7 # fix up round 0 key
- movdqa %xmm6,(%rax) # save last round key
- movdqa %xmm7,($out)
- ret
-.size bsaes_dec_key_convert,.-bsaes_dec_key_convert
-
-.globl bsaes_decrypt_128
-.type bsaes_decrypt_128,\@function,4
-.align 16
-bsaes_decrypt_128:
-.Ldec128_loop:
- movdqu 0x00($inp), @XMM[0] # load input
- movdqu 0x10($inp), @XMM[1]
- movdqu 0x20($inp), @XMM[2]
- movdqu 0x30($inp), @XMM[3]
- movdqu 0x40($inp), @XMM[4]
- movdqu 0x50($inp), @XMM[5]
- movdqu 0x60($inp), @XMM[6]
- movdqu 0x70($inp), @XMM[7]
- mov $key, %rax # pass the $key
- lea 0x80($inp), $inp
- mov \$10,%r10d
-
- call _bsaes_decrypt8
-
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[3], 0x60($out)
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
- sub \$0x80,$len
- ja .Ldec128_loop
- ret
-.size bsaes_decrypt_128,.-bsaes_decrypt_128
-___
-}
-{
-######################################################################
-#
-# OpenSSL interface
-#
-my ($arg1,$arg2,$arg3,$arg4,$arg5,$arg6)=$win64 ? ("%rcx","%rdx","%r8","%r9","%r10","%r11d")
- : ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
-my ($inp,$out,$len,$key)=("%r12","%r13","%r14","%r15");
-
-if ($ecb) {
-$code.=<<___;
-.globl bsaes_ecb_encrypt_blocks
-.type bsaes_ecb_encrypt_blocks,\@abi-omnipotent
-.align 16
-bsaes_ecb_encrypt_blocks:
- mov %rsp, %rax
-.Lecb_enc_prologue:
- push %rbp
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- lea -0x48(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
- lea -0xa0(%rsp), %rsp
- movaps %xmm6, 0x40(%rsp)
- movaps %xmm7, 0x50(%rsp)
- movaps %xmm8, 0x60(%rsp)
- movaps %xmm9, 0x70(%rsp)
- movaps %xmm10, 0x80(%rsp)
- movaps %xmm11, 0x90(%rsp)
- movaps %xmm12, 0xa0(%rsp)
- movaps %xmm13, 0xb0(%rsp)
- movaps %xmm14, 0xc0(%rsp)
- movaps %xmm15, 0xd0(%rsp)
-.Lecb_enc_body:
-___
-$code.=<<___;
- mov %rsp,%rbp # backup %rsp
- mov 240($arg4),%eax # rounds
- mov $arg1,$inp # backup arguments
- mov $arg2,$out
- mov $arg3,$len
- mov $arg4,$key
- cmp \$8,$arg3
- jb .Lecb_enc_short
-
- mov %eax,%ebx # backup rounds
- shl \$7,%rax # 128 bytes per inner round key
- sub \$`128-32`,%rax # size of bit-sliced key schedule
- sub %rax,%rsp
- mov %rsp,%rax # pass key schedule
- mov $key,%rcx # pass key
- mov %ebx,%r10d # pass rounds
- call _bsaes_key_convert
- pxor %xmm6,%xmm7 # fix up last round key
- movdqa %xmm7,(%rax) # save last round key
-
- sub \$8,$len
-.Lecb_enc_loop:
- movdqu 0x00($inp), @XMM[0] # load input
- movdqu 0x10($inp), @XMM[1]
- movdqu 0x20($inp), @XMM[2]
- movdqu 0x30($inp), @XMM[3]
- movdqu 0x40($inp), @XMM[4]
- movdqu 0x50($inp), @XMM[5]
- mov %rsp, %rax # pass key schedule
- movdqu 0x60($inp), @XMM[6]
- mov %ebx,%r10d # pass rounds
- movdqu 0x70($inp), @XMM[7]
- lea 0x80($inp), $inp
-
- call _bsaes_encrypt8
-
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- movdqu @XMM[6], 0x30($out)
- movdqu @XMM[3], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[2], 0x60($out)
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
- sub \$8,$len
- jnc .Lecb_enc_loop
-
- add \$8,$len
- jz .Lecb_enc_done
-
- movdqu 0x00($inp), @XMM[0] # load input
- mov %rsp, %rax # pass key schedule
- mov %ebx,%r10d # pass rounds
- cmp \$2,$len
- jb .Lecb_enc_one
- movdqu 0x10($inp), @XMM[1]
- je .Lecb_enc_two
- movdqu 0x20($inp), @XMM[2]
- cmp \$4,$len
- jb .Lecb_enc_three
- movdqu 0x30($inp), @XMM[3]
- je .Lecb_enc_four
- movdqu 0x40($inp), @XMM[4]
- cmp \$6,$len
- jb .Lecb_enc_five
- movdqu 0x50($inp), @XMM[5]
- je .Lecb_enc_six
- movdqu 0x60($inp), @XMM[6]
- call _bsaes_encrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- movdqu @XMM[6], 0x30($out)
- movdqu @XMM[3], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[2], 0x60($out)
- jmp .Lecb_enc_done
-.align 16
-.Lecb_enc_six:
- call _bsaes_encrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- movdqu @XMM[6], 0x30($out)
- movdqu @XMM[3], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- jmp .Lecb_enc_done
-.align 16
-.Lecb_enc_five:
- call _bsaes_encrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- movdqu @XMM[6], 0x30($out)
- movdqu @XMM[3], 0x40($out)
- jmp .Lecb_enc_done
-.align 16
-.Lecb_enc_four:
- call _bsaes_encrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- movdqu @XMM[6], 0x30($out)
- jmp .Lecb_enc_done
-.align 16
-.Lecb_enc_three:
- call _bsaes_encrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- jmp .Lecb_enc_done
-.align 16
-.Lecb_enc_two:
- call _bsaes_encrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- jmp .Lecb_enc_done
-.align 16
-.Lecb_enc_one:
- call _bsaes_encrypt8
- movdqu @XMM[0], 0x00($out) # write output
- jmp .Lecb_enc_done
-.align 16
-.Lecb_enc_short:
- lea ($inp), $arg1
- lea ($out), $arg2
- lea ($key), $arg3
- call asm_AES_encrypt
- lea 16($inp), $inp
- lea 16($out), $out
- dec $len
- jnz .Lecb_enc_short
-
-.Lecb_enc_done:
- lea (%rsp),%rax
- pxor %xmm0, %xmm0
-.Lecb_enc_bzero: # wipe key schedule [if any]
- movdqa %xmm0, 0x00(%rax)
- movdqa %xmm0, 0x10(%rax)
- lea 0x20(%rax), %rax
- cmp %rax, %rbp
- jb .Lecb_enc_bzero
-
- lea (%rbp),%rsp # restore %rsp
-___
-$code.=<<___ if ($win64);
- movaps 0x40(%rbp), %xmm6
- movaps 0x50(%rbp), %xmm7
- movaps 0x60(%rbp), %xmm8
- movaps 0x70(%rbp), %xmm9
- movaps 0x80(%rbp), %xmm10
- movaps 0x90(%rbp), %xmm11
- movaps 0xa0(%rbp), %xmm12
- movaps 0xb0(%rbp), %xmm13
- movaps 0xc0(%rbp), %xmm14
- movaps 0xd0(%rbp), %xmm15
- lea 0xa0(%rbp), %rsp
-___
-$code.=<<___;
- mov 0x48(%rsp), %r15
- mov 0x50(%rsp), %r14
- mov 0x58(%rsp), %r13
- mov 0x60(%rsp), %r12
- mov 0x68(%rsp), %rbx
- mov 0x70(%rsp), %rax
- lea 0x78(%rsp), %rsp
- mov %rax, %rbp
-.Lecb_enc_epilogue:
- ret
-.size bsaes_ecb_encrypt_blocks,.-bsaes_ecb_encrypt_blocks
-
-.globl bsaes_ecb_decrypt_blocks
-.type bsaes_ecb_decrypt_blocks,\@abi-omnipotent
-.align 16
-bsaes_ecb_decrypt_blocks:
- mov %rsp, %rax
-.Lecb_dec_prologue:
- push %rbp
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- lea -0x48(%rsp),%rsp
-___
-$code.=<<___ if ($win64);
- lea -0xa0(%rsp), %rsp
- movaps %xmm6, 0x40(%rsp)
- movaps %xmm7, 0x50(%rsp)
- movaps %xmm8, 0x60(%rsp)
- movaps %xmm9, 0x70(%rsp)
- movaps %xmm10, 0x80(%rsp)
- movaps %xmm11, 0x90(%rsp)
- movaps %xmm12, 0xa0(%rsp)
- movaps %xmm13, 0xb0(%rsp)
- movaps %xmm14, 0xc0(%rsp)
- movaps %xmm15, 0xd0(%rsp)
-.Lecb_dec_body:
-___
-$code.=<<___;
- mov %rsp,%rbp # backup %rsp
- mov 240($arg4),%eax # rounds
- mov $arg1,$inp # backup arguments
- mov $arg2,$out
- mov $arg3,$len
- mov $arg4,$key
- cmp \$8,$arg3
- jb .Lecb_dec_short
-
- mov %eax,%ebx # backup rounds
- shl \$7,%rax # 128 bytes per inner round key
- sub \$`128-32`,%rax # size of bit-sliced key schedule
- sub %rax,%rsp
- mov %rsp,%rax # pass key schedule
- mov $key,%rcx # pass key
- mov %ebx,%r10d # pass rounds
- call _bsaes_key_convert
- pxor (%rsp),%xmm7 # fix up round 0 key
- movdqa %xmm6,(%rax) # save last round key
- movdqa %xmm7,(%rsp)
-
- sub \$8,$len
-.Lecb_dec_loop:
- movdqu 0x00($inp), @XMM[0] # load input
- movdqu 0x10($inp), @XMM[1]
- movdqu 0x20($inp), @XMM[2]
- movdqu 0x30($inp), @XMM[3]
- movdqu 0x40($inp), @XMM[4]
- movdqu 0x50($inp), @XMM[5]
- mov %rsp, %rax # pass key schedule
- movdqu 0x60($inp), @XMM[6]
- mov %ebx,%r10d # pass rounds
- movdqu 0x70($inp), @XMM[7]
- lea 0x80($inp), $inp
-
- call _bsaes_decrypt8
-
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[3], 0x60($out)
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
- sub \$8,$len
- jnc .Lecb_dec_loop
-
- add \$8,$len
- jz .Lecb_dec_done
-
- movdqu 0x00($inp), @XMM[0] # load input
- mov %rsp, %rax # pass key schedule
- mov %ebx,%r10d # pass rounds
- cmp \$2,$len
- jb .Lecb_dec_one
- movdqu 0x10($inp), @XMM[1]
- je .Lecb_dec_two
- movdqu 0x20($inp), @XMM[2]
- cmp \$4,$len
- jb .Lecb_dec_three
- movdqu 0x30($inp), @XMM[3]
- je .Lecb_dec_four
- movdqu 0x40($inp), @XMM[4]
- cmp \$6,$len
- jb .Lecb_dec_five
- movdqu 0x50($inp), @XMM[5]
- je .Lecb_dec_six
- movdqu 0x60($inp), @XMM[6]
- call _bsaes_decrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[3], 0x60($out)
- jmp .Lecb_dec_done
-.align 16
-.Lecb_dec_six:
- call _bsaes_decrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- jmp .Lecb_dec_done
-.align 16
-.Lecb_dec_five:
- call _bsaes_decrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- jmp .Lecb_dec_done
-.align 16
-.Lecb_dec_four:
- call _bsaes_decrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- jmp .Lecb_dec_done
-.align 16
-.Lecb_dec_three:
- call _bsaes_decrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- jmp .Lecb_dec_done
-.align 16
-.Lecb_dec_two:
- call _bsaes_decrypt8
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- jmp .Lecb_dec_done
-.align 16
-.Lecb_dec_one:
- call _bsaes_decrypt8
- movdqu @XMM[0], 0x00($out) # write output
- jmp .Lecb_dec_done
-.align 16
-.Lecb_dec_short:
- lea ($inp), $arg1
- lea ($out), $arg2
- lea ($key), $arg3
- call asm_AES_decrypt
- lea 16($inp), $inp
- lea 16($out), $out
- dec $len
- jnz .Lecb_dec_short
-
-.Lecb_dec_done:
- lea (%rsp),%rax
- pxor %xmm0, %xmm0
-.Lecb_dec_bzero: # wipe key schedule [if any]
- movdqa %xmm0, 0x00(%rax)
- movdqa %xmm0, 0x10(%rax)
- lea 0x20(%rax), %rax
- cmp %rax, %rbp
- jb .Lecb_dec_bzero
-
- lea (%rbp),%rsp # restore %rsp
-___
-$code.=<<___ if ($win64);
- movaps 0x40(%rbp), %xmm6
- movaps 0x50(%rbp), %xmm7
- movaps 0x60(%rbp), %xmm8
- movaps 0x70(%rbp), %xmm9
- movaps 0x80(%rbp), %xmm10
- movaps 0x90(%rbp), %xmm11
- movaps 0xa0(%rbp), %xmm12
- movaps 0xb0(%rbp), %xmm13
- movaps 0xc0(%rbp), %xmm14
- movaps 0xd0(%rbp), %xmm15
- lea 0xa0(%rbp), %rsp
-___
-$code.=<<___;
- mov 0x48(%rsp), %r15
- mov 0x50(%rsp), %r14
- mov 0x58(%rsp), %r13
- mov 0x60(%rsp), %r12
- mov 0x68(%rsp), %rbx
- mov 0x70(%rsp), %rax
- lea 0x78(%rsp), %rsp
- mov %rax, %rbp
-.Lecb_dec_epilogue:
- ret
-.size bsaes_ecb_decrypt_blocks,.-bsaes_ecb_decrypt_blocks
-___
-}
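
Both ECB entry points follow the same shape: peel off eight blocks at a time, then dispatch on the 1-7 block remainder with the cmp/jb/je ladder, always running the full eight-way kernel and storing only the valid outputs; inputs shorter than eight blocks fall back to the table-driven asm_AES_* routine. A control-flow sketch in C, where encrypt8() and encrypt1() are hypothetical stand-ins for _bsaes_encrypt8 and asm_AES_encrypt:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical kernels: encrypt8 runs the 8-way bitsliced core and
 * stores n <= 8 outputs; encrypt1 is a single-block fallback. */
void encrypt8(const uint8_t *in, uint8_t *out, const void *ks, size_t n);
void encrypt1(const uint8_t *in, uint8_t *out, const void *ks);

static void ecb_encrypt_blocks(const uint8_t *in, uint8_t *out,
                               size_t nblocks, const void *ks)
{
    if (nblocks < 8) {                       /* .Lecb_enc_short */
        while (nblocks--) {
            encrypt1(in, out, ks);
            in += 16; out += 16;
        }
        return;
    }
    while (nblocks >= 8) {                   /* .Lecb_enc_loop */
        encrypt8(in, out, ks, 8);
        in += 128; out += 128; nblocks -= 8;
    }
    if (nblocks)                             /* the cmp/jb/je ladder */
        encrypt8(in, out, ks, nblocks);
}
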
-$code.=<<___;
-.extern asm_AES_cbc_encrypt
-.globl bsaes_cbc_encrypt
-.type bsaes_cbc_encrypt,\@abi-omnipotent
-.align 16
-bsaes_cbc_encrypt:
-___
-$code.=<<___ if ($win64);
- mov 48(%rsp),$arg6 # pull direction flag
-___
-$code.=<<___;
- cmp \$0,$arg6
- jne asm_AES_cbc_encrypt
- cmp \$128,$arg3
- jb asm_AES_cbc_encrypt
-
- mov %rsp, %rax
-.Lcbc_dec_prologue:
- push %rbp
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- lea -0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
- mov 0xa0(%rsp),$arg5 # pull ivp
- lea -0xa0(%rsp), %rsp
- movaps %xmm6, 0x40(%rsp)
- movaps %xmm7, 0x50(%rsp)
- movaps %xmm8, 0x60(%rsp)
- movaps %xmm9, 0x70(%rsp)
- movaps %xmm10, 0x80(%rsp)
- movaps %xmm11, 0x90(%rsp)
- movaps %xmm12, 0xa0(%rsp)
- movaps %xmm13, 0xb0(%rsp)
- movaps %xmm14, 0xc0(%rsp)
- movaps %xmm15, 0xd0(%rsp)
-.Lcbc_dec_body:
-___
-$code.=<<___;
- mov %rsp, %rbp # backup %rsp
- mov 240($arg4), %eax # rounds
- mov $arg1, $inp # backup arguments
- mov $arg2, $out
- mov $arg3, $len
- mov $arg4, $key
- mov $arg5, %rbx
- shr \$4, $len # bytes to blocks
-
- mov %eax, %edx # rounds
- shl \$7, %rax # 128 bytes per inner round key
- sub \$`128-32`, %rax # size of bit-sliced key schedule
- sub %rax, %rsp
-
- mov %rsp, %rax # pass key schedule
- mov $key, %rcx # pass key
- mov %edx, %r10d # pass rounds
- call _bsaes_key_convert
- pxor (%rsp),%xmm7 # fix up round 0 key
- movdqa %xmm6,(%rax) # save last round key
- movdqa %xmm7,(%rsp)
-
- movdqu (%rbx), @XMM[15] # load IV
- sub \$8,$len
-.Lcbc_dec_loop:
- movdqu 0x00($inp), @XMM[0] # load input
- movdqu 0x10($inp), @XMM[1]
- movdqu 0x20($inp), @XMM[2]
- movdqu 0x30($inp), @XMM[3]
- movdqu 0x40($inp), @XMM[4]
- movdqu 0x50($inp), @XMM[5]
- mov %rsp, %rax # pass key schedule
- movdqu 0x60($inp), @XMM[6]
- mov %edx,%r10d # pass rounds
- movdqu 0x70($inp), @XMM[7]
- movdqa @XMM[15], 0x20(%rbp) # put aside IV
-
- call _bsaes_decrypt8
-
- pxor 0x20(%rbp), @XMM[0] # ^= IV
- movdqu 0x00($inp), @XMM[8] # re-load input
- movdqu 0x10($inp), @XMM[9]
- pxor @XMM[8], @XMM[1]
- movdqu 0x20($inp), @XMM[10]
- pxor @XMM[9], @XMM[6]
- movdqu 0x30($inp), @XMM[11]
- pxor @XMM[10], @XMM[4]
- movdqu 0x40($inp), @XMM[12]
- pxor @XMM[11], @XMM[2]
- movdqu 0x50($inp), @XMM[13]
- pxor @XMM[12], @XMM[7]
- movdqu 0x60($inp), @XMM[14]
- pxor @XMM[13], @XMM[3]
- movdqu 0x70($inp), @XMM[15] # IV
- pxor @XMM[14], @XMM[5]
- movdqu @XMM[0], 0x00($out) # write output
- lea 0x80($inp), $inp
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[3], 0x60($out)
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
- sub \$8,$len
- jnc .Lcbc_dec_loop
-
- add \$8,$len
- jz .Lcbc_dec_done
-
- movdqu 0x00($inp), @XMM[0] # load input
- mov %rsp, %rax # pass key schedule
- mov %edx, %r10d # pass rounds
- cmp \$2,$len
- jb .Lcbc_dec_one
- movdqu 0x10($inp), @XMM[1]
- je .Lcbc_dec_two
- movdqu 0x20($inp), @XMM[2]
- cmp \$4,$len
- jb .Lcbc_dec_three
- movdqu 0x30($inp), @XMM[3]
- je .Lcbc_dec_four
- movdqu 0x40($inp), @XMM[4]
- cmp \$6,$len
- jb .Lcbc_dec_five
- movdqu 0x50($inp), @XMM[5]
- je .Lcbc_dec_six
- movdqu 0x60($inp), @XMM[6]
- movdqa @XMM[15], 0x20(%rbp) # put aside IV
- call _bsaes_decrypt8
- pxor 0x20(%rbp), @XMM[0] # ^= IV
- movdqu 0x00($inp), @XMM[8] # re-load input
- movdqu 0x10($inp), @XMM[9]
- pxor @XMM[8], @XMM[1]
- movdqu 0x20($inp), @XMM[10]
- pxor @XMM[9], @XMM[6]
- movdqu 0x30($inp), @XMM[11]
- pxor @XMM[10], @XMM[4]
- movdqu 0x40($inp), @XMM[12]
- pxor @XMM[11], @XMM[2]
- movdqu 0x50($inp), @XMM[13]
- pxor @XMM[12], @XMM[7]
- movdqu 0x60($inp), @XMM[15] # IV
- pxor @XMM[13], @XMM[3]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[3], 0x60($out)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_six:
- movdqa @XMM[15], 0x20(%rbp) # put aside IV
- call _bsaes_decrypt8
- pxor 0x20(%rbp), @XMM[0] # ^= IV
- movdqu 0x00($inp), @XMM[8] # re-load input
- movdqu 0x10($inp), @XMM[9]
- pxor @XMM[8], @XMM[1]
- movdqu 0x20($inp), @XMM[10]
- pxor @XMM[9], @XMM[6]
- movdqu 0x30($inp), @XMM[11]
- pxor @XMM[10], @XMM[4]
- movdqu 0x40($inp), @XMM[12]
- pxor @XMM[11], @XMM[2]
- movdqu 0x50($inp), @XMM[15] # IV
- pxor @XMM[12], @XMM[7]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_five:
- movdqa @XMM[15], 0x20(%rbp) # put aside IV
- call _bsaes_decrypt8
- pxor 0x20(%rbp), @XMM[0] # ^= IV
- movdqu 0x00($inp), @XMM[8] # re-load input
- movdqu 0x10($inp), @XMM[9]
- pxor @XMM[8], @XMM[1]
- movdqu 0x20($inp), @XMM[10]
- pxor @XMM[9], @XMM[6]
- movdqu 0x30($inp), @XMM[11]
- pxor @XMM[10], @XMM[4]
- movdqu 0x40($inp), @XMM[15] # IV
- pxor @XMM[11], @XMM[2]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_four:
- movdqa @XMM[15], 0x20(%rbp) # put aside IV
- call _bsaes_decrypt8
- pxor 0x20(%rbp), @XMM[0] # ^= IV
- movdqu 0x00($inp), @XMM[8] # re-load input
- movdqu 0x10($inp), @XMM[9]
- pxor @XMM[8], @XMM[1]
- movdqu 0x20($inp), @XMM[10]
- pxor @XMM[9], @XMM[6]
- movdqu 0x30($inp), @XMM[15] # IV
- pxor @XMM[10], @XMM[4]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_three:
- movdqa @XMM[15], 0x20(%rbp) # put aside IV
- call _bsaes_decrypt8
- pxor 0x20(%rbp), @XMM[0] # ^= IV
- movdqu 0x00($inp), @XMM[8] # re-load input
- movdqu 0x10($inp), @XMM[9]
- pxor @XMM[8], @XMM[1]
- movdqu 0x20($inp), @XMM[15] # IV
- pxor @XMM[9], @XMM[6]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_two:
- movdqa @XMM[15], 0x20(%rbp) # put aside IV
- call _bsaes_decrypt8
- pxor 0x20(%rbp), @XMM[0] # ^= IV
- movdqu 0x00($inp), @XMM[8] # re-load input
- movdqu 0x10($inp), @XMM[15] # IV
- pxor @XMM[8], @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- jmp .Lcbc_dec_done
-.align 16
-.Lcbc_dec_one:
- lea ($inp), $arg1
- lea 0x20(%rbp), $arg2 # buffer output
- lea ($key), $arg3
- call asm_AES_decrypt # doesn't touch %xmm
- pxor 0x20(%rbp), @XMM[15] # ^= IV
- movdqu @XMM[15], ($out) # write output
- movdqa @XMM[0], @XMM[15] # IV
-
-.Lcbc_dec_done:
- movdqu @XMM[15], (%rbx) # return IV
- lea (%rsp), %rax
- pxor %xmm0, %xmm0
-.Lcbc_dec_bzero: # wipe key schedule [if any]
- movdqa %xmm0, 0x00(%rax)
- movdqa %xmm0, 0x10(%rax)
- lea 0x20(%rax), %rax
- cmp %rax, %rbp
- ja .Lcbc_dec_bzero
-
- lea (%rbp),%rsp # restore %rsp
-___
-$code.=<<___ if ($win64);
- movaps 0x40(%rbp), %xmm6
- movaps 0x50(%rbp), %xmm7
- movaps 0x60(%rbp), %xmm8
- movaps 0x70(%rbp), %xmm9
- movaps 0x80(%rbp), %xmm10
- movaps 0x90(%rbp), %xmm11
- movaps 0xa0(%rbp), %xmm12
- movaps 0xb0(%rbp), %xmm13
- movaps 0xc0(%rbp), %xmm14
- movaps 0xd0(%rbp), %xmm15
- lea 0xa0(%rbp), %rsp
-___
-$code.=<<___;
- mov 0x48(%rsp), %r15
- mov 0x50(%rsp), %r14
- mov 0x58(%rsp), %r13
- mov 0x60(%rsp), %r12
- mov 0x68(%rsp), %rbx
- mov 0x70(%rsp), %rax
- lea 0x78(%rsp), %rsp
- mov %rax, %rbp
-.Lcbc_dec_epilogue:
- ret
-.size bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
-
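
CBC decryption parallelizes because each plaintext depends on only two ciphertexts: P[i] = D(C[i]) ^ C[i-1]. That is why the loop above decrypts eight blocks at once, then re-loads the original inputs and applies the chaining XORs afterwards. A reference loop in C; block_fn is a hypothetical single-block decryptor, not this file's interface:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef void (*block_fn)(uint8_t out[16], const uint8_t in[16], const void *key);

static void cbc_decrypt(block_fn dec, const void *key, uint8_t iv[16],
                        const uint8_t *in, uint8_t *out, size_t nblocks)
{
    uint8_t prev[16], cur[16];

    memcpy(prev, iv, 16);
    for (size_t i = 0; i < nblocks; i++) {
        memcpy(cur, in + 16 * i, 16);        /* keep C[i]: it chains forward */
        dec(out + 16 * i, cur, key);
        for (int j = 0; j < 16; j++)
            out[16 * i + j] ^= prev[j];      /* ^= C[i-1], or the IV         */
        memcpy(prev, cur, 16);
    }
    memcpy(iv, prev, 16);    /* hand back the last block, like (%rbx) above */
}
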
-.globl bsaes_ctr32_encrypt_blocks
-.type bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
-.align 16
-bsaes_ctr32_encrypt_blocks:
- mov %rsp, %rax
-.Lctr_enc_prologue:
- push %rbp
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- lea -0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
- mov 0xa0(%rsp),$arg5 # pull ivp
- lea -0xa0(%rsp), %rsp
- movaps %xmm6, 0x40(%rsp)
- movaps %xmm7, 0x50(%rsp)
- movaps %xmm8, 0x60(%rsp)
- movaps %xmm9, 0x70(%rsp)
- movaps %xmm10, 0x80(%rsp)
- movaps %xmm11, 0x90(%rsp)
- movaps %xmm12, 0xa0(%rsp)
- movaps %xmm13, 0xb0(%rsp)
- movaps %xmm14, 0xc0(%rsp)
- movaps %xmm15, 0xd0(%rsp)
-.Lctr_enc_body:
-___
-$code.=<<___;
- mov %rsp, %rbp # backup %rsp
- movdqu ($arg5), %xmm0 # load counter
- mov 240($arg4), %eax # rounds
- mov $arg1, $inp # backup arguments
- mov $arg2, $out
- mov $arg3, $len
- mov $arg4, $key
- movdqa %xmm0, 0x20(%rbp) # copy counter
- cmp \$8, $arg3
- jb .Lctr_enc_short
-
- mov %eax, %ebx # rounds
- shl \$7, %rax # 128 bytes per inner round key
- sub \$`128-32`, %rax # size of bit-sliced key schedule
- sub %rax, %rsp
-
- mov %rsp, %rax # pass key schedule
- mov $key, %rcx # pass key
- mov %ebx, %r10d # pass rounds
- call _bsaes_key_convert
- pxor %xmm6,%xmm7 # fix up last round key
- movdqa %xmm7,(%rax) # save last round key
-
- movdqa (%rsp), @XMM[9] # load round0 key
- lea .LADD1(%rip), %r11
- movdqa 0x20(%rbp), @XMM[0] # counter copy
- movdqa -0x20(%r11), @XMM[8] # .LSWPUP
- pshufb @XMM[8], @XMM[9] # byte swap upper part
- pshufb @XMM[8], @XMM[0]
- movdqa @XMM[9], (%rsp) # save adjusted round0 key
- jmp .Lctr_enc_loop
-.align 16
-.Lctr_enc_loop:
- movdqa @XMM[0], 0x20(%rbp) # save counter
- movdqa @XMM[0], @XMM[1] # prepare 8 counter values
- movdqa @XMM[0], @XMM[2]
- paddd 0x00(%r11), @XMM[1] # .LADD1
- movdqa @XMM[0], @XMM[3]
- paddd 0x10(%r11), @XMM[2] # .LADD2
- movdqa @XMM[0], @XMM[4]
- paddd 0x20(%r11), @XMM[3] # .LADD3
- movdqa @XMM[0], @XMM[5]
- paddd 0x30(%r11), @XMM[4] # .LADD4
- movdqa @XMM[0], @XMM[6]
- paddd 0x40(%r11), @XMM[5] # .LADD5
- movdqa @XMM[0], @XMM[7]
- paddd 0x50(%r11), @XMM[6] # .LADD6
- paddd 0x60(%r11), @XMM[7] # .LADD7
-
- # Borrow the prologue from _bsaes_encrypt8 to take the opportunity
- # to flip the byte order in the 32-bit counter
- movdqa (%rsp), @XMM[9] # round 0 key
- lea 0x10(%rsp), %rax # pass key schedule
- movdqa -0x10(%r11), @XMM[8] # .LSWPUPM0SR
- pxor @XMM[9], @XMM[0] # xor with round0 key
- pxor @XMM[9], @XMM[1]
- pshufb @XMM[8], @XMM[0]
- pxor @XMM[9], @XMM[2]
- pshufb @XMM[8], @XMM[1]
- pxor @XMM[9], @XMM[3]
- pshufb @XMM[8], @XMM[2]
- pxor @XMM[9], @XMM[4]
- pshufb @XMM[8], @XMM[3]
- pxor @XMM[9], @XMM[5]
- pshufb @XMM[8], @XMM[4]
- pxor @XMM[9], @XMM[6]
- pshufb @XMM[8], @XMM[5]
- pxor @XMM[9], @XMM[7]
- pshufb @XMM[8], @XMM[6]
- lea .LBS0(%rip), %r11 # constants table
- pshufb @XMM[8], @XMM[7]
- mov %ebx,%r10d # pass rounds
-
- call _bsaes_encrypt8_bitslice
-
- sub \$8,$len
- jc .Lctr_enc_loop_done
-
- movdqu 0x00($inp), @XMM[8] # load input
- movdqu 0x10($inp), @XMM[9]
- movdqu 0x20($inp), @XMM[10]
- movdqu 0x30($inp), @XMM[11]
- movdqu 0x40($inp), @XMM[12]
- movdqu 0x50($inp), @XMM[13]
- movdqu 0x60($inp), @XMM[14]
- movdqu 0x70($inp), @XMM[15]
- lea 0x80($inp),$inp
- pxor @XMM[0], @XMM[8]
- movdqa 0x20(%rbp), @XMM[0] # load counter
- pxor @XMM[9], @XMM[1]
- movdqu @XMM[8], 0x00($out) # write output
- pxor @XMM[10], @XMM[4]
- movdqu @XMM[1], 0x10($out)
- pxor @XMM[11], @XMM[6]
- movdqu @XMM[4], 0x20($out)
- pxor @XMM[12], @XMM[3]
- movdqu @XMM[6], 0x30($out)
- pxor @XMM[13], @XMM[7]
- movdqu @XMM[3], 0x40($out)
- pxor @XMM[14], @XMM[2]
- movdqu @XMM[7], 0x50($out)
- pxor @XMM[15], @XMM[5]
- movdqu @XMM[2], 0x60($out)
- lea .LADD1(%rip), %r11
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
- paddd 0x70(%r11), @XMM[0] # .LADD8
- jnz .Lctr_enc_loop
-
- jmp .Lctr_enc_done
-.align 16
-.Lctr_enc_loop_done:
- add \$8, $len
- movdqu 0x00($inp), @XMM[8] # load input
- pxor @XMM[8], @XMM[0]
- movdqu @XMM[0], 0x00($out) # write output
- cmp \$2,$len
- jb .Lctr_enc_done
- movdqu 0x10($inp), @XMM[9]
- pxor @XMM[9], @XMM[1]
- movdqu @XMM[1], 0x10($out)
- je .Lctr_enc_done
- movdqu 0x20($inp), @XMM[10]
- pxor @XMM[10], @XMM[4]
- movdqu @XMM[4], 0x20($out)
- cmp \$4,$len
- jb .Lctr_enc_done
- movdqu 0x30($inp), @XMM[11]
- pxor @XMM[11], @XMM[6]
- movdqu @XMM[6], 0x30($out)
- je .Lctr_enc_done
- movdqu 0x40($inp), @XMM[12]
- pxor @XMM[12], @XMM[3]
- movdqu @XMM[3], 0x40($out)
- cmp \$6,$len
- jb .Lctr_enc_done
- movdqu 0x50($inp), @XMM[13]
- pxor @XMM[13], @XMM[7]
- movdqu @XMM[7], 0x50($out)
- je .Lctr_enc_done
- movdqu 0x60($inp), @XMM[14]
- pxor @XMM[14], @XMM[2]
- movdqu @XMM[2], 0x60($out)
- jmp .Lctr_enc_done
-
-.align 16
-.Lctr_enc_short:
- lea 0x20(%rbp), $arg1
- lea 0x30(%rbp), $arg2
- lea ($key), $arg3
- call asm_AES_encrypt
- movdqu ($inp), @XMM[1]
- lea 16($inp), $inp
- mov 0x2c(%rbp), %eax # load 32-bit counter
- bswap %eax
- pxor 0x30(%rbp), @XMM[1]
- inc %eax # increment
- movdqu @XMM[1], ($out)
- bswap %eax
- lea 16($out), $out
- mov %eax, 0x2c(%rbp) # save 32-bit counter
- dec $len
- jnz .Lctr_enc_short
-
-.Lctr_enc_done:
- lea (%rsp), %rax
- pxor %xmm0, %xmm0
-.Lctr_enc_bzero: # wipe key schedule [if any]
- movdqa %xmm0, 0x00(%rax)
- movdqa %xmm0, 0x10(%rax)
- lea 0x20(%rax), %rax
- cmp %rax, %rbp
- ja .Lctr_enc_bzero
-
- lea (%rbp),%rsp # restore %rsp
-___
-$code.=<<___ if ($win64);
- movaps 0x40(%rbp), %xmm6
- movaps 0x50(%rbp), %xmm7
- movaps 0x60(%rbp), %xmm8
- movaps 0x70(%rbp), %xmm9
- movaps 0x80(%rbp), %xmm10
- movaps 0x90(%rbp), %xmm11
- movaps 0xa0(%rbp), %xmm12
- movaps 0xb0(%rbp), %xmm13
- movaps 0xc0(%rbp), %xmm14
- movaps 0xd0(%rbp), %xmm15
- lea 0xa0(%rbp), %rsp
-___
-$code.=<<___;
- mov 0x48(%rsp), %r15
- mov 0x50(%rsp), %r14
- mov 0x58(%rsp), %r13
- mov 0x60(%rsp), %r12
- mov 0x68(%rsp), %rbx
- mov 0x70(%rsp), %rax
- lea 0x78(%rsp), %rsp
- mov %rax, %rbp
-.Lctr_enc_epilogue:
- ret
-.size bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
-___
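
The CTR path avoids per-block byte swapping: .LSWPUP byte-swaps the 32-bit big-endian counter word once, eight consecutive counters are then produced with plain paddd against .LADD1 through .LADD8, and .LSWPUPM0SR folds the swap-back into the pshufb that already feeds the bitslicer. The scalar operation being batched, as a hedged sketch (not this file's interface):

#include <stdint.h>

/* Advance the 32-bit big-endian block counter in the last four bytes
 * of the 16-byte IV, wrapping mod 2^32 as bsaes_ctr32_* assumes. */
static void ctr32_add(uint8_t ivec[16], uint32_t delta)
{
    uint32_t c = ((uint32_t)ivec[12] << 24) | ((uint32_t)ivec[13] << 16) |
                 ((uint32_t)ivec[14] <<  8) |  (uint32_t)ivec[15];
    c += delta;
    ivec[12] = (uint8_t)(c >> 24);
    ivec[13] = (uint8_t)(c >> 16);
    ivec[14] = (uint8_t)(c >>  8);
    ivec[15] = (uint8_t)c;
}
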
-######################################################################
-# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
-# const AES_KEY *key1, const AES_KEY *key2,
-# const unsigned char iv[16]);
-#
-my ($twmask,$twres,$twtmp)=@XMM[13..15];
-$arg6=~s/d$//; # drop the "d" suffix: the ivp argument needs the full 64-bit register
-
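
Each XTS block uses tweak T[i+1] = T[i] * x in GF(2^128) mod x^128 + x^7 + x^2 + x + 1. The branch-free pcmpgtd / pshufd \$0x13 / paddq / pand sequence below implements exactly that doubling, with .Lxts_magic supplying the 0x87 reduction constant and the cross-lane carry mask. Scalar equivalent in C, in constant-time form (names illustrative):

#include <stdint.h>

/* Multiply a 128-bit XTS tweak by x: shift left one bit and, if bit 127
 * was set, fold in the reduction polynomial 0x87. t[0] is the low half. */
static void xts_double(uint64_t t[2])
{
    uint64_t carry = t[1] >> 63;                /* bit 127           */
    t[1] = (t[1] << 1) | (t[0] >> 63);          /* 128-bit shift     */
    t[0] = (t[0] << 1) ^ ((0 - carry) & 0x87);  /* reduce, branchless */
}
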
-$code.=<<___;
-.globl bsaes_xts_encrypt
-.type bsaes_xts_encrypt,\@abi-omnipotent
-.align 16
-bsaes_xts_encrypt:
- mov %rsp, %rax
-.Lxts_enc_prologue:
- push %rbp
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- lea -0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
- mov 0xa0(%rsp),$arg5 # pull key2
- mov 0xa8(%rsp),$arg6 # pull ivp
- lea -0xa0(%rsp), %rsp
- movaps %xmm6, 0x40(%rsp)
- movaps %xmm7, 0x50(%rsp)
- movaps %xmm8, 0x60(%rsp)
- movaps %xmm9, 0x70(%rsp)
- movaps %xmm10, 0x80(%rsp)
- movaps %xmm11, 0x90(%rsp)
- movaps %xmm12, 0xa0(%rsp)
- movaps %xmm13, 0xb0(%rsp)
- movaps %xmm14, 0xc0(%rsp)
- movaps %xmm15, 0xd0(%rsp)
-.Lxts_enc_body:
-___
-$code.=<<___;
- mov %rsp, %rbp # backup %rsp
- mov $arg1, $inp # backup arguments
- mov $arg2, $out
- mov $arg3, $len
- mov $arg4, $key
-
- lea ($arg6), $arg1
- lea 0x20(%rbp), $arg2
- lea ($arg5), $arg3
- call asm_AES_encrypt # generate initial tweak
-
- mov 240($key), %eax # rounds
- mov $len, %rbx # backup $len
-
- mov %eax, %edx # rounds
- shl \$7, %rax # 128 bytes per inner round key
- sub \$`128-32`, %rax # size of bit-sliced key schedule
- sub %rax, %rsp
-
- mov %rsp, %rax # pass key schedule
- mov $key, %rcx # pass key
- mov %edx, %r10d # pass rounds
- call _bsaes_key_convert
- pxor %xmm6, %xmm7 # fix up last round key
- movdqa %xmm7, (%rax) # save last round key
-
- and \$-16, $len
- sub \$0x80, %rsp # place for tweak[8]
- movdqa 0x20(%rbp), @XMM[7] # initial tweak
-
- pxor $twtmp, $twtmp
- movdqa .Lxts_magic(%rip), $twmask
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
-
- sub \$0x80, $len
- jc .Lxts_enc_short
- jmp .Lxts_enc_loop
-
-.align 16
-.Lxts_enc_loop:
-___
- for ($i=0;$i<7;$i++) {
- $code.=<<___;
- pshufd \$0x13, $twtmp, $twres
- pxor $twtmp, $twtmp
- movdqa @XMM[7], @XMM[$i]
- movdqa @XMM[7], `0x10*$i`(%rsp) # save tweak[$i]
- paddq @XMM[7], @XMM[7] # psllq 1,$tweak
- pand $twmask, $twres # isolate carry and residue
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
- pxor $twres, @XMM[7]
-___
- $code.=<<___ if ($i>=1);
- movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
-___
- $code.=<<___ if ($i>=2);
- pxor @XMM[8+$i-2], @XMM[$i-2] # input[] ^ tweak[]
-___
- }
-$code.=<<___;
- movdqu 0x60($inp), @XMM[8+6]
- pxor @XMM[8+5], @XMM[5]
- movdqu 0x70($inp), @XMM[8+7]
- lea 0x80($inp), $inp
- movdqa @XMM[7], 0x70(%rsp)
- pxor @XMM[8+6], @XMM[6]
- lea 0x80(%rsp), %rax # pass key schedule
- pxor @XMM[8+7], @XMM[7]
- mov %edx, %r10d # pass rounds
-
- call _bsaes_encrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[4]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[6]
- movdqu @XMM[4], 0x20($out)
- pxor 0x40(%rsp), @XMM[3]
- movdqu @XMM[6], 0x30($out)
- pxor 0x50(%rsp), @XMM[7]
- movdqu @XMM[3], 0x40($out)
- pxor 0x60(%rsp), @XMM[2]
- movdqu @XMM[7], 0x50($out)
- pxor 0x70(%rsp), @XMM[5]
- movdqu @XMM[2], 0x60($out)
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
-
- movdqa 0x70(%rsp), @XMM[7] # prepare next iteration tweak
- pxor $twtmp, $twtmp
- movdqa .Lxts_magic(%rip), $twmask
- pcmpgtd @XMM[7], $twtmp
- pshufd \$0x13, $twtmp, $twres
- pxor $twtmp, $twtmp
- paddq @XMM[7], @XMM[7] # psllq 1,$tweak
- pand $twmask, $twres # isolate carry and residue
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
- pxor $twres, @XMM[7]
-
- sub \$0x80,$len
- jnc .Lxts_enc_loop
-
-.Lxts_enc_short:
- add \$0x80, $len
- jz .Lxts_enc_done
-___
- for ($i=0;$i<7;$i++) {
- $code.=<<___;
- pshufd \$0x13, $twtmp, $twres
- pxor $twtmp, $twtmp
- movdqa @XMM[7], @XMM[$i]
- movdqa @XMM[7], `0x10*$i`(%rsp) # save tweak[$i]
- paddq @XMM[7], @XMM[7] # psllq 1,$tweak
- pand $twmask, $twres # isolate carry and residue
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
- pxor $twres, @XMM[7]
-___
- $code.=<<___ if ($i>=1);
- movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
- cmp \$`0x10*$i`,$len
- je .Lxts_enc_$i
-___
- $code.=<<___ if ($i>=2);
- pxor @XMM[8+$i-2], @XMM[$i-2] # input[] ^ tweak[]
-___
- }
-$code.=<<___;
- movdqu 0x60($inp), @XMM[8+6]
- pxor @XMM[8+5], @XMM[5]
- movdqa @XMM[7], 0x70(%rsp)
- lea 0x70($inp), $inp
- pxor @XMM[8+6], @XMM[6]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_encrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[4]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[6]
- movdqu @XMM[4], 0x20($out)
- pxor 0x40(%rsp), @XMM[3]
- movdqu @XMM[6], 0x30($out)
- pxor 0x50(%rsp), @XMM[7]
- movdqu @XMM[3], 0x40($out)
- pxor 0x60(%rsp), @XMM[2]
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[2], 0x60($out)
- lea 0x70($out), $out
-
- movdqa 0x70(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_6:
- pxor @XMM[8+4], @XMM[4]
- lea 0x60($inp), $inp
- pxor @XMM[8+5], @XMM[5]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_encrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[4]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[6]
- movdqu @XMM[4], 0x20($out)
- pxor 0x40(%rsp), @XMM[3]
- movdqu @XMM[6], 0x30($out)
- pxor 0x50(%rsp), @XMM[7]
- movdqu @XMM[3], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- lea 0x60($out), $out
-
- movdqa 0x60(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_5:
- pxor @XMM[8+3], @XMM[3]
- lea 0x50($inp), $inp
- pxor @XMM[8+4], @XMM[4]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_encrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[4]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[6]
- movdqu @XMM[4], 0x20($out)
- pxor 0x40(%rsp), @XMM[3]
- movdqu @XMM[6], 0x30($out)
- movdqu @XMM[3], 0x40($out)
- lea 0x50($out), $out
-
- movdqa 0x50(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_4:
- pxor @XMM[8+2], @XMM[2]
- lea 0x40($inp), $inp
- pxor @XMM[8+3], @XMM[3]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_encrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[4]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[6]
- movdqu @XMM[4], 0x20($out)
- movdqu @XMM[6], 0x30($out)
- lea 0x40($out), $out
-
- movdqa 0x40(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_3:
- pxor @XMM[8+1], @XMM[1]
- lea 0x30($inp), $inp
- pxor @XMM[8+2], @XMM[2]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_encrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[4]
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[4], 0x20($out)
- lea 0x30($out), $out
-
- movdqa 0x30(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_2:
- pxor @XMM[8+0], @XMM[0]
- lea 0x20($inp), $inp
- pxor @XMM[8+1], @XMM[1]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_encrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- lea 0x20($out), $out
-
- movdqa 0x20(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_enc_done
-.align 16
-.Lxts_enc_1:
- pxor @XMM[0], @XMM[8]
- lea 0x10($inp), $inp
- movdqa @XMM[8], 0x20(%rbp)
- lea 0x20(%rbp), $arg1
- lea 0x20(%rbp), $arg2
- lea ($key), $arg3
- call asm_AES_encrypt # doesn't touch %xmm
- pxor 0x20(%rbp), @XMM[0] # ^= tweak[]
- #pxor @XMM[8], @XMM[0]
- #lea 0x80(%rsp), %rax # pass key schedule
- #mov %edx, %r10d # pass rounds
- #call _bsaes_encrypt8
- #pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- movdqu @XMM[0], 0x00($out) # write output
- lea 0x10($out), $out
-
- movdqa 0x10(%rsp), @XMM[7] # next iteration tweak
-
-.Lxts_enc_done:
- and \$15, %ebx
- jz .Lxts_enc_ret
- mov $out, %rdx
-
-.Lxts_enc_steal:
- movzb ($inp), %eax
- movzb -16(%rdx), %ecx
- lea 1($inp), $inp
- mov %al, -16(%rdx)
- mov %cl, 0(%rdx)
- lea 1(%rdx), %rdx
- sub \$1,%ebx
- jnz .Lxts_enc_steal
-
- movdqu -16($out), @XMM[0]
- lea 0x20(%rbp), $arg1
- pxor @XMM[7], @XMM[0]
- lea 0x20(%rbp), $arg2
- movdqa @XMM[0], 0x20(%rbp)
- lea ($key), $arg3
- call asm_AES_encrypt # doesn't touch %xmm
- pxor 0x20(%rbp), @XMM[7]
- movdqu @XMM[7], -16($out)
-
-.Lxts_enc_ret:
- lea (%rsp), %rax
- pxor %xmm0, %xmm0
-.Lxts_enc_bzero: # wipe key schedule [if any]
- movdqa %xmm0, 0x00(%rax)
- movdqa %xmm0, 0x10(%rax)
- lea 0x20(%rax), %rax
- cmp %rax, %rbp
- ja .Lxts_enc_bzero
-
- lea (%rbp),%rsp # restore %rsp
-___
-$code.=<<___ if ($win64);
- movaps 0x40(%rbp), %xmm6
- movaps 0x50(%rbp), %xmm7
- movaps 0x60(%rbp), %xmm8
- movaps 0x70(%rbp), %xmm9
- movaps 0x80(%rbp), %xmm10
- movaps 0x90(%rbp), %xmm11
- movaps 0xa0(%rbp), %xmm12
- movaps 0xb0(%rbp), %xmm13
- movaps 0xc0(%rbp), %xmm14
- movaps 0xd0(%rbp), %xmm15
- lea 0xa0(%rbp), %rsp
-___
-$code.=<<___;
- mov 0x48(%rsp), %r15
- mov 0x50(%rsp), %r14
- mov 0x58(%rsp), %r13
- mov 0x60(%rsp), %r12
- mov 0x68(%rsp), %rbx
- mov 0x70(%rsp), %rax
- lea 0x78(%rsp), %rsp
- mov %rax, %rbp
-.Lxts_enc_epilogue:
- ret
-.size bsaes_xts_encrypt,.-bsaes_xts_encrypt
-
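
When len is not a multiple of 16, .Lxts_enc_steal applies ciphertext stealing: the head of the last full ciphertext block becomes the short final block, the plaintext tail tops the stolen block back up to 16 bytes, and that block is encrypted once more with the next tweak. The byte shuffle in C, a sketch of the movzb loop only; re-encryption and tweak handling stay with the caller:

#include <stddef.h>
#include <stdint.h>

/* out points just past the last full ciphertext block already written;
 * tail is the m-byte (m < 16) plaintext remainder. */
static void xts_steal_tail(uint8_t *out, const uint8_t *tail, size_t m)
{
    uint8_t *last = out - 16;          /* the block whose head we steal */

    for (size_t i = 0; i < m; i++) {
        out[i]  = last[i];             /* mov %cl, 0(%rdx)   */
        last[i] = tail[i];             /* mov %al, -16(%rdx) */
    }
    /* caller re-encrypts last[0..15] with the following tweak */
}
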
-.globl bsaes_xts_decrypt
-.type bsaes_xts_decrypt,\@abi-omnipotent
-.align 16
-bsaes_xts_decrypt:
- mov %rsp, %rax
-.Lxts_dec_prologue:
- push %rbp
- push %rbx
- push %r12
- push %r13
- push %r14
- push %r15
- lea -0x48(%rsp), %rsp
-___
-$code.=<<___ if ($win64);
- mov 0xa0(%rsp),$arg5 # pull key2
- mov 0xa8(%rsp),$arg6 # pull ivp
- lea -0xa0(%rsp), %rsp
- movaps %xmm6, 0x40(%rsp)
- movaps %xmm7, 0x50(%rsp)
- movaps %xmm8, 0x60(%rsp)
- movaps %xmm9, 0x70(%rsp)
- movaps %xmm10, 0x80(%rsp)
- movaps %xmm11, 0x90(%rsp)
- movaps %xmm12, 0xa0(%rsp)
- movaps %xmm13, 0xb0(%rsp)
- movaps %xmm14, 0xc0(%rsp)
- movaps %xmm15, 0xd0(%rsp)
-.Lxts_dec_body:
-___
-$code.=<<___;
- mov %rsp, %rbp # backup %rsp
- mov $arg1, $inp # backup arguments
- mov $arg2, $out
- mov $arg3, $len
- mov $arg4, $key
-
- lea ($arg6), $arg1
- lea 0x20(%rbp), $arg2
- lea ($arg5), $arg3
- call asm_AES_encrypt # generate initial tweak
-
- mov 240($key), %eax # rounds
- mov $len, %rbx # backup $len
-
- mov %eax, %edx # rounds
- shl \$7, %rax # 128 bytes per inner round key
- sub \$`128-32`, %rax # size of bit-sliced key schedule
- sub %rax, %rsp
-
- mov %rsp, %rax # pass key schedule
- mov $key, %rcx # pass key
- mov %edx, %r10d # pass rounds
- call _bsaes_key_convert
- pxor (%rsp), %xmm7 # fix up round 0 key
- movdqa %xmm6, (%rax) # save last round key
- movdqa %xmm7, (%rsp)
-
- xor %eax, %eax # if ($len%16) len-=16;
- and \$-16, $len
- test \$15, %ebx
- setnz %al
- shl \$4, %rax
- sub %rax, $len
-
- sub \$0x80, %rsp # place for tweak[8]
- movdqa 0x20(%rbp), @XMM[7] # initial tweak
-
- pxor $twtmp, $twtmp
- movdqa .Lxts_magic(%rip), $twmask
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
-
- sub \$0x80, $len
- jc .Lxts_dec_short
- jmp .Lxts_dec_loop
-
-.align 16
-.Lxts_dec_loop:
-___
- for ($i=0;$i<7;$i++) {
- $code.=<<___;
- pshufd \$0x13, $twtmp, $twres
- pxor $twtmp, $twtmp
- movdqa @XMM[7], @XMM[$i]
- movdqa @XMM[7], `0x10*$i`(%rsp) # save tweak[$i]
- paddq @XMM[7], @XMM[7] # psllq 1,$tweak
- pand $twmask, $twres # isolate carry and residue
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
- pxor $twres, @XMM[7]
-___
- $code.=<<___ if ($i>=1);
- movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
-___
- $code.=<<___ if ($i>=2);
- pxor @XMM[8+$i-2], @XMM[$i-2] # input[] ^ tweak[]
-___
- }
-$code.=<<___;
- movdqu 0x60($inp), @XMM[8+6]
- pxor @XMM[8+5], @XMM[5]
- movdqu 0x70($inp), @XMM[8+7]
- lea 0x80($inp), $inp
- movdqa @XMM[7], 0x70(%rsp)
- pxor @XMM[8+6], @XMM[6]
- lea 0x80(%rsp), %rax # pass key schedule
- pxor @XMM[8+7], @XMM[7]
- mov %edx, %r10d # pass rounds
-
- call _bsaes_decrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[6]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[4]
- movdqu @XMM[6], 0x20($out)
- pxor 0x40(%rsp), @XMM[2]
- movdqu @XMM[4], 0x30($out)
- pxor 0x50(%rsp), @XMM[7]
- movdqu @XMM[2], 0x40($out)
- pxor 0x60(%rsp), @XMM[3]
- movdqu @XMM[7], 0x50($out)
- pxor 0x70(%rsp), @XMM[5]
- movdqu @XMM[3], 0x60($out)
- movdqu @XMM[5], 0x70($out)
- lea 0x80($out), $out
-
- movdqa 0x70(%rsp), @XMM[7] # prepare next iteration tweak
- pxor $twtmp, $twtmp
- movdqa .Lxts_magic(%rip), $twmask
- pcmpgtd @XMM[7], $twtmp
- pshufd \$0x13, $twtmp, $twres
- pxor $twtmp, $twtmp
- paddq @XMM[7], @XMM[7] # psllq 1,$tweak
- pand $twmask, $twres # isolate carry and residue
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
- pxor $twres, @XMM[7]
-
- sub \$0x80,$len
- jnc .Lxts_dec_loop
-
-.Lxts_dec_short:
- add \$0x80, $len
- jz .Lxts_dec_done
-___
- for ($i=0;$i<7;$i++) {
- $code.=<<___;
- pshufd \$0x13, $twtmp, $twres
- pxor $twtmp, $twtmp
- movdqa @XMM[7], @XMM[$i]
- movdqa @XMM[7], `0x10*$i`(%rsp) # save tweak[$i]
- paddq @XMM[7], @XMM[7] # psllq 1,$tweak
- pand $twmask, $twres # isolate carry and residue
- pcmpgtd @XMM[7], $twtmp # broadcast upper bits
- pxor $twres, @XMM[7]
-___
- $code.=<<___ if ($i>=1);
- movdqu `0x10*($i-1)`($inp), @XMM[8+$i-1]
- cmp \$`0x10*$i`,$len
- je .Lxts_dec_$i
-___
- $code.=<<___ if ($i>=2);
- pxor @XMM[8+$i-2], @XMM[$i-2] # input[] ^ tweak[]
-___
- }
-$code.=<<___;
- movdqu 0x60($inp), @XMM[8+6]
- pxor @XMM[8+5], @XMM[5]
- movdqa @XMM[7], 0x70(%rsp)
- lea 0x70($inp), $inp
- pxor @XMM[8+6], @XMM[6]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_decrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[6]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[4]
- movdqu @XMM[6], 0x20($out)
- pxor 0x40(%rsp), @XMM[2]
- movdqu @XMM[4], 0x30($out)
- pxor 0x50(%rsp), @XMM[7]
- movdqu @XMM[2], 0x40($out)
- pxor 0x60(%rsp), @XMM[3]
- movdqu @XMM[7], 0x50($out)
- movdqu @XMM[3], 0x60($out)
- lea 0x70($out), $out
-
- movdqa 0x70(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_6:
- pxor @XMM[8+4], @XMM[4]
- lea 0x60($inp), $inp
- pxor @XMM[8+5], @XMM[5]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_decrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[6]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[4]
- movdqu @XMM[6], 0x20($out)
- pxor 0x40(%rsp), @XMM[2]
- movdqu @XMM[4], 0x30($out)
- pxor 0x50(%rsp), @XMM[7]
- movdqu @XMM[2], 0x40($out)
- movdqu @XMM[7], 0x50($out)
- lea 0x60($out), $out
-
- movdqa 0x60(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_5:
- pxor @XMM[8+3], @XMM[3]
- lea 0x50($inp), $inp
- pxor @XMM[8+4], @XMM[4]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_decrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[6]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[4]
- movdqu @XMM[6], 0x20($out)
- pxor 0x40(%rsp), @XMM[2]
- movdqu @XMM[4], 0x30($out)
- movdqu @XMM[2], 0x40($out)
- lea 0x50($out), $out
-
- movdqa 0x50(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_4:
- pxor @XMM[8+2], @XMM[2]
- lea 0x40($inp), $inp
- pxor @XMM[8+3], @XMM[3]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_decrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[6]
- movdqu @XMM[1], 0x10($out)
- pxor 0x30(%rsp), @XMM[4]
- movdqu @XMM[6], 0x20($out)
- movdqu @XMM[4], 0x30($out)
- lea 0x40($out), $out
-
- movdqa 0x40(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_3:
- pxor @XMM[8+1], @XMM[1]
- lea 0x30($inp), $inp
- pxor @XMM[8+2], @XMM[2]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_decrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- pxor 0x20(%rsp), @XMM[6]
- movdqu @XMM[1], 0x10($out)
- movdqu @XMM[6], 0x20($out)
- lea 0x30($out), $out
-
- movdqa 0x30(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_2:
- pxor @XMM[8+0], @XMM[0]
- lea 0x20($inp), $inp
- pxor @XMM[8+1], @XMM[1]
- lea 0x80(%rsp), %rax # pass key schedule
- mov %edx, %r10d # pass rounds
-
- call _bsaes_decrypt8
-
- pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- pxor 0x10(%rsp), @XMM[1]
- movdqu @XMM[0], 0x00($out) # write output
- movdqu @XMM[1], 0x10($out)
- lea 0x20($out), $out
-
- movdqa 0x20(%rsp), @XMM[7] # next iteration tweak
- jmp .Lxts_dec_done
-.align 16
-.Lxts_dec_1:
- pxor @XMM[0], @XMM[8]
- lea 0x10($inp), $inp
- movdqa @XMM[8], 0x20(%rbp)
- lea 0x20(%rbp), $arg1
- lea 0x20(%rbp), $arg2
- lea ($key), $arg3
- call asm_AES_decrypt # doesn't touch %xmm
- pxor 0x20(%rbp), @XMM[0] # ^= tweak[]
- #pxor @XMM[8], @XMM[0]
- #lea 0x80(%rsp), %rax # pass key schedule
- #mov %edx, %r10d # pass rounds
- #call _bsaes_decrypt8
- #pxor 0x00(%rsp), @XMM[0] # ^= tweak[]
- movdqu @XMM[0], 0x00($out) # write output
- lea 0x10($out), $out
-
- movdqa 0x10(%rsp), @XMM[7] # next iteration tweak
-
-.Lxts_dec_done:
- and \$15, %ebx
- jz .Lxts_dec_ret
-
- pxor $twtmp, $twtmp
- movdqa .Lxts_magic(%rip), $twmask
- pcmpgtd @XMM[7], $twtmp
- pshufd \$0x13, $twtmp, $twres
- movdqa @XMM[7], @XMM[6]
- paddq @XMM[7], @XMM[7] # psllq 1,$tweak
- pand $twmask, $twres # isolate carry and residue
- movdqu ($inp), @XMM[0]
- pxor $twres, @XMM[7]
-
- lea 0x20(%rbp), $arg1
- pxor @XMM[7], @XMM[0]
- lea 0x20(%rbp), $arg2
- movdqa @XMM[0], 0x20(%rbp)
- lea ($key), $arg3
- call asm_AES_decrypt # doesn't touch %xmm
- pxor 0x20(%rbp), @XMM[7]
- mov $out, %rdx
- movdqu @XMM[7], ($out)
-
-.Lxts_dec_steal:
- movzb 16($inp), %eax
- movzb (%rdx), %ecx
- lea 1($inp), $inp
- mov %al, (%rdx)
- mov %cl, 16(%rdx)
- lea 1(%rdx), %rdx
- sub \$1,%ebx
- jnz .Lxts_dec_steal
-
- movdqu ($out), @XMM[0]
- lea 0x20(%rbp), $arg1
- pxor @XMM[6], @XMM[0]
- lea 0x20(%rbp), $arg2
- movdqa @XMM[0], 0x20(%rbp)
- lea ($key), $arg3
- call asm_AES_decrypt # doesn't touch %xmm
- pxor 0x20(%rbp), @XMM[6]
- movdqu @XMM[6], ($out)
-
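The .Lxts_dec_steal loop swaps bytes one at a time: each residual input byte
displaces the head of the block just written at $out, and the displaced bytes
become the final partial block at offset 16. The stolen block is then
decrypted once more with the previous tweak saved in @XMM[6], which is why
that tweak was stashed before the extra doubling. A string-level Perl sketch
of the swap, not part of the original:

    # $tail holds the $len%16 residual input bytes, $block the last
    # 16-byte output block written above.
    sub xts_steal_bytes {
        my ($tail, $block) = @_;
        my $n    = length $tail;
        my $head = substr $block, 0, $n;   # bytes pushed out to offset 16
        substr($block, 0, $n) = $tail;     # graft the input tail in
        return $block . $head;             # $block gets one more decrypt
    }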
-.Lxts_dec_ret:
- lea (%rsp), %rax
- pxor %xmm0, %xmm0
-.Lxts_dec_bzero: # wipe key schedule [if any]
- movdqa %xmm0, 0x00(%rax)
- movdqa %xmm0, 0x10(%rax)
- lea 0x20(%rax), %rax
- cmp %rax, %rbp
- ja .Lxts_dec_bzero
-
- lea (%rbp),%rsp # restore %rsp
-___
-$code.=<<___ if ($win64);
- movaps 0x40(%rbp), %xmm6
- movaps 0x50(%rbp), %xmm7
- movaps 0x60(%rbp), %xmm8
- movaps 0x70(%rbp), %xmm9
- movaps 0x80(%rbp), %xmm10
- movaps 0x90(%rbp), %xmm11
- movaps 0xa0(%rbp), %xmm12
- movaps 0xb0(%rbp), %xmm13
- movaps 0xc0(%rbp), %xmm14
- movaps 0xd0(%rbp), %xmm15
- lea 0xa0(%rbp), %rsp
-___
-$code.=<<___;
- mov 0x48(%rsp), %r15
- mov 0x50(%rsp), %r14
- mov 0x58(%rsp), %r13
- mov 0x60(%rsp), %r12
- mov 0x68(%rsp), %rbx
- mov 0x70(%rsp), %rax
- lea 0x78(%rsp), %rsp
- mov %rax, %rbp
-.Lxts_dec_epilogue:
- ret
-.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
-___
-}
-$code.=<<___;
-.type _bsaes_const,\@object
-.align 64
-_bsaes_const:
-.LM0ISR: # InvShiftRows constants
- .quad 0x0a0e0206070b0f03, 0x0004080c0d010509
-.LISRM0:
- .quad 0x01040b0e0205080f, 0x0306090c00070a0d
-.LISR:
- .quad 0x0504070602010003, 0x0f0e0d0c080b0a09
-.LBS0: # bit-slice constants
- .quad 0x5555555555555555, 0x5555555555555555
-.LBS1:
- .quad 0x3333333333333333, 0x3333333333333333
-.LBS2:
- .quad 0x0f0f0f0f0f0f0f0f, 0x0f0f0f0f0f0f0f0f
-.LSR: # shiftrows constants
- .quad 0x0504070600030201, 0x0f0e0d0c0a09080b
-.LSRM0:
- .quad 0x0304090e00050a0f, 0x01060b0c0207080d
-.LM0SR:
- .quad 0x0a0e02060f03070b, 0x0004080c05090d01
-.LSWPUP: # byte-swap upper dword
- .quad 0x0706050403020100, 0x0c0d0e0f0b0a0908
-.LSWPUPM0SR:
- .quad 0x0a0d02060c03070b, 0x0004080f05090e01
-.LADD1: # counter increment constants
- .quad 0x0000000000000000, 0x0000000100000000
-.LADD2:
- .quad 0x0000000000000000, 0x0000000200000000
-.LADD3:
- .quad 0x0000000000000000, 0x0000000300000000
-.LADD4:
- .quad 0x0000000000000000, 0x0000000400000000
-.LADD5:
- .quad 0x0000000000000000, 0x0000000500000000
-.LADD6:
- .quad 0x0000000000000000, 0x0000000600000000
-.LADD7:
- .quad 0x0000000000000000, 0x0000000700000000
-.LADD8:
- .quad 0x0000000000000000, 0x0000000800000000
-.Lxts_magic:
- .long 0x87,0,1,0
-.Lmasks:
- .quad 0x0101010101010101, 0x0101010101010101
- .quad 0x0202020202020202, 0x0202020202020202
- .quad 0x0404040404040404, 0x0404040404040404
- .quad 0x0808080808080808, 0x0808080808080808
-.LM0:
- .quad 0x02060a0e03070b0f, 0x0004080c0105090d
-.L63:
- .quad 0x6363636363636363, 0x6363636363636363
-.asciz "Bit-sliced AES for x86_64/SSSE3, Emilia Käsper, Peter Schwabe, Andy Polyakov"
-.align 64
-.size _bsaes_const,.-_bsaes_const
-___
-
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-# CONTEXT *context,DISPATCHER_CONTEXT *disp)
-if ($win64) {
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern __imp_RtlVirtualUnwind
-.type se_handler,\@abi-omnipotent
-.align 16
-se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
-
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lin_prologue
-
- mov 152($context),%rax # pull context->Rsp
-
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lin_prologue
-
- mov 160($context),%rax # pull context->Rbp
-
- lea 0x40(%rax),%rsi # %xmm save area
- lea 512($context),%rdi # &context.Xmm6
- mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
- .long 0xa548f3fc # cld; rep movsq
- lea 0xa0(%rax),%rax # adjust stack pointer
-
- mov 0x70(%rax),%rbp
- mov 0x68(%rax),%rbx
- mov 0x60(%rax),%r12
- mov 0x58(%rax),%r13
- mov 0x50(%rax),%r14
- mov 0x48(%rax),%r15
- lea 0x78(%rax),%rax # adjust stack pointer
- mov %rbx,144($context) # restore context->Rbx
- mov %rbp,160($context) # restore context->Rbp
- mov %r12,216($context) # restore context->R12
- mov %r13,224($context) # restore context->R13
- mov %r14,232($context) # restore context->R14
- mov %r15,240($context) # restore context->R15
-
-.Lin_prologue:
- mov %rax,152($context) # restore context->Rsp
-
- mov 40($disp),%rdi # disp->ContextRecord
- mov $context,%rsi # context
- mov \$`1232/8`,%ecx # sizeof(CONTEXT)
- .long 0xa548f3fc # cld; rep movsq
-
- mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
- mov 8(%rsi),%rdx # arg2, disp->ImageBase
- mov 0(%rsi),%r8 # arg3, disp->ControlPc
- mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
- mov 40(%rsi),%r10 # disp->ContextRecord
- lea 56(%rsi),%r11 # &disp->HandlerData
- lea 24(%rsi),%r12 # &disp->EstablisherFrame
- mov %r10,32(%rsp) # arg5
- mov %r11,40(%rsp) # arg6
- mov %r12,48(%rsp) # arg7
- mov %rcx,56(%rsp) # arg8, (NULL)
- call *__imp_RtlVirtualUnwind(%rip)
-
- mov \$1,%eax # ExceptionContinueSearch
- add \$64,%rsp
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %rdi
- pop %rsi
- ret
-.size se_handler,.-se_handler
-
-.section .pdata
-.align 4
-___
-$code.=<<___ if ($ecb);
- .rva .Lecb_enc_prologue
- .rva .Lecb_enc_epilogue
- .rva .Lecb_enc_info
-
- .rva .Lecb_dec_prologue
- .rva .Lecb_dec_epilogue
- .rva .Lecb_dec_info
-___
-$code.=<<___;
- .rva .Lcbc_dec_prologue
- .rva .Lcbc_dec_epilogue
- .rva .Lcbc_dec_info
-
- .rva .Lctr_enc_prologue
- .rva .Lctr_enc_epilogue
- .rva .Lctr_enc_info
-
- .rva .Lxts_enc_prologue
- .rva .Lxts_enc_epilogue
- .rva .Lxts_enc_info
-
- .rva .Lxts_dec_prologue
- .rva .Lxts_dec_epilogue
- .rva .Lxts_dec_info
-
-.section .xdata
-.align 8
-___
-$code.=<<___ if ($ecb);
-.Lecb_enc_info:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lecb_enc_body,.Lecb_enc_epilogue # HandlerData[]
-.Lecb_dec_info:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lecb_dec_body,.Lecb_dec_epilogue # HandlerData[]
-___
-$code.=<<___;
-.Lcbc_dec_info:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lcbc_dec_body,.Lcbc_dec_epilogue # HandlerData[]
-.Lctr_enc_info:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lctr_enc_body,.Lctr_enc_epilogue # HandlerData[]
-.Lxts_enc_info:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lxts_enc_body,.Lxts_enc_epilogue # HandlerData[]
-.Lxts_dec_info:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lxts_dec_body,.Lxts_dec_epilogue # HandlerData[]
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-print $code;
-
-close STDOUT;
diff --git a/app/openssl/crypto/aes/asm/vpaes-x86.S b/app/openssl/crypto/aes/asm/vpaes-x86.S
deleted file mode 100644
index c53a5074..00000000
--- a/app/openssl/crypto/aes/asm/vpaes-x86.S
+++ /dev/null
@@ -1,661 +0,0 @@
-.file "vpaes-x86.s"
-.text
-.align 64
-.L_vpaes_consts:
-.long 218628480,235210255,168496130,67568393
-.long 252381056,17041926,33884169,51187212
-.long 252645135,252645135,252645135,252645135
-.long 1512730624,3266504856,1377990664,3401244816
-.long 830229760,1275146365,2969422977,3447763452
-.long 3411033600,2979783055,338359620,2782886510
-.long 4209124096,907596821,221174255,1006095553
-.long 191964160,3799684038,3164090317,1589111125
-.long 182528256,1777043520,2877432650,3265356744
-.long 1874708224,3503451415,3305285752,363511674
-.long 1606117888,3487855781,1093350906,2384367825
-.long 197121,67569157,134941193,202313229
-.long 67569157,134941193,202313229,197121
-.long 134941193,202313229,197121,67569157
-.long 202313229,197121,67569157,134941193
-.long 33619971,100992007,168364043,235736079
-.long 235736079,33619971,100992007,168364043
-.long 168364043,235736079,33619971,100992007
-.long 100992007,168364043,235736079,33619971
-.long 50462976,117835012,185207048,252579084
-.long 252314880,51251460,117574920,184942860
-.long 184682752,252054788,50987272,118359308
-.long 118099200,185467140,251790600,50727180
-.long 2946363062,528716217,1300004225,1881839624
-.long 1532713819,1532713819,1532713819,1532713819
-.long 3602276352,4288629033,3737020424,4153884961
-.long 1354558464,32357713,2958822624,3775749553
-.long 1201988352,132424512,1572796698,503232858
-.long 2213177600,1597421020,4103937655,675398315
-.long 2749646592,4273543773,1511898873,121693092
-.long 3040248576,1103263732,2871565598,1608280554
-.long 2236667136,2588920351,482954393,64377734
-.long 3069987328,291237287,2117370568,3650299247
-.long 533321216,3573750986,2572112006,1401264716
-.long 1339849704,2721158661,548607111,3445553514
-.long 2128193280,3054596040,2183486460,1257083700
-.long 655635200,1165381986,3923443150,2344132524
-.long 190078720,256924420,290342170,357187870
-.long 1610966272,2263057382,4103205268,309794674
-.long 2592527872,2233205587,1335446729,3402964816
-.long 3973531904,3225098121,3002836325,1918774430
-.long 3870401024,2102906079,2284471353,4117666579
-.long 617007872,1021508343,366931923,691083277
-.long 2528395776,3491914898,2968704004,1613121270
-.long 3445188352,3247741094,844474987,4093578302
-.long 651481088,1190302358,1689581232,574775300
-.long 4289380608,206939853,2555985458,2489840491
-.long 2130264064,327674451,3566485037,3349835193
-.long 2470714624,316102159,3636825756,3393945945
-.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105
-.byte 111,110,32,65,69,83,32,102,111,114,32,120,56,54,47,83
-.byte 83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117
-.byte 114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105
-.byte 118,101,114,115,105,116,121,41,0
-.align 64
-.type _vpaes_preheat,@function
-.align 16
-_vpaes_preheat:
- addl (%esp),%ebp
- movdqa -48(%ebp),%xmm7
- movdqa -16(%ebp),%xmm6
- ret
-.size _vpaes_preheat,.-_vpaes_preheat
-.type _vpaes_encrypt_core,@function
-.align 16
-_vpaes_encrypt_core:
- movl $16,%ecx
- movl 240(%edx),%eax
- movdqa %xmm6,%xmm1
- movdqa (%ebp),%xmm2
- pandn %xmm0,%xmm1
- movdqu (%edx),%xmm5
- psrld $4,%xmm1
- pand %xmm6,%xmm0
-.byte 102,15,56,0,208
- movdqa 16(%ebp),%xmm0
-.byte 102,15,56,0,193
- pxor %xmm5,%xmm2
- pxor %xmm2,%xmm0
- addl $16,%edx
- leal 192(%ebp),%ebx
- jmp .L000enc_entry
-.align 16
-.L001enc_loop:
- movdqa 32(%ebp),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm5,%xmm4
- movdqa 48(%ebp),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- movdqa 64(%ebp),%xmm5
-.byte 102,15,56,0,234
- movdqa -64(%ebx,%ecx,1),%xmm1
- movdqa 80(%ebp),%xmm2
-.byte 102,15,56,0,211
- pxor %xmm5,%xmm2
- movdqa (%ebx,%ecx,1),%xmm4
- movdqa %xmm0,%xmm3
-.byte 102,15,56,0,193
- addl $16,%edx
- pxor %xmm2,%xmm0
-.byte 102,15,56,0,220
- addl $16,%ecx
- pxor %xmm0,%xmm3
-.byte 102,15,56,0,193
- andl $48,%ecx
- pxor %xmm3,%xmm0
- subl $1,%eax
-.L000enc_entry:
- movdqa %xmm6,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm6,%xmm0
- movdqa -32(%ebp),%xmm5
-.byte 102,15,56,0,232
- pxor %xmm1,%xmm0
- movdqa %xmm7,%xmm3
-.byte 102,15,56,0,217
- pxor %xmm5,%xmm3
- movdqa %xmm7,%xmm4
-.byte 102,15,56,0,224
- pxor %xmm5,%xmm4
- movdqa %xmm7,%xmm2
-.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
- movdqa %xmm7,%xmm3
- movdqu (%edx),%xmm5
-.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
- jnz .L001enc_loop
- movdqa 96(%ebp),%xmm4
- movdqa 112(%ebp),%xmm0
-.byte 102,15,56,0,226
- pxor %xmm5,%xmm4
-.byte 102,15,56,0,195
- movdqa 64(%ebx,%ecx,1),%xmm1
- pxor %xmm4,%xmm0
-.byte 102,15,56,0,193
- ret
-.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
-.type _vpaes_decrypt_core,@function
-.align 16
-_vpaes_decrypt_core:
- movl 240(%edx),%eax
- leal 608(%ebp),%ebx
- movdqa %xmm6,%xmm1
- movdqa -64(%ebx),%xmm2
- pandn %xmm0,%xmm1
- movl %eax,%ecx
- psrld $4,%xmm1
- movdqu (%edx),%xmm5
- shll $4,%ecx
- pand %xmm6,%xmm0
-.byte 102,15,56,0,208
- movdqa -48(%ebx),%xmm0
- xorl $48,%ecx
-.byte 102,15,56,0,193
- andl $48,%ecx
- pxor %xmm5,%xmm2
- movdqa 176(%ebp),%xmm5
- pxor %xmm2,%xmm0
- addl $16,%edx
- leal -352(%ebx,%ecx,1),%ecx
- jmp .L002dec_entry
-.align 16
-.L003dec_loop:
- movdqa -32(%ebx),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa -16(%ebx),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- addl $16,%edx
-.byte 102,15,56,0,197
- movdqa (%ebx),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 16(%ebx),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- subl $1,%eax
-.byte 102,15,56,0,197
- movdqa 32(%ebx),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 48(%ebx),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
-.byte 102,15,56,0,197
- movdqa 64(%ebx),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 80(%ebx),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
-.byte 102,15,58,15,237,12
-.L002dec_entry:
- movdqa %xmm6,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm6,%xmm0
- movdqa -32(%ebp),%xmm2
-.byte 102,15,56,0,208
- pxor %xmm1,%xmm0
- movdqa %xmm7,%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
- movdqa %xmm7,%xmm4
-.byte 102,15,56,0,224
- pxor %xmm2,%xmm4
- movdqa %xmm7,%xmm2
-.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
- movdqa %xmm7,%xmm3
-.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
- movdqu (%edx),%xmm0
- jnz .L003dec_loop
- movdqa 96(%ebx),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 112(%ebx),%xmm0
- movdqa (%ecx),%xmm2
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
-.byte 102,15,56,0,194
- ret
-.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
-.type _vpaes_schedule_core,@function
-.align 16
-_vpaes_schedule_core:
- addl (%esp),%ebp
- movdqu (%esi),%xmm0
- movdqa 320(%ebp),%xmm2
- movdqa %xmm0,%xmm3
- leal (%ebp),%ebx
- movdqa %xmm2,4(%esp)
- call _vpaes_schedule_transform
- movdqa %xmm0,%xmm7
- testl %edi,%edi
- jnz .L004schedule_am_decrypting
- movdqu %xmm0,(%edx)
- jmp .L005schedule_go
-.L004schedule_am_decrypting:
- movdqa 256(%ebp,%ecx,1),%xmm1
-.byte 102,15,56,0,217
- movdqu %xmm3,(%edx)
- xorl $48,%ecx
-.L005schedule_go:
- cmpl $192,%eax
- ja .L006schedule_256
- je .L007schedule_192
-.L008schedule_128:
- movl $10,%eax
-.L009loop_schedule_128:
- call _vpaes_schedule_round
- decl %eax
- jz .L010schedule_mangle_last
- call _vpaes_schedule_mangle
- jmp .L009loop_schedule_128
-.align 16
-.L007schedule_192:
- movdqu 8(%esi),%xmm0
- call _vpaes_schedule_transform
- movdqa %xmm0,%xmm6
- pxor %xmm4,%xmm4
- movhlps %xmm4,%xmm6
- movl $4,%eax
-.L011loop_schedule_192:
- call _vpaes_schedule_round
-.byte 102,15,58,15,198,8
- call _vpaes_schedule_mangle
- call _vpaes_schedule_192_smear
- call _vpaes_schedule_mangle
- call _vpaes_schedule_round
- decl %eax
- jz .L010schedule_mangle_last
- call _vpaes_schedule_mangle
- call _vpaes_schedule_192_smear
- jmp .L011loop_schedule_192
-.align 16
-.L006schedule_256:
- movdqu 16(%esi),%xmm0
- call _vpaes_schedule_transform
- movl $7,%eax
-.L012loop_schedule_256:
- call _vpaes_schedule_mangle
- movdqa %xmm0,%xmm6
- call _vpaes_schedule_round
- decl %eax
- jz .L010schedule_mangle_last
- call _vpaes_schedule_mangle
- pshufd $255,%xmm0,%xmm0
- movdqa %xmm7,20(%esp)
- movdqa %xmm6,%xmm7
- call .L_vpaes_schedule_low_round
- movdqa 20(%esp),%xmm7
- jmp .L012loop_schedule_256
-.align 16
-.L010schedule_mangle_last:
- leal 384(%ebp),%ebx
- testl %edi,%edi
- jnz .L013schedule_mangle_last_dec
- movdqa 256(%ebp,%ecx,1),%xmm1
-.byte 102,15,56,0,193
- leal 352(%ebp),%ebx
- addl $32,%edx
-.L013schedule_mangle_last_dec:
- addl $-16,%edx
- pxor 336(%ebp),%xmm0
- call _vpaes_schedule_transform
- movdqu %xmm0,(%edx)
- pxor %xmm0,%xmm0
- pxor %xmm1,%xmm1
- pxor %xmm2,%xmm2
- pxor %xmm3,%xmm3
- pxor %xmm4,%xmm4
- pxor %xmm5,%xmm5
- pxor %xmm6,%xmm6
- pxor %xmm7,%xmm7
- ret
-.size _vpaes_schedule_core,.-_vpaes_schedule_core
-.type _vpaes_schedule_192_smear,@function
-.align 16
-_vpaes_schedule_192_smear:
- pshufd $128,%xmm6,%xmm0
- pxor %xmm0,%xmm6
- pshufd $254,%xmm7,%xmm0
- pxor %xmm0,%xmm6
- movdqa %xmm6,%xmm0
- pxor %xmm1,%xmm1
- movhlps %xmm1,%xmm6
- ret
-.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
-.type _vpaes_schedule_round,@function
-.align 16
-_vpaes_schedule_round:
- movdqa 8(%esp),%xmm2
- pxor %xmm1,%xmm1
-.byte 102,15,58,15,202,15
-.byte 102,15,58,15,210,15
- pxor %xmm1,%xmm7
- pshufd $255,%xmm0,%xmm0
-.byte 102,15,58,15,192,1
- movdqa %xmm2,8(%esp)
-.L_vpaes_schedule_low_round:
- movdqa %xmm7,%xmm1
- pslldq $4,%xmm7
- pxor %xmm1,%xmm7
- movdqa %xmm7,%xmm1
- pslldq $8,%xmm7
- pxor %xmm1,%xmm7
- pxor 336(%ebp),%xmm7
- movdqa -16(%ebp),%xmm4
- movdqa -48(%ebp),%xmm5
- movdqa %xmm4,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm4,%xmm0
- movdqa -32(%ebp),%xmm2
-.byte 102,15,56,0,208
- pxor %xmm1,%xmm0
- movdqa %xmm5,%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
- movdqa %xmm5,%xmm4
-.byte 102,15,56,0,224
- pxor %xmm2,%xmm4
- movdqa %xmm5,%xmm2
-.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
- movdqa %xmm5,%xmm3
-.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
- movdqa 32(%ebp),%xmm4
-.byte 102,15,56,0,226
- movdqa 48(%ebp),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- pxor %xmm7,%xmm0
- movdqa %xmm0,%xmm7
- ret
-.size _vpaes_schedule_round,.-_vpaes_schedule_round
-.type _vpaes_schedule_transform,@function
-.align 16
-_vpaes_schedule_transform:
- movdqa -16(%ebp),%xmm2
- movdqa %xmm2,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm2,%xmm0
- movdqa (%ebx),%xmm2
-.byte 102,15,56,0,208
- movdqa 16(%ebx),%xmm0
-.byte 102,15,56,0,193
- pxor %xmm2,%xmm0
- ret
-.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
-.type _vpaes_schedule_mangle,@function
-.align 16
-_vpaes_schedule_mangle:
- movdqa %xmm0,%xmm4
- movdqa 128(%ebp),%xmm5
- testl %edi,%edi
- jnz .L014schedule_mangle_dec
- addl $16,%edx
- pxor 336(%ebp),%xmm4
-.byte 102,15,56,0,229
- movdqa %xmm4,%xmm3
-.byte 102,15,56,0,229
- pxor %xmm4,%xmm3
-.byte 102,15,56,0,229
- pxor %xmm4,%xmm3
- jmp .L015schedule_mangle_both
-.align 16
-.L014schedule_mangle_dec:
- movdqa -16(%ebp),%xmm2
- leal 416(%ebp),%esi
- movdqa %xmm2,%xmm1
- pandn %xmm4,%xmm1
- psrld $4,%xmm1
- pand %xmm2,%xmm4
- movdqa (%esi),%xmm2
-.byte 102,15,56,0,212
- movdqa 16(%esi),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
-.byte 102,15,56,0,221
- movdqa 32(%esi),%xmm2
-.byte 102,15,56,0,212
- pxor %xmm3,%xmm2
- movdqa 48(%esi),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
-.byte 102,15,56,0,221
- movdqa 64(%esi),%xmm2
-.byte 102,15,56,0,212
- pxor %xmm3,%xmm2
- movdqa 80(%esi),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
-.byte 102,15,56,0,221
- movdqa 96(%esi),%xmm2
-.byte 102,15,56,0,212
- pxor %xmm3,%xmm2
- movdqa 112(%esi),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
- addl $-16,%edx
-.L015schedule_mangle_both:
- movdqa 256(%ebp,%ecx,1),%xmm1
-.byte 102,15,56,0,217
- addl $-16,%ecx
- andl $48,%ecx
- movdqu %xmm3,(%edx)
- ret
-.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
-.globl vpaes_set_encrypt_key
-.type vpaes_set_encrypt_key,@function
-.align 16
-vpaes_set_encrypt_key:
-.L_vpaes_set_encrypt_key_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- leal -56(%esp),%ebx
- movl 24(%esp),%eax
- andl $-16,%ebx
- movl 28(%esp),%edx
- xchgl %esp,%ebx
- movl %ebx,48(%esp)
- movl %eax,%ebx
- shrl $5,%ebx
- addl $5,%ebx
- movl %ebx,240(%edx)
- movl $48,%ecx
- movl $0,%edi
- leal .L_vpaes_consts+0x30-.L016pic_point,%ebp
- call _vpaes_schedule_core
-.L016pic_point:
- movl 48(%esp),%esp
- xorl %eax,%eax
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size vpaes_set_encrypt_key,.-.L_vpaes_set_encrypt_key_begin
-.globl vpaes_set_decrypt_key
-.type vpaes_set_decrypt_key,@function
-.align 16
-vpaes_set_decrypt_key:
-.L_vpaes_set_decrypt_key_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- leal -56(%esp),%ebx
- movl 24(%esp),%eax
- andl $-16,%ebx
- movl 28(%esp),%edx
- xchgl %esp,%ebx
- movl %ebx,48(%esp)
- movl %eax,%ebx
- shrl $5,%ebx
- addl $5,%ebx
- movl %ebx,240(%edx)
- shll $4,%ebx
- leal 16(%edx,%ebx,1),%edx
- movl $1,%edi
- movl %eax,%ecx
- shrl $1,%ecx
- andl $32,%ecx
- xorl $32,%ecx
- leal .L_vpaes_consts+0x30-.L017pic_point,%ebp
- call _vpaes_schedule_core
-.L017pic_point:
- movl 48(%esp),%esp
- xorl %eax,%eax
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size vpaes_set_decrypt_key,.-.L_vpaes_set_decrypt_key_begin
-.globl vpaes_encrypt
-.type vpaes_encrypt,@function
-.align 16
-vpaes_encrypt:
-.L_vpaes_encrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- leal .L_vpaes_consts+0x30-.L018pic_point,%ebp
- call _vpaes_preheat
-.L018pic_point:
- movl 20(%esp),%esi
- leal -56(%esp),%ebx
- movl 24(%esp),%edi
- andl $-16,%ebx
- movl 28(%esp),%edx
- xchgl %esp,%ebx
- movl %ebx,48(%esp)
- movdqu (%esi),%xmm0
- call _vpaes_encrypt_core
- movdqu %xmm0,(%edi)
- movl 48(%esp),%esp
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size vpaes_encrypt,.-.L_vpaes_encrypt_begin
-.globl vpaes_decrypt
-.type vpaes_decrypt,@function
-.align 16
-vpaes_decrypt:
-.L_vpaes_decrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- leal .L_vpaes_consts+0x30-.L019pic_point,%ebp
- call _vpaes_preheat
-.L019pic_point:
- movl 20(%esp),%esi
- leal -56(%esp),%ebx
- movl 24(%esp),%edi
- andl $-16,%ebx
- movl 28(%esp),%edx
- xchgl %esp,%ebx
- movl %ebx,48(%esp)
- movdqu (%esi),%xmm0
- call _vpaes_decrypt_core
- movdqu %xmm0,(%edi)
- movl 48(%esp),%esp
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size vpaes_decrypt,.-.L_vpaes_decrypt_begin
-.globl vpaes_cbc_encrypt
-.type vpaes_cbc_encrypt,@function
-.align 16
-vpaes_cbc_encrypt:
-.L_vpaes_cbc_encrypt_begin:
- pushl %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
- movl 20(%esp),%esi
- movl 24(%esp),%edi
- movl 28(%esp),%eax
- movl 32(%esp),%edx
- subl $16,%eax
- jc .L020cbc_abort
- leal -56(%esp),%ebx
- movl 36(%esp),%ebp
- andl $-16,%ebx
- movl 40(%esp),%ecx
- xchgl %esp,%ebx
- movdqu (%ebp),%xmm1
- subl %esi,%edi
- movl %ebx,48(%esp)
- movl %edi,(%esp)
- movl %edx,4(%esp)
- movl %ebp,8(%esp)
- movl %eax,%edi
- leal .L_vpaes_consts+0x30-.L021pic_point,%ebp
- call _vpaes_preheat
-.L021pic_point:
- cmpl $0,%ecx
- je .L022cbc_dec_loop
- jmp .L023cbc_enc_loop
-.align 16
-.L023cbc_enc_loop:
- movdqu (%esi),%xmm0
- pxor %xmm1,%xmm0
- call _vpaes_encrypt_core
- movl (%esp),%ebx
- movl 4(%esp),%edx
- movdqa %xmm0,%xmm1
- movdqu %xmm0,(%ebx,%esi,1)
- leal 16(%esi),%esi
- subl $16,%edi
- jnc .L023cbc_enc_loop
- jmp .L024cbc_done
-.align 16
-.L022cbc_dec_loop:
- movdqu (%esi),%xmm0
- movdqa %xmm1,16(%esp)
- movdqa %xmm0,32(%esp)
- call _vpaes_decrypt_core
- movl (%esp),%ebx
- movl 4(%esp),%edx
- pxor 16(%esp),%xmm0
- movdqa 32(%esp),%xmm1
- movdqu %xmm0,(%ebx,%esi,1)
- leal 16(%esi),%esi
- subl $16,%edi
- jnc .L022cbc_dec_loop
-.L024cbc_done:
- movl 8(%esp),%ebx
- movl 48(%esp),%esp
- movdqu %xmm1,(%ebx)
-.L020cbc_abort:
- popl %edi
- popl %esi
- popl %ebx
- popl %ebp
- ret
-.size vpaes_cbc_encrypt,.-.L_vpaes_cbc_encrypt_begin
diff --git a/app/openssl/crypto/aes/asm/vpaes-x86.pl b/app/openssl/crypto/aes/asm/vpaes-x86.pl
deleted file mode 100644
index 1533e2c3..00000000
--- a/app/openssl/crypto/aes/asm/vpaes-x86.pl
+++ /dev/null
@@ -1,903 +0,0 @@
-#!/usr/bin/env perl
-
-######################################################################
-## Constant-time SSSE3 AES core implementation.
-## version 0.1
-##
-## By Mike Hamburg (Stanford University), 2009
-## Public domain.
-##
-## For details see http://shiftleft.org/papers/vector_aes/ and
-## http://crypto.stanford.edu/vpaes/.
-
-######################################################################
-# September 2011.
-#
-# Port vpaes-x86_64.pl as a 32-bit "almost" drop-in replacement for
-# aes-586.pl. "Almost" refers to the fact that AES_cbc_encrypt
-# doesn't handle partial vectors (doesn't have to if called from
-# EVP only). "Drop-in" implies that this module doesn't share key
-# schedule structure with the original, nor does it make assumptions
-# about its alignment...
-#
-# Performance summary. aes-586.pl column lists large-block CBC
-# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
-# byte processed with 128-bit key, and vpaes-x86.pl column - [also
-# large-block CBC] encrypt/decrypt.
-#
-# aes-586.pl vpaes-x86.pl
-#
-# Core 2(**) 29.1/42.3/18.3 22.0/25.6(***)
-# Nehalem 27.9/40.4/18.1 10.3/12.0
-# Atom 102./119./60.1 64.5/85.3(***)
-#
-# (*)	"Hyper-threading" in this context refers to cache shared among
-#	multiple cores rather than specifically to Intel HTT. As the
-#	vast majority of contemporary cores share cache, the slower
-#	code path is commonplace. In other words, "with-hyper-threading-off"
-#	results are presented mostly for reference purposes.
-#
-# (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
-#
-# (***)	The less impressive improvement on Core 2 and Atom is due to
-#	slow pshufb; it is still a respectable +32%/65% improvement on
-#	Core 2 and +58%/40% on Atom (over the "hyper-threading-safe"
-#	code path, as implied).
-#
-# <appro@openssl.org>
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-push(@INC,"${dir}","${dir}../../perlasm");
-require "x86asm.pl";
-
-&asm_init($ARGV[0],"vpaes-x86.pl",$x86only = $ARGV[$#ARGV] eq "386");
-
-$PREFIX="vpaes";
-
-my ($round, $base, $magic, $key, $const, $inp, $out)=
- ("eax", "ebx", "ecx", "edx","ebp", "esi","edi");
-
-&static_label("_vpaes_consts");
-&static_label("_vpaes_schedule_low_round");
-
-&set_label("_vpaes_consts",64);
-$k_inv=-0x30; # inv, inva
- &data_word(0x0D080180,0x0E05060F,0x0A0B0C02,0x04070309);
- &data_word(0x0F0B0780,0x01040A06,0x02050809,0x030D0E0C);
-
-$k_s0F=-0x10; # s0F
- &data_word(0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F,0x0F0F0F0F);
-
-$k_ipt=0x00; # input transform (lo, hi)
- &data_word(0x5A2A7000,0xC2B2E898,0x52227808,0xCABAE090);
- &data_word(0x317C4D00,0x4C01307D,0xB0FDCC81,0xCD80B1FC);
-
-$k_sb1=0x20; # sb1u, sb1t
- &data_word(0xCB503E00,0xB19BE18F,0x142AF544,0xA5DF7A6E);
- &data_word(0xFAE22300,0x3618D415,0x0D2ED9EF,0x3BF7CCC1);
-$k_sb2=0x40; # sb2u, sb2t
- &data_word(0x0B712400,0xE27A93C6,0xBC982FCD,0x5EB7E955);
- &data_word(0x0AE12900,0x69EB8840,0xAB82234A,0xC2A163C8);
-$k_sbo=0x60; # sbou, sbot
- &data_word(0x6FBDC700,0xD0D26D17,0xC502A878,0x15AABF7A);
- &data_word(0x5FBB6A00,0xCFE474A5,0x412B35FA,0x8E1E90D1);
-
-$k_mc_forward=0x80; # mc_forward
- &data_word(0x00030201,0x04070605,0x080B0A09,0x0C0F0E0D);
- &data_word(0x04070605,0x080B0A09,0x0C0F0E0D,0x00030201);
- &data_word(0x080B0A09,0x0C0F0E0D,0x00030201,0x04070605);
- &data_word(0x0C0F0E0D,0x00030201,0x04070605,0x080B0A09);
-
-$k_mc_backward=0xc0; # mc_backward
- &data_word(0x02010003,0x06050407,0x0A09080B,0x0E0D0C0F);
- &data_word(0x0E0D0C0F,0x02010003,0x06050407,0x0A09080B);
- &data_word(0x0A09080B,0x0E0D0C0F,0x02010003,0x06050407);
- &data_word(0x06050407,0x0A09080B,0x0E0D0C0F,0x02010003);
-
-$k_sr=0x100; # sr
- &data_word(0x03020100,0x07060504,0x0B0A0908,0x0F0E0D0C);
- &data_word(0x0F0A0500,0x030E0904,0x07020D08,0x0B06010C);
- &data_word(0x0B020900,0x0F060D04,0x030A0108,0x070E050C);
- &data_word(0x070A0D00,0x0B0E0104,0x0F020508,0x0306090C);
-
-$k_rcon=0x140; # rcon
- &data_word(0xAF9DEEB6,0x1F8391B9,0x4D7C7D81,0x702A9808);
-
-$k_s63=0x150; # s63: all equal to 0x63 transformed
- &data_word(0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B,0x5B5B5B5B);
-
-$k_opt=0x160; # output transform
- &data_word(0xD6B66000,0xFF9F4929,0xDEBE6808,0xF7974121);
- &data_word(0x50BCEC00,0x01EDBD51,0xB05C0CE0,0xE10D5DB1);
-
-$k_deskew=0x180; # deskew tables: inverts the sbox's "skew"
- &data_word(0x47A4E300,0x07E4A340,0x5DBEF91A,0x1DFEB95A);
- &data_word(0x83EA6900,0x5F36B5DC,0xF49D1E77,0x2841C2AB);
-##
-## Decryption stuff
-## Key schedule constants
-##
-$k_dksd=0x1a0; # decryption key schedule: invskew x*D
- &data_word(0xA3E44700,0xFEB91A5D,0x5A1DBEF9,0x0740E3A4);
- &data_word(0xB5368300,0x41C277F4,0xAB289D1E,0x5FDC69EA);
-$k_dksb=0x1c0; # decryption key schedule: invskew x*B
- &data_word(0x8550D500,0x9A4FCA1F,0x1CC94C99,0x03D65386);
- &data_word(0xB6FC4A00,0x115BEDA7,0x7E3482C8,0xD993256F);
-$k_dkse=0x1e0; # decryption key schedule: invskew x*E + 0x63
- &data_word(0x1FC9D600,0xD5031CCA,0x994F5086,0x53859A4C);
- &data_word(0x4FDC7BE8,0xA2319605,0x20B31487,0xCD5EF96A);
-$k_dks9=0x200; # decryption key schedule: invskew x*9
- &data_word(0x7ED9A700,0xB6116FC8,0x82255BFC,0x4AED9334);
- &data_word(0x27143300,0x45765162,0xE9DAFDCE,0x8BB89FAC);
-
-##
-## Decryption stuff
-## Round function constants
-##
-$k_dipt=0x220; # decryption input transform
- &data_word(0x0B545F00,0x0F505B04,0x114E451A,0x154A411E);
- &data_word(0x60056500,0x86E383E6,0xF491F194,0x12771772);
-
-$k_dsb9=0x240; # decryption sbox output *9*u, *9*t
- &data_word(0x9A86D600,0x851C0353,0x4F994CC9,0xCAD51F50);
- &data_word(0xECD74900,0xC03B1789,0xB2FBA565,0x725E2C9E);
-$k_dsbd=0x260; # decryption sbox output *D*u, *D*t
- &data_word(0xE6B1A200,0x7D57CCDF,0x882A4439,0xF56E9B13);
- &data_word(0x24C6CB00,0x3CE2FAF7,0x15DEEFD3,0x2931180D);
-$k_dsbb=0x280; # decryption sbox output *B*u, *B*t
- &data_word(0x96B44200,0xD0226492,0xB0F2D404,0x602646F6);
- &data_word(0xCD596700,0xC19498A6,0x3255AA6B,0xF3FF0C3E);
-$k_dsbe=0x2a0; # decryption sbox output *E*u, *E*t
- &data_word(0x26D4D000,0x46F29296,0x64B4F6B0,0x22426004);
- &data_word(0xFFAAC100,0x0C55A6CD,0x98593E32,0x9467F36B);
-$k_dsbo=0x2c0; # decryption sbox final output
- &data_word(0x7EF94000,0x1387EA53,0xD4943E2D,0xC7AA6DB9);
- &data_word(0x93441D00,0x12D7560F,0xD8C58E9C,0xCA4B8159);
-&asciz ("Vector Permutation AES for x86/SSSE3, Mike Hamburg (Stanford University)");
-&align (64);
-
-&function_begin_B("_vpaes_preheat");
- &add ($const,&DWP(0,"esp"));
- &movdqa ("xmm7",&QWP($k_inv,$const));
- &movdqa ("xmm6",&QWP($k_s0F,$const));
- &ret ();
-&function_end_B("_vpaes_preheat");
-
-##
-## _aes_encrypt_core
-##
-## AES-encrypt %xmm0.
-##
-## Inputs:
-## %xmm0 = input
-## %xmm6-%xmm7 as in _vpaes_preheat
-## (%edx) = scheduled keys
-##
-## Output in %xmm0
-## Clobbers %xmm1-%xmm5, %eax, %ebx, %ecx, %edx
-##
-##
-&function_begin_B("_vpaes_encrypt_core");
- &mov ($magic,16);
- &mov ($round,&DWP(240,$key));
-	&movdqa	("xmm1","xmm6");
- &movdqa ("xmm2",&QWP($k_ipt,$const));
- &pandn ("xmm1","xmm0");
- &movdqu ("xmm5",&QWP(0,$key));
- &psrld ("xmm1",4);
- &pand ("xmm0","xmm6");
- &pshufb ("xmm2","xmm0");
- &movdqa ("xmm0",&QWP($k_ipt+16,$const));
- &pshufb ("xmm0","xmm1");
- &pxor ("xmm2","xmm5");
- &pxor ("xmm0","xmm2");
- &add ($key,16);
- &lea ($base,&DWP($k_mc_backward,$const));
- &jmp (&label("enc_entry"));
-
-
-&set_label("enc_loop",16);
- # middle of middle round
- &movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sb1u
- &pshufb ("xmm4","xmm2"); # 4 = sb1u
- &pxor ("xmm4","xmm5"); # 4 = sb1u + k
- &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sb1t
- &pshufb ("xmm0","xmm3"); # 0 = sb1t
- &pxor ("xmm0","xmm4"); # 0 = A
- &movdqa ("xmm5",&QWP($k_sb2,$const)); # 4 : sb2u
- &pshufb ("xmm5","xmm2"); # 4 = sb2u
- &movdqa ("xmm1",&QWP(-0x40,$base,$magic));# .Lk_mc_forward[]
- &movdqa ("xmm2",&QWP($k_sb2+16,$const));# 2 : sb2t
- &pshufb ("xmm2","xmm3"); # 2 = sb2t
- &pxor ("xmm2","xmm5"); # 2 = 2A
- &movdqa ("xmm4",&QWP(0,$base,$magic)); # .Lk_mc_backward[]
- &movdqa ("xmm3","xmm0"); # 3 = A
- &pshufb ("xmm0","xmm1"); # 0 = B
- &add ($key,16); # next key
- &pxor ("xmm0","xmm2"); # 0 = 2A+B
- &pshufb ("xmm3","xmm4"); # 3 = D
- &add ($magic,16); # next mc
- &pxor ("xmm3","xmm0"); # 3 = 2A+B+D
- &pshufb ("xmm0","xmm1"); # 0 = 2B+C
- &and ($magic,0x30); # ... mod 4
- &pxor ("xmm0","xmm3"); # 0 = 2A+3B+C+D
- &sub ($round,1); # nr--
-
-&set_label("enc_entry");
- # top of round
- &movdqa ("xmm1","xmm6"); # 1 : i
- &pandn ("xmm1","xmm0"); # 1 = i<<4
- &psrld ("xmm1",4); # 1 = i
- &pand ("xmm0","xmm6"); # 0 = k
- &movdqa ("xmm5",&QWP($k_inv+16,$const));# 2 : a/k
- &pshufb ("xmm5","xmm0"); # 2 = a/k
- &pxor ("xmm0","xmm1"); # 0 = j
- &movdqa ("xmm3","xmm7"); # 3 : 1/i
- &pshufb ("xmm3","xmm1"); # 3 = 1/i
- &pxor ("xmm3","xmm5"); # 3 = iak = 1/i + a/k
- &movdqa ("xmm4","xmm7"); # 4 : 1/j
- &pshufb ("xmm4","xmm0"); # 4 = 1/j
- &pxor ("xmm4","xmm5"); # 4 = jak = 1/j + a/k
- &movdqa ("xmm2","xmm7"); # 2 : 1/iak
- &pshufb ("xmm2","xmm3"); # 2 = 1/iak
- &pxor ("xmm2","xmm0"); # 2 = io
- &movdqa ("xmm3","xmm7"); # 3 : 1/jak
- &movdqu ("xmm5",&QWP(0,$key));
- &pshufb ("xmm3","xmm4"); # 3 = 1/jak
- &pxor ("xmm3","xmm1"); # 3 = jo
- &jnz (&label("enc_loop"));
-
- # middle of last round
- &movdqa ("xmm4",&QWP($k_sbo,$const)); # 3 : sbou .Lk_sbo
- &movdqa ("xmm0",&QWP($k_sbo+16,$const));# 3 : sbot .Lk_sbo+16
- &pshufb ("xmm4","xmm2"); # 4 = sbou
- &pxor ("xmm4","xmm5"); # 4 = sb1u + k
- &pshufb ("xmm0","xmm3"); # 0 = sb1t
- &movdqa ("xmm1",&QWP(0x40,$base,$magic));# .Lk_sr[]
- &pxor ("xmm0","xmm4"); # 0 = A
- &pshufb ("xmm0","xmm1");
- &ret ();
-&function_end_B("_vpaes_encrypt_core");
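The enc_entry block is the SSSE3 substitute for a 256-entry table lookup, and
the reason the module is constant-time: the state byte is split into 4-bit
halves against the 0x0F mask in %xmm6 (pand for the low nibble, pandn plus
psrld for the high one), each half indexes a 16-entry pshufb table, and the
partial results are folded together with pxor, so no data-dependent memory
address is ever formed. A scalar Perl sketch of one such split lookup; the
table contents here are caller-supplied placeholders, not the real k_inv
tables:

    sub nibble_lookup {
        my ($byte, $lo_tab, $hi_tab) = @_;  # 16-entry array refs
        my $lo = $byte & 0x0f;              # pand  %xmm6, %xmm0
        my $hi = ($byte >> 4) & 0x0f;       # pandn %xmm0, %xmm1 ; psrld $4
        return $lo_tab->[$lo] ^ $hi_tab->[$hi];  # pshufb, pshufb, pxor
    }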
-
-##
-## Decryption core
-##
-## Same API as encryption core.
-##
-&function_begin_B("_vpaes_decrypt_core");
- &mov ($round,&DWP(240,$key));
- &lea ($base,&DWP($k_dsbd,$const));
- &movdqa ("xmm1","xmm6");
- &movdqa ("xmm2",&QWP($k_dipt-$k_dsbd,$base));
- &pandn ("xmm1","xmm0");
- &mov ($magic,$round);
-	&psrld	("xmm1",4);
- &movdqu ("xmm5",&QWP(0,$key));
- &shl ($magic,4);
- &pand ("xmm0","xmm6");
- &pshufb ("xmm2","xmm0");
- &movdqa ("xmm0",&QWP($k_dipt-$k_dsbd+16,$base));
- &xor ($magic,0x30);
- &pshufb ("xmm0","xmm1");
- &and ($magic,0x30);
- &pxor ("xmm2","xmm5");
- &movdqa ("xmm5",&QWP($k_mc_forward+48,$const));
- &pxor ("xmm0","xmm2");
- &add ($key,16);
- &lea ($magic,&DWP($k_sr-$k_dsbd,$base,$magic));
- &jmp (&label("dec_entry"));
-
-&set_label("dec_loop",16);
-##
-## Inverse mix columns
-##
- &movdqa ("xmm4",&QWP(-0x20,$base)); # 4 : sb9u
- &pshufb ("xmm4","xmm2"); # 4 = sb9u
- &pxor ("xmm4","xmm0");
- &movdqa ("xmm0",&QWP(-0x10,$base)); # 0 : sb9t
- &pshufb ("xmm0","xmm3"); # 0 = sb9t
- &pxor ("xmm0","xmm4"); # 0 = ch
- &add ($key,16); # next round key
-
- &pshufb ("xmm0","xmm5"); # MC ch
- &movdqa ("xmm4",&QWP(0,$base)); # 4 : sbdu
- &pshufb ("xmm4","xmm2"); # 4 = sbdu
- &pxor ("xmm4","xmm0"); # 4 = ch
- &movdqa ("xmm0",&QWP(0x10,$base)); # 0 : sbdt
- &pshufb ("xmm0","xmm3"); # 0 = sbdt
- &pxor ("xmm0","xmm4"); # 0 = ch
- &sub ($round,1); # nr--
-
- &pshufb ("xmm0","xmm5"); # MC ch
- &movdqa ("xmm4",&QWP(0x20,$base)); # 4 : sbbu
- &pshufb ("xmm4","xmm2"); # 4 = sbbu
- &pxor ("xmm4","xmm0"); # 4 = ch
- &movdqa ("xmm0",&QWP(0x30,$base)); # 0 : sbbt
- &pshufb ("xmm0","xmm3"); # 0 = sbbt
- &pxor ("xmm0","xmm4"); # 0 = ch
-
- &pshufb ("xmm0","xmm5"); # MC ch
- &movdqa ("xmm4",&QWP(0x40,$base)); # 4 : sbeu
- &pshufb ("xmm4","xmm2"); # 4 = sbeu
- &pxor ("xmm4","xmm0"); # 4 = ch
- &movdqa ("xmm0",&QWP(0x50,$base)); # 0 : sbet
- &pshufb ("xmm0","xmm3"); # 0 = sbet
- &pxor ("xmm0","xmm4"); # 0 = ch
-
- &palignr("xmm5","xmm5",12);
-
-&set_label("dec_entry");
- # top of round
- &movdqa ("xmm1","xmm6"); # 1 : i
- &pandn ("xmm1","xmm0"); # 1 = i<<4
- &psrld ("xmm1",4); # 1 = i
- &pand ("xmm0","xmm6"); # 0 = k
- &movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
- &pshufb ("xmm2","xmm0"); # 2 = a/k
- &pxor ("xmm0","xmm1"); # 0 = j
- &movdqa ("xmm3","xmm7"); # 3 : 1/i
- &pshufb ("xmm3","xmm1"); # 3 = 1/i
- &pxor ("xmm3","xmm2"); # 3 = iak = 1/i + a/k
- &movdqa ("xmm4","xmm7"); # 4 : 1/j
- &pshufb ("xmm4","xmm0"); # 4 = 1/j
- &pxor ("xmm4","xmm2"); # 4 = jak = 1/j + a/k
- &movdqa ("xmm2","xmm7"); # 2 : 1/iak
- &pshufb ("xmm2","xmm3"); # 2 = 1/iak
- &pxor ("xmm2","xmm0"); # 2 = io
- &movdqa ("xmm3","xmm7"); # 3 : 1/jak
- &pshufb ("xmm3","xmm4"); # 3 = 1/jak
- &pxor ("xmm3","xmm1"); # 3 = jo
- &movdqu ("xmm0",&QWP(0,$key));
- &jnz (&label("dec_loop"));
-
- # middle of last round
- &movdqa ("xmm4",&QWP(0x60,$base)); # 3 : sbou
- &pshufb ("xmm4","xmm2"); # 4 = sbou
- &pxor ("xmm4","xmm0"); # 4 = sb1u + k
- &movdqa ("xmm0",&QWP(0x70,$base)); # 0 : sbot
- &movdqa ("xmm2",&QWP(0,$magic));
- &pshufb ("xmm0","xmm3"); # 0 = sb1t
- &pxor ("xmm0","xmm4"); # 0 = A
- &pshufb ("xmm0","xmm2");
- &ret ();
-&function_end_B("_vpaes_decrypt_core");
-
-########################################################
-## ##
-## AES key schedule ##
-## ##
-########################################################
-&function_begin_B("_vpaes_schedule_core");
- &add ($const,&DWP(0,"esp"));
- &movdqu ("xmm0",&QWP(0,$inp)); # load key (unaligned)
- &movdqa ("xmm2",&QWP($k_rcon,$const)); # load rcon
-
- # input transform
- &movdqa ("xmm3","xmm0");
- &lea ($base,&DWP($k_ipt,$const));
- &movdqa (&QWP(4,"esp"),"xmm2"); # xmm8
- &call ("_vpaes_schedule_transform");
- &movdqa ("xmm7","xmm0");
-
- &test ($out,$out);
- &jnz (&label("schedule_am_decrypting"));
-
- # encrypting, output zeroth round key after transform
- &movdqu (&QWP(0,$key),"xmm0");
- &jmp (&label("schedule_go"));
-
-&set_label("schedule_am_decrypting");
- # decrypting, output zeroth round key after shiftrows
- &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
- &pshufb ("xmm3","xmm1");
- &movdqu (&QWP(0,$key),"xmm3");
- &xor ($magic,0x30);
-
-&set_label("schedule_go");
- &cmp ($round,192);
- &ja (&label("schedule_256"));
- &je (&label("schedule_192"));
-	# 128: fall through
-
-##
-## .schedule_128
-##
-## 128-bit specific part of key schedule.
-##
-## This schedule is really simple, because all its parts
-## are accomplished by the subroutines.
-##
-&set_label("schedule_128");
- &mov ($round,10);
-
-&set_label("loop_schedule_128");
- &call ("_vpaes_schedule_round");
- &dec ($round);
- &jz (&label("schedule_mangle_last"));
- &call ("_vpaes_schedule_mangle"); # write output
- &jmp (&label("loop_schedule_128"));
-
-##
-## .aes_schedule_192
-##
-## 192-bit specific part of key schedule.
-##
-## The main body of this schedule is the same as the 128-bit
-## schedule, but with more smearing. The long, high side is
-## stored in %xmm7 as before, and the short, low side is in
-## the high bits of %xmm6.
-##
-## This schedule is somewhat nastier, however, because each
-## round produces 192 bits of key material, or 1.5 round keys.
-## Therefore, on each cycle we do 2 rounds and produce 3 round
-## keys.
-##
-&set_label("schedule_192",16);
- &movdqu ("xmm0",&QWP(8,$inp)); # load key part 2 (very unaligned)
- &call ("_vpaes_schedule_transform"); # input transform
- &movdqa ("xmm6","xmm0"); # save short part
- &pxor ("xmm4","xmm4"); # clear 4
- &movhlps("xmm6","xmm4"); # clobber low side with zeros
- &mov ($round,4);
-
-&set_label("loop_schedule_192");
- &call ("_vpaes_schedule_round");
- &palignr("xmm0","xmm6",8);
- &call ("_vpaes_schedule_mangle"); # save key n
- &call ("_vpaes_schedule_192_smear");
- &call ("_vpaes_schedule_mangle"); # save key n+1
- &call ("_vpaes_schedule_round");
- &dec ($round);
- &jz (&label("schedule_mangle_last"));
- &call ("_vpaes_schedule_mangle"); # save key n+2
- &call ("_vpaes_schedule_192_smear");
- &jmp (&label("loop_schedule_192"));
-
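A quick check of the bookkeeping claimed in the comment above: AES-192 runs
12 rounds and therefore needs 13 round keys. The zeroth key is written before
schedule_go, and each of the four loop passes stores three more:

    my $passes        = 4;                 # the $round counter above
    my $keys_per_pass = 3;                 # keys n, n+1, n+2
    my $total         = 1 + $passes * $keys_per_pass;   # == 13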
-##
-## .aes_schedule_256
-##
-## 256-bit specific part of key schedule.
-##
-## The structure here is very similar to the 128-bit
-## schedule, but with an additional "low side" in
-## %xmm6. The low side's rounds are the same as the
-## high side's, except no rcon and no rotation.
-##
-&set_label("schedule_256",16);
- &movdqu ("xmm0",&QWP(16,$inp)); # load key part 2 (unaligned)
- &call ("_vpaes_schedule_transform"); # input transform
- &mov ($round,7);
-
-&set_label("loop_schedule_256");
- &call ("_vpaes_schedule_mangle"); # output low result
- &movdqa ("xmm6","xmm0"); # save cur_lo in xmm6
-
- # high round
- &call ("_vpaes_schedule_round");
- &dec ($round);
- &jz (&label("schedule_mangle_last"));
- &call ("_vpaes_schedule_mangle");
-
- # low round. swap xmm7 and xmm6
- &pshufd ("xmm0","xmm0",0xFF);
- &movdqa (&QWP(20,"esp"),"xmm7");
- &movdqa ("xmm7","xmm6");
- &call ("_vpaes_schedule_low_round");
- &movdqa ("xmm7",&QWP(20,"esp"));
-
- &jmp (&label("loop_schedule_256"));
-
-##
-## .aes_schedule_mangle_last
-##
-## Mangler for last round of key schedule
-## Mangles %xmm0
-## when encrypting, outputs out(%xmm0) ^ 63
-## when decrypting, outputs unskew(%xmm0)
-##
-## Always called right before return... jumps to cleanup and exits
-##
-&set_label("schedule_mangle_last",16);
- # schedule last round key from xmm0
- &lea ($base,&DWP($k_deskew,$const));
- &test ($out,$out);
- &jnz (&label("schedule_mangle_last_dec"));
-
- # encrypting
- &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
- &pshufb ("xmm0","xmm1"); # output permute
- &lea ($base,&DWP($k_opt,$const)); # prepare to output transform
- &add ($key,32);
-
-&set_label("schedule_mangle_last_dec");
- &add ($key,-16);
- &pxor ("xmm0",&QWP($k_s63,$const));
- &call ("_vpaes_schedule_transform"); # output transform
- &movdqu (&QWP(0,$key),"xmm0"); # save last key
-
- # cleanup
- &pxor ("xmm0","xmm0");
- &pxor ("xmm1","xmm1");
- &pxor ("xmm2","xmm2");
- &pxor ("xmm3","xmm3");
- &pxor ("xmm4","xmm4");
- &pxor ("xmm5","xmm5");
- &pxor ("xmm6","xmm6");
- &pxor ("xmm7","xmm7");
- &ret ();
-&function_end_B("_vpaes_schedule_core");
-
-##
-## .aes_schedule_192_smear
-##
-## Smear the short, low side in the 192-bit key schedule.
-##
-## Inputs:
-## %xmm7: high side, b a x y
-## %xmm6: low side, d c 0 0
-## %xmm1: scratch zero (the x86_64 original keeps 0 in %xmm13)
-##
-## Outputs:
-## %xmm6: b+c+d b+c 0 0
-## %xmm0: b+c+d b+c b a
-##
-&function_begin_B("_vpaes_schedule_192_smear");
- &pshufd ("xmm0","xmm6",0x80); # d c 0 0 -> c 0 0 0
- &pxor ("xmm6","xmm0"); # -> c+d c 0 0
- &pshufd ("xmm0","xmm7",0xFE); # b a _ _ -> b b b a
- &pxor ("xmm6","xmm0"); # -> b+c+d b+c b a
- &movdqa ("xmm0","xmm6");
- &pxor ("xmm1","xmm1");
- &movhlps("xmm6","xmm1"); # clobber low side with zeros
- &ret ();
-&function_end_B("_vpaes_schedule_192_smear");
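Written out on arrays of four dwords (index 0 = lowest), the two pshufd
immediates above are fixed shuffles: 0x80 maps (d c 0 0) to (c 0 0 0) and
0xFE maps (b a x y) to (b b b a), high dword first as in the comments. A Perl
restatement of the whole smear, not part of the original:

    # $x7 = [y, x, a, b], $x6 = [0, 0, c, d], low dword first.
    sub smear_192 {
        my ($x7, $x6) = @_;
        $x6->[3] ^= $x6->[2];                      # pshufd $0x80 + pxor
        my @bbba = ($x7->[2], ($x7->[3]) x 3);     # pshufd $0xFE
        $x6->[$_] ^= $bbba[$_] for 0 .. 3;         # -> (a, b, b+c, b+c+d)
        my @x0 = @$x6;                             # movdqa %xmm6 -> %xmm0
        @{$x6}[0, 1] = (0, 0);                     # movhlps: wipe low qword
        return \@x0;
    }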
-
-##
-## .aes_schedule_round
-##
-## Runs one main round of the key schedule on %xmm0, %xmm7
-##
-## Specifically, runs subbytes on the high dword of %xmm0
-## then rotates it by one byte and xors into the low dword of
-## %xmm7.
-##
-## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
-## next rcon.
-##
-## Smears the dwords of %xmm7 by xoring the low into the
-## second low, result into third, result into highest.
-##
-## Returns results in %xmm7 = %xmm0.
-## Clobbers %xmm1-%xmm5.
-##
-&function_begin_B("_vpaes_schedule_round");
- # extract rcon from xmm8
- &movdqa ("xmm2",&QWP(8,"esp")); # xmm8
- &pxor ("xmm1","xmm1");
- &palignr("xmm1","xmm2",15);
- &palignr("xmm2","xmm2",15);
- &pxor ("xmm7","xmm1");
-
- # rotate
- &pshufd ("xmm0","xmm0",0xFF);
- &palignr("xmm0","xmm0",1);
-
- # fall through...
- &movdqa (&QWP(8,"esp"),"xmm2"); # xmm8
-
- # low round: same as high round, but no rotation and no rcon.
-&set_label("_vpaes_schedule_low_round");
- # smear xmm7
- &movdqa ("xmm1","xmm7");
- &pslldq ("xmm7",4);
- &pxor ("xmm7","xmm1");
- &movdqa ("xmm1","xmm7");
- &pslldq ("xmm7",8);
- &pxor ("xmm7","xmm1");
- &pxor ("xmm7",&QWP($k_s63,$const));
-
- # subbyte
- &movdqa ("xmm4",&QWP($k_s0F,$const));
- &movdqa ("xmm5",&QWP($k_inv,$const)); # 4 : 1/j
- &movdqa ("xmm1","xmm4");
- &pandn ("xmm1","xmm0");
- &psrld ("xmm1",4); # 1 = i
- &pand ("xmm0","xmm4"); # 0 = k
- &movdqa ("xmm2",&QWP($k_inv+16,$const));# 2 : a/k
- &pshufb ("xmm2","xmm0"); # 2 = a/k
- &pxor ("xmm0","xmm1"); # 0 = j
- &movdqa ("xmm3","xmm5"); # 3 : 1/i
- &pshufb ("xmm3","xmm1"); # 3 = 1/i
- &pxor ("xmm3","xmm2"); # 3 = iak = 1/i + a/k
- &movdqa ("xmm4","xmm5"); # 4 : 1/j
- &pshufb ("xmm4","xmm0"); # 4 = 1/j
- &pxor ("xmm4","xmm2"); # 4 = jak = 1/j + a/k
- &movdqa ("xmm2","xmm5"); # 2 : 1/iak
- &pshufb ("xmm2","xmm3"); # 2 = 1/iak
- &pxor ("xmm2","xmm0"); # 2 = io
- &movdqa ("xmm3","xmm5"); # 3 : 1/jak
- &pshufb ("xmm3","xmm4"); # 3 = 1/jak
- &pxor ("xmm3","xmm1"); # 3 = jo
- &movdqa ("xmm4",&QWP($k_sb1,$const)); # 4 : sbou
- &pshufb ("xmm4","xmm2"); # 4 = sbou
- &movdqa ("xmm0",&QWP($k_sb1+16,$const));# 0 : sbot
- &pshufb ("xmm0","xmm3"); # 0 = sb1t
- &pxor ("xmm0","xmm4"); # 0 = sbox output
-
- # add in smeared stuff
- &pxor ("xmm0","xmm7");
- &movdqa ("xmm7","xmm0");
- &ret ();
-&function_end_B("_vpaes_schedule_round");
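The two pslldq/pxor pairs at the top of the low round compute a running xor
across the four dwords of %xmm7: out[i] = w[0] ^ ... ^ w[i], exactly the
dword smear described in the comment block. Scalar equivalent, not part of
the original:

    sub running_xor {
        my @w = @_;                        # four dwords, index 0 lowest
        $w[$_] ^= $w[$_ - 1] for 1 .. 3;   # pslldq $4/pxor, pslldq $8/pxor
        return @w;                         # (w0, w0^w1, w0^w1^w2, w0^..^w3)
    }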
-
-##
-## .aes_schedule_transform
-##
-## Linear-transform %xmm0 according to tables at (%ebx)
-##
-## Output in %xmm0
-## Clobbers %xmm1, %xmm2
-##
-&function_begin_B("_vpaes_schedule_transform");
- &movdqa ("xmm2",&QWP($k_s0F,$const));
- &movdqa ("xmm1","xmm2");
- &pandn ("xmm1","xmm0");
- &psrld ("xmm1",4);
- &pand ("xmm0","xmm2");
- &movdqa ("xmm2",&QWP(0,$base));
- &pshufb ("xmm2","xmm0");
- &movdqa ("xmm0",&QWP(16,$base));
- &pshufb ("xmm0","xmm1");
- &pxor ("xmm0","xmm2");
- &ret ();
-&function_end_B("_vpaes_schedule_transform");
-
-##
-## .aes_schedule_mangle
-##
-## Mangle xmm0 from (basis-transformed) standard version
-## to our version.
-##
-## On encrypt,
-## xor with 0x63
-## multiply by circulant 0,1,1,1
-## apply shiftrows transform
-##
-## On decrypt,
-## xor with 0x63
-## multiply by "inverse mixcolumns" circulant E,B,D,9
-## deskew
-## apply shiftrows transform
-##
-##
-## Writes out to (%edx), and increments or decrements it
-## Keeps track of round number mod 4 in %ecx
-## Preserves xmm0
-## Clobbers xmm1-xmm5
-##
-&function_begin_B("_vpaes_schedule_mangle");
- &movdqa ("xmm4","xmm0"); # save xmm0 for later
- &movdqa ("xmm5",&QWP($k_mc_forward,$const));
- &test ($out,$out);
- &jnz (&label("schedule_mangle_dec"));
-
- # encrypting
- &add ($key,16);
- &pxor ("xmm4",&QWP($k_s63,$const));
- &pshufb ("xmm4","xmm5");
- &movdqa ("xmm3","xmm4");
- &pshufb ("xmm4","xmm5");
- &pxor ("xmm3","xmm4");
- &pshufb ("xmm4","xmm5");
- &pxor ("xmm3","xmm4");
-
- &jmp (&label("schedule_mangle_both"));
-
-&set_label("schedule_mangle_dec",16);
- # inverse mix columns
- &movdqa ("xmm2",&QWP($k_s0F,$const));
- &lea ($inp,&DWP($k_dksd,$const));
- &movdqa ("xmm1","xmm2");
- &pandn ("xmm1","xmm4");
- &psrld ("xmm1",4); # 1 = hi
- &pand ("xmm4","xmm2"); # 4 = lo
-
- &movdqa ("xmm2",&QWP(0,$inp));
- &pshufb ("xmm2","xmm4");
- &movdqa ("xmm3",&QWP(0x10,$inp));
- &pshufb ("xmm3","xmm1");
- &pxor ("xmm3","xmm2");
- &pshufb ("xmm3","xmm5");
-
- &movdqa ("xmm2",&QWP(0x20,$inp));
- &pshufb ("xmm2","xmm4");
- &pxor ("xmm2","xmm3");
- &movdqa ("xmm3",&QWP(0x30,$inp));
- &pshufb ("xmm3","xmm1");
- &pxor ("xmm3","xmm2");
- &pshufb ("xmm3","xmm5");
-
- &movdqa ("xmm2",&QWP(0x40,$inp));
- &pshufb ("xmm2","xmm4");
- &pxor ("xmm2","xmm3");
- &movdqa ("xmm3",&QWP(0x50,$inp));
- &pshufb ("xmm3","xmm1");
- &pxor ("xmm3","xmm2");
- &pshufb ("xmm3","xmm5");
-
- &movdqa ("xmm2",&QWP(0x60,$inp));
- &pshufb ("xmm2","xmm4");
- &pxor ("xmm2","xmm3");
- &movdqa ("xmm3",&QWP(0x70,$inp));
- &pshufb ("xmm3","xmm1");
- &pxor ("xmm3","xmm2");
-
- &add ($key,-16);
-
-&set_label("schedule_mangle_both");
- &movdqa ("xmm1",&QWP($k_sr,$const,$magic));
- &pshufb ("xmm3","xmm1");
- &add ($magic,-16);
- &and ($magic,0x30);
- &movdqu (&QWP(0,$key),"xmm3");
- &ret ();
-&function_end_B("_vpaes_schedule_mangle");
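The closing add/and pair is how the schedule tracks its position mod 4 when
picking a .Lk_sr row: $magic cycles 0x30 -> 0x20 -> 0x10 -> 0x00 and wraps
back to 0x30, since masking with 0x30 folds the borrow away. A two-line
demonstration, not part of the original:

    my $magic = 0x30;
    $magic = ($magic - 16) & 0x30 for 1 .. 4;   # addl $-16; andl $48 -> 0x30 again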
-
-#
-# Interface to OpenSSL
-#
-&function_begin("${PREFIX}_set_encrypt_key");
- &mov ($inp,&wparam(0)); # inp
- &lea ($base,&DWP(-56,"esp"));
- &mov ($round,&wparam(1)); # bits
- &and ($base,-16);
- &mov ($key,&wparam(2)); # key
- &xchg ($base,"esp"); # alloca
- &mov (&DWP(48,"esp"),$base);
-
- &mov ($base,$round);
- &shr ($base,5);
- &add ($base,5);
- &mov (&DWP(240,$key),$base); # AES_KEY->rounds = nbits/32+5;
- &mov ($magic,0x30);
- &mov ($out,0);
-
- &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
- &call ("_vpaes_schedule_core");
-&set_label("pic_point");
-
- &mov ("esp",&DWP(48,"esp"));
- &xor ("eax","eax");
-&function_end("${PREFIX}_set_encrypt_key");
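The rounds arithmetic is worth spelling out: shr $5 divides the bit count by
32, so 240($key) receives nbits/32 + 5, i.e. 9, 11 or 13, which the core
loops then count down (sub $1 / jnz) as the number of middle rounds before
the final round. A one-liner to check the three cases:

    printf "%d-bit key -> rounds field %d\n", $_, ($_ >> 5) + 5 for 128, 192, 256;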
-
-&function_begin("${PREFIX}_set_decrypt_key");
- &mov ($inp,&wparam(0)); # inp
- &lea ($base,&DWP(-56,"esp"));
- &mov ($round,&wparam(1)); # bits
- &and ($base,-16);
- &mov ($key,&wparam(2)); # key
- &xchg ($base,"esp"); # alloca
- &mov (&DWP(48,"esp"),$base);
-
- &mov ($base,$round);
- &shr ($base,5);
- &add ($base,5);
- &mov (&DWP(240,$key),$base); # AES_KEY->rounds = nbits/32+5;
- &shl ($base,4);
- &lea ($key,&DWP(16,$key,$base));
-
- &mov ($out,1);
- &mov ($magic,$round);
- &shr ($magic,1);
- &and ($magic,32);
-	&xor	($magic,32);	# nbits==192?0:32;
-
- &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
- &call ("_vpaes_schedule_core");
-&set_label("pic_point");
-
- &mov ("esp",&DWP(48,"esp"));
- &xor ("eax","eax");
-&function_end("${PREFIX}_set_decrypt_key");
-
-&function_begin("${PREFIX}_encrypt");
- &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
- &call ("_vpaes_preheat");
-&set_label("pic_point");
- &mov ($inp,&wparam(0)); # inp
- &lea ($base,&DWP(-56,"esp"));
- &mov ($out,&wparam(1)); # out
- &and ($base,-16);
- &mov ($key,&wparam(2)); # key
- &xchg ($base,"esp"); # alloca
- &mov (&DWP(48,"esp"),$base);
-
- &movdqu ("xmm0",&QWP(0,$inp));
- &call ("_vpaes_encrypt_core");
- &movdqu (&QWP(0,$out),"xmm0");
-
- &mov ("esp",&DWP(48,"esp"));
-&function_end("${PREFIX}_encrypt");
-
-&function_begin("${PREFIX}_decrypt");
- &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
- &call ("_vpaes_preheat");
-&set_label("pic_point");
- &mov ($inp,&wparam(0)); # inp
- &lea ($base,&DWP(-56,"esp"));
- &mov ($out,&wparam(1)); # out
- &and ($base,-16);
- &mov ($key,&wparam(2)); # key
- &xchg ($base,"esp"); # alloca
- &mov (&DWP(48,"esp"),$base);
-
- &movdqu ("xmm0",&QWP(0,$inp));
- &call ("_vpaes_decrypt_core");
- &movdqu (&QWP(0,$out),"xmm0");
-
- &mov ("esp",&DWP(48,"esp"));
-&function_end("${PREFIX}_decrypt");
-
-&function_begin("${PREFIX}_cbc_encrypt");
- &mov ($inp,&wparam(0)); # inp
- &mov ($out,&wparam(1)); # out
- &mov ($round,&wparam(2)); # len
- &mov ($key,&wparam(3)); # key
- &sub ($round,16);
- &jc (&label("cbc_abort"));
- &lea ($base,&DWP(-56,"esp"));
- &mov ($const,&wparam(4)); # ivp
- &and ($base,-16);
- &mov ($magic,&wparam(5)); # enc
- &xchg ($base,"esp"); # alloca
- &movdqu ("xmm1",&QWP(0,$const)); # load IV
- &sub ($out,$inp);
- &mov (&DWP(48,"esp"),$base);
-
- &mov (&DWP(0,"esp"),$out); # save out
-	&mov	(&DWP(4,"esp"),$key);	# save key
- &mov (&DWP(8,"esp"),$const); # save ivp
- &mov ($out,$round); # $out works as $len
-
- &lea ($const,&DWP(&label("_vpaes_consts")."+0x30-".&label("pic_point")));
- &call ("_vpaes_preheat");
-&set_label("pic_point");
- &cmp ($magic,0);
- &je (&label("cbc_dec_loop"));
- &jmp (&label("cbc_enc_loop"));
-
-&set_label("cbc_enc_loop",16);
- &movdqu ("xmm0",&QWP(0,$inp)); # load input
- &pxor ("xmm0","xmm1"); # inp^=iv
- &call ("_vpaes_encrypt_core");
- &mov ($base,&DWP(0,"esp")); # restore out
- &mov ($key,&DWP(4,"esp")); # restore key
- &movdqa ("xmm1","xmm0");
- &movdqu (&QWP(0,$base,$inp),"xmm0"); # write output
- &lea ($inp,&DWP(16,$inp));
- &sub ($out,16);
- &jnc (&label("cbc_enc_loop"));
- &jmp (&label("cbc_done"));
-
-&set_label("cbc_dec_loop",16);
- &movdqu ("xmm0",&QWP(0,$inp)); # load input
- &movdqa (&QWP(16,"esp"),"xmm1"); # save IV
- &movdqa (&QWP(32,"esp"),"xmm0"); # save future IV
- &call ("_vpaes_decrypt_core");
- &mov ($base,&DWP(0,"esp")); # restore out
- &mov ($key,&DWP(4,"esp")); # restore key
- &pxor ("xmm0",&QWP(16,"esp")); # out^=iv
- &movdqa ("xmm1",&QWP(32,"esp")); # load next IV
- &movdqu (&QWP(0,$base,$inp),"xmm0"); # write output
- &lea ($inp,&DWP(16,$inp));
- &sub ($out,16);
- &jnc (&label("cbc_dec_loop"));
-
-&set_label("cbc_done");
- &mov ($base,&DWP(8,"esp")); # restore ivp
- &mov ("esp",&DWP(48,"esp"));
- &movdqu (&QWP(0,$base),"xmm1"); # write IV
-&set_label("cbc_abort");
-&function_end("${PREFIX}_cbc_encrypt");
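Both loops implement textbook CBC; the dec loop's two stack slots hold the
running IV and the "future IV" (the raw ciphertext block), which must be
saved before _vpaes_decrypt_core overwrites %xmm0 in place. A string-level
Perl restatement, with $decrypt_block a hypothetical per-block coderef:

    sub cbc_decrypt {
        my ($decrypt_block, $iv, @blocks) = @_;   # 16-byte binary strings
        my @plain;
        for my $c (@blocks) {
            push @plain, $decrypt_block->($c) ^ $iv;  # P[i] = D(C[i]) ^ C[i-1]
            $iv = $c;                                 # the saved "future IV"
        }
        return @plain;
    }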
-
-&asm_finish();
diff --git a/app/openssl/crypto/aes/asm/vpaes-x86_64.S b/app/openssl/crypto/aes/asm/vpaes-x86_64.S
deleted file mode 100644
index 0162631f..00000000
--- a/app/openssl/crypto/aes/asm/vpaes-x86_64.S
+++ /dev/null
@@ -1,828 +0,0 @@
-.text
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-.type _vpaes_encrypt_core,@function
-.align 16
-_vpaes_encrypt_core:
- movq %rdx,%r9
- movq $16,%r11
- movl 240(%rdx),%eax
- movdqa %xmm9,%xmm1
- movdqa .Lk_ipt(%rip),%xmm2
- pandn %xmm0,%xmm1
- movdqu (%r9),%xmm5
- psrld $4,%xmm1
- pand %xmm9,%xmm0
-.byte 102,15,56,0,208
- movdqa .Lk_ipt+16(%rip),%xmm0
-.byte 102,15,56,0,193
- pxor %xmm5,%xmm2
- pxor %xmm2,%xmm0
- addq $16,%r9
- leaq .Lk_mc_backward(%rip),%r10
- jmp .Lenc_entry
-
-.align 16
-.Lenc_loop:
-
- movdqa %xmm13,%xmm4
-.byte 102,15,56,0,226
- pxor %xmm5,%xmm4
- movdqa %xmm12,%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- movdqa %xmm15,%xmm5
-.byte 102,15,56,0,234
- movdqa -64(%r11,%r10,1),%xmm1
- movdqa %xmm14,%xmm2
-.byte 102,15,56,0,211
- pxor %xmm5,%xmm2
- movdqa (%r11,%r10,1),%xmm4
- movdqa %xmm0,%xmm3
-.byte 102,15,56,0,193
- addq $16,%r9
- pxor %xmm2,%xmm0
-.byte 102,15,56,0,220
- addq $16,%r11
- pxor %xmm0,%xmm3
-.byte 102,15,56,0,193
- andq $48,%r11
- pxor %xmm3,%xmm0
- subq $1,%rax
-
-.Lenc_entry:
-
- movdqa %xmm9,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm9,%xmm0
- movdqa %xmm11,%xmm5
-.byte 102,15,56,0,232
- pxor %xmm1,%xmm0
- movdqa %xmm10,%xmm3
-.byte 102,15,56,0,217
- pxor %xmm5,%xmm3
- movdqa %xmm10,%xmm4
-.byte 102,15,56,0,224
- pxor %xmm5,%xmm4
- movdqa %xmm10,%xmm2
-.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
- movdqa %xmm10,%xmm3
- movdqu (%r9),%xmm5
-.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
- jnz .Lenc_loop
-
-
- movdqa -96(%r10),%xmm4
- movdqa -80(%r10),%xmm0
-.byte 102,15,56,0,226
- pxor %xmm5,%xmm4
-.byte 102,15,56,0,195
- movdqa 64(%r11,%r10,1),%xmm1
- pxor %xmm4,%xmm0
-.byte 102,15,56,0,193
- .byte 0xf3,0xc3
-.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
-
-
-
-
-
-
-.type _vpaes_decrypt_core,@function
-.align 16
-_vpaes_decrypt_core:
- movq %rdx,%r9
- movl 240(%rdx),%eax
- movdqa %xmm9,%xmm1
- movdqa .Lk_dipt(%rip),%xmm2
- pandn %xmm0,%xmm1
- movq %rax,%r11
- psrld $4,%xmm1
- movdqu (%r9),%xmm5
- shlq $4,%r11
- pand %xmm9,%xmm0
-.byte 102,15,56,0,208
- movdqa .Lk_dipt+16(%rip),%xmm0
- xorq $48,%r11
- leaq .Lk_dsbd(%rip),%r10
-.byte 102,15,56,0,193
- andq $48,%r11
- pxor %xmm5,%xmm2
- movdqa .Lk_mc_forward+48(%rip),%xmm5
- pxor %xmm2,%xmm0
- addq $16,%r9
- addq %r10,%r11
- jmp .Ldec_entry
-
-.align 16
-.Ldec_loop:
-
-
-
- movdqa -32(%r10),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa -16(%r10),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- addq $16,%r9
-
-.byte 102,15,56,0,197
- movdqa 0(%r10),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 16(%r10),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
- subq $1,%rax
-
-.byte 102,15,56,0,197
- movdqa 32(%r10),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 48(%r10),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
-
-.byte 102,15,56,0,197
- movdqa 64(%r10),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 80(%r10),%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
-
-.byte 102,15,58,15,237,12
-
-.Ldec_entry:
-
- movdqa %xmm9,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm9,%xmm0
- movdqa %xmm11,%xmm2
-.byte 102,15,56,0,208
- pxor %xmm1,%xmm0
- movdqa %xmm10,%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
- movdqa %xmm10,%xmm4
-.byte 102,15,56,0,224
- pxor %xmm2,%xmm4
- movdqa %xmm10,%xmm2
-.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
- movdqa %xmm10,%xmm3
-.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
- movdqu (%r9),%xmm0
- jnz .Ldec_loop
-
-
- movdqa 96(%r10),%xmm4
-.byte 102,15,56,0,226
- pxor %xmm0,%xmm4
- movdqa 112(%r10),%xmm0
- movdqa -352(%r11),%xmm2
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
-.byte 102,15,56,0,194
- .byte 0xf3,0xc3
-.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
-
-
-
-
-
-
-.type _vpaes_schedule_core,@function
-.align 16
-_vpaes_schedule_core:
-
-
-
-
-
- call _vpaes_preheat
- movdqa .Lk_rcon(%rip),%xmm8
- movdqu (%rdi),%xmm0
-
-
- movdqa %xmm0,%xmm3
- leaq .Lk_ipt(%rip),%r11
- call _vpaes_schedule_transform
- movdqa %xmm0,%xmm7
-
- leaq .Lk_sr(%rip),%r10
- testq %rcx,%rcx
- jnz .Lschedule_am_decrypting
-
-
- movdqu %xmm0,(%rdx)
- jmp .Lschedule_go
-
-.Lschedule_am_decrypting:
-
- movdqa (%r8,%r10,1),%xmm1
-.byte 102,15,56,0,217
- movdqu %xmm3,(%rdx)
- xorq $48,%r8
-
-.Lschedule_go:
- cmpl $192,%esi
- ja .Lschedule_256
- je .Lschedule_192
-
-
-
-
-
-
-
-
-
-
-.Lschedule_128:
- movl $10,%esi
-
-.Loop_schedule_128:
- call _vpaes_schedule_round
- decq %rsi
- jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle
- jmp .Loop_schedule_128
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-.align 16
-.Lschedule_192:
- movdqu 8(%rdi),%xmm0
- call _vpaes_schedule_transform
- movdqa %xmm0,%xmm6
- pxor %xmm4,%xmm4
- movhlps %xmm4,%xmm6
- movl $4,%esi
-
-.Loop_schedule_192:
- call _vpaes_schedule_round
-.byte 102,15,58,15,198,8
- call _vpaes_schedule_mangle
- call _vpaes_schedule_192_smear
- call _vpaes_schedule_mangle
- call _vpaes_schedule_round
- decq %rsi
- jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle
- call _vpaes_schedule_192_smear
- jmp .Loop_schedule_192
-
-
-
-
-
-
-
-
-
-
-
-.align 16
-.Lschedule_256:
- movdqu 16(%rdi),%xmm0
- call _vpaes_schedule_transform
- movl $7,%esi
-
-.Loop_schedule_256:
- call _vpaes_schedule_mangle
- movdqa %xmm0,%xmm6
-
-
- call _vpaes_schedule_round
- decq %rsi
- jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle
-
-
- pshufd $255,%xmm0,%xmm0
- movdqa %xmm7,%xmm5
- movdqa %xmm6,%xmm7
- call _vpaes_schedule_low_round
- movdqa %xmm5,%xmm7
-
- jmp .Loop_schedule_256
-
-
-
-
-
-
-
-
-
-
-
-
-.align 16
-.Lschedule_mangle_last:
-
- leaq .Lk_deskew(%rip),%r11
- testq %rcx,%rcx
- jnz .Lschedule_mangle_last_dec
-
-
- movdqa (%r8,%r10,1),%xmm1
-.byte 102,15,56,0,193
- leaq .Lk_opt(%rip),%r11
- addq $32,%rdx
-
-.Lschedule_mangle_last_dec:
- addq $-16,%rdx
- pxor .Lk_s63(%rip),%xmm0
- call _vpaes_schedule_transform
- movdqu %xmm0,(%rdx)
-
-
- pxor %xmm0,%xmm0
- pxor %xmm1,%xmm1
- pxor %xmm2,%xmm2
- pxor %xmm3,%xmm3
- pxor %xmm4,%xmm4
- pxor %xmm5,%xmm5
- pxor %xmm6,%xmm6
- pxor %xmm7,%xmm7
- .byte 0xf3,0xc3
-.size _vpaes_schedule_core,.-_vpaes_schedule_core
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-.type _vpaes_schedule_192_smear,@function
-.align 16
-_vpaes_schedule_192_smear:
- pshufd $128,%xmm6,%xmm0
- pxor %xmm0,%xmm6
- pshufd $254,%xmm7,%xmm0
- pxor %xmm0,%xmm6
- movdqa %xmm6,%xmm0
- pxor %xmm1,%xmm1
- movhlps %xmm1,%xmm6
- .byte 0xf3,0xc3
-.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-.type _vpaes_schedule_round,@function
-.align 16
-_vpaes_schedule_round:
-
- pxor %xmm1,%xmm1
-.byte 102,65,15,58,15,200,15
-.byte 102,69,15,58,15,192,15
- pxor %xmm1,%xmm7
-
-
- pshufd $255,%xmm0,%xmm0
-.byte 102,15,58,15,192,1
-
-
-
-
-_vpaes_schedule_low_round:
-
- movdqa %xmm7,%xmm1
- pslldq $4,%xmm7
- pxor %xmm1,%xmm7
- movdqa %xmm7,%xmm1
- pslldq $8,%xmm7
- pxor %xmm1,%xmm7
- pxor .Lk_s63(%rip),%xmm7
-
-
- movdqa %xmm9,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm9,%xmm0
- movdqa %xmm11,%xmm2
-.byte 102,15,56,0,208
- pxor %xmm1,%xmm0
- movdqa %xmm10,%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
- movdqa %xmm10,%xmm4
-.byte 102,15,56,0,224
- pxor %xmm2,%xmm4
- movdqa %xmm10,%xmm2
-.byte 102,15,56,0,211
- pxor %xmm0,%xmm2
- movdqa %xmm10,%xmm3
-.byte 102,15,56,0,220
- pxor %xmm1,%xmm3
- movdqa %xmm13,%xmm4
-.byte 102,15,56,0,226
- movdqa %xmm12,%xmm0
-.byte 102,15,56,0,195
- pxor %xmm4,%xmm0
-
-
- pxor %xmm7,%xmm0
- movdqa %xmm0,%xmm7
- .byte 0xf3,0xc3
-.size _vpaes_schedule_round,.-_vpaes_schedule_round
-
-
-
-
-
-
-
-
-
-
-.type _vpaes_schedule_transform,@function
-.align 16
-_vpaes_schedule_transform:
- movdqa %xmm9,%xmm1
- pandn %xmm0,%xmm1
- psrld $4,%xmm1
- pand %xmm9,%xmm0
- movdqa (%r11),%xmm2
-.byte 102,15,56,0,208
- movdqa 16(%r11),%xmm0
-.byte 102,15,56,0,193
- pxor %xmm2,%xmm0
- .byte 0xf3,0xc3
-.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-.type _vpaes_schedule_mangle,@function
-.align 16
-_vpaes_schedule_mangle:
- movdqa %xmm0,%xmm4
- movdqa .Lk_mc_forward(%rip),%xmm5
- testq %rcx,%rcx
- jnz .Lschedule_mangle_dec
-
-
- addq $16,%rdx
- pxor .Lk_s63(%rip),%xmm4
-.byte 102,15,56,0,229
- movdqa %xmm4,%xmm3
-.byte 102,15,56,0,229
- pxor %xmm4,%xmm3
-.byte 102,15,56,0,229
- pxor %xmm4,%xmm3
-
- jmp .Lschedule_mangle_both
-.align 16
-.Lschedule_mangle_dec:
-
- leaq .Lk_dksd(%rip),%r11
- movdqa %xmm9,%xmm1
- pandn %xmm4,%xmm1
- psrld $4,%xmm1
- pand %xmm9,%xmm4
-
- movdqa 0(%r11),%xmm2
-.byte 102,15,56,0,212
- movdqa 16(%r11),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
-.byte 102,15,56,0,221
-
- movdqa 32(%r11),%xmm2
-.byte 102,15,56,0,212
- pxor %xmm3,%xmm2
- movdqa 48(%r11),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
-.byte 102,15,56,0,221
-
- movdqa 64(%r11),%xmm2
-.byte 102,15,56,0,212
- pxor %xmm3,%xmm2
- movdqa 80(%r11),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
-.byte 102,15,56,0,221
-
- movdqa 96(%r11),%xmm2
-.byte 102,15,56,0,212
- pxor %xmm3,%xmm2
- movdqa 112(%r11),%xmm3
-.byte 102,15,56,0,217
- pxor %xmm2,%xmm3
-
- addq $-16,%rdx
-
-.Lschedule_mangle_both:
- movdqa (%r8,%r10,1),%xmm1
-.byte 102,15,56,0,217
- addq $-16,%r8
- andq $48,%r8
- movdqu %xmm3,(%rdx)
- .byte 0xf3,0xc3
-.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
-
-
-
-
-.globl vpaes_set_encrypt_key
-.type vpaes_set_encrypt_key,@function
-.align 16
-vpaes_set_encrypt_key:
- movl %esi,%eax
- shrl $5,%eax
- addl $5,%eax
- movl %eax,240(%rdx)
-
- movl $0,%ecx
- movl $48,%r8d
- call _vpaes_schedule_core
- xorl %eax,%eax
- .byte 0xf3,0xc3
-.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
-
-.globl vpaes_set_decrypt_key
-.type vpaes_set_decrypt_key,@function
-.align 16
-vpaes_set_decrypt_key:
- movl %esi,%eax
- shrl $5,%eax
- addl $5,%eax
- movl %eax,240(%rdx)
- shll $4,%eax
- leaq 16(%rdx,%rax,1),%rdx
-
- movl $1,%ecx
- movl %esi,%r8d
- shrl $1,%r8d
- andl $32,%r8d
- xorl $32,%r8d
- call _vpaes_schedule_core
- xorl %eax,%eax
- .byte 0xf3,0xc3
-.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
-
-.globl vpaes_encrypt
-.type vpaes_encrypt,@function
-.align 16
-vpaes_encrypt:
- movdqu (%rdi),%xmm0
- call _vpaes_preheat
- call _vpaes_encrypt_core
- movdqu %xmm0,(%rsi)
- .byte 0xf3,0xc3
-.size vpaes_encrypt,.-vpaes_encrypt
-
-.globl vpaes_decrypt
-.type vpaes_decrypt,@function
-.align 16
-vpaes_decrypt:
- movdqu (%rdi),%xmm0
- call _vpaes_preheat
- call _vpaes_decrypt_core
- movdqu %xmm0,(%rsi)
- .byte 0xf3,0xc3
-.size vpaes_decrypt,.-vpaes_decrypt
-.globl vpaes_cbc_encrypt
-.type vpaes_cbc_encrypt,@function
-.align 16
-vpaes_cbc_encrypt:
- xchgq %rcx,%rdx
- subq $16,%rcx
- jc .Lcbc_abort
- movdqu (%r8),%xmm6
- subq %rdi,%rsi
- call _vpaes_preheat
- cmpl $0,%r9d
- je .Lcbc_dec_loop
- jmp .Lcbc_enc_loop
-.align 16
-.Lcbc_enc_loop:
- movdqu (%rdi),%xmm0
- pxor %xmm6,%xmm0
- call _vpaes_encrypt_core
- movdqa %xmm0,%xmm6
- movdqu %xmm0,(%rsi,%rdi,1)
- leaq 16(%rdi),%rdi
- subq $16,%rcx
- jnc .Lcbc_enc_loop
- jmp .Lcbc_done
-.align 16
-.Lcbc_dec_loop:
- movdqu (%rdi),%xmm0
- movdqa %xmm0,%xmm7
- call _vpaes_decrypt_core
- pxor %xmm6,%xmm0
- movdqa %xmm7,%xmm6
- movdqu %xmm0,(%rsi,%rdi,1)
- leaq 16(%rdi),%rdi
- subq $16,%rcx
- jnc .Lcbc_dec_loop
-.Lcbc_done:
- movdqu %xmm6,(%r8)
-.Lcbc_abort:
- .byte 0xf3,0xc3
-.size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
-
-
-
-
-
-
-.type _vpaes_preheat,@function
-.align 16
-_vpaes_preheat:
- leaq .Lk_s0F(%rip),%r10
- movdqa -32(%r10),%xmm10
- movdqa -16(%r10),%xmm11
- movdqa 0(%r10),%xmm9
- movdqa 48(%r10),%xmm13
- movdqa 64(%r10),%xmm12
- movdqa 80(%r10),%xmm15
- movdqa 96(%r10),%xmm14
- .byte 0xf3,0xc3
-.size _vpaes_preheat,.-_vpaes_preheat
-
-
-
-
-
-.type _vpaes_consts,@object
-.align 64
-_vpaes_consts:
-.Lk_inv:
-.quad 0x0E05060F0D080180, 0x040703090A0B0C02
-.quad 0x01040A060F0B0780, 0x030D0E0C02050809
-
-.Lk_s0F:
-.quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
-
-.Lk_ipt:
-.quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
-.quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
-
-.Lk_sb1:
-.quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
-.quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
-.Lk_sb2:
-.quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
-.quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
-.Lk_sbo:
-.quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
-.quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
-
-.Lk_mc_forward:
-.quad 0x0407060500030201, 0x0C0F0E0D080B0A09
-.quad 0x080B0A0904070605, 0x000302010C0F0E0D
-.quad 0x0C0F0E0D080B0A09, 0x0407060500030201
-.quad 0x000302010C0F0E0D, 0x080B0A0904070605
-
-.Lk_mc_backward:
-.quad 0x0605040702010003, 0x0E0D0C0F0A09080B
-.quad 0x020100030E0D0C0F, 0x0A09080B06050407
-.quad 0x0E0D0C0F0A09080B, 0x0605040702010003
-.quad 0x0A09080B06050407, 0x020100030E0D0C0F
-
-.Lk_sr:
-.quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
-.quad 0x030E09040F0A0500, 0x0B06010C07020D08
-.quad 0x0F060D040B020900, 0x070E050C030A0108
-.quad 0x0B0E0104070A0D00, 0x0306090C0F020508
-
-.Lk_rcon:
-.quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
-
-.Lk_s63:
-.quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
-
-.Lk_opt:
-.quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
-.quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
-
-.Lk_deskew:
-.quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
-.quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
-
-
-
-
-
-.Lk_dksd:
-.quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
-.quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
-.Lk_dksb:
-.quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
-.quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
-.Lk_dkse:
-.quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
-.quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
-.Lk_dks9:
-.quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
-.quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
-
-
-
-
-
-.Lk_dipt:
-.quad 0x0F505B040B545F00, 0x154A411E114E451A
-.quad 0x86E383E660056500, 0x12771772F491F194
-
-.Lk_dsb9:
-.quad 0x851C03539A86D600, 0xCAD51F504F994CC9
-.quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
-.Lk_dsbd:
-.quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
-.quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
-.Lk_dsbb:
-.quad 0xD022649296B44200, 0x602646F6B0F2D404
-.quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
-.Lk_dsbe:
-.quad 0x46F2929626D4D000, 0x2242600464B4F6B0
-.quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
-.Lk_dsbo:
-.quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
-.quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
-.byte 86,101,99,116,111,114,32,80,101,114,109,117,116,97,116,105,111,110,32,65,69,83,32,102,111,114,32,120,56,54,95,54,52,47,83,83,83,69,51,44,32,77,105,107,101,32,72,97,109,98,117,114,103,32,40,83,116,97,110,102,111,114,100,32,85,110,105,118,101,114,115,105,116,121,41,0
-.align 64
-.size _vpaes_consts,.-_vpaes_consts
diff --git a/app/openssl/crypto/aes/asm/vpaes-x86_64.pl b/app/openssl/crypto/aes/asm/vpaes-x86_64.pl
deleted file mode 100644
index bd7f45b8..00000000
--- a/app/openssl/crypto/aes/asm/vpaes-x86_64.pl
+++ /dev/null
@@ -1,1207 +0,0 @@
-#!/usr/bin/env perl
-
-######################################################################
-## Constant-time SSSE3 AES core implementation.
-## version 0.1
-##
-## By Mike Hamburg (Stanford University), 2009
-## Public domain.
-##
-## For details see http://shiftleft.org/papers/vector_aes/ and
-## http://crypto.stanford.edu/vpaes/.
-
-######################################################################
-# September 2011.
-#
-# Interface to OpenSSL as an "almost" drop-in replacement for
-# aes-x86_64.pl. "Almost" refers to the fact that AES_cbc_encrypt
-# doesn't handle partial vectors (it doesn't have to if called from
-# EVP only). "Drop-in" implies that this module doesn't share key
-# schedule structure with the original, nor does it make assumptions
-# about its alignment...
-#
-# Performance summary. aes-x86_64.pl column lists large-block CBC
-# encrypt/decrypt/with-hyper-threading-off(*) results in cycles per
-# byte processed with 128-bit key, and vpaes-x86_64.pl column -
-# [also large-block CBC] encrypt/decrypt.
-#
-# aes-x86_64.pl vpaes-x86_64.pl
-#
-# Core 2(**) 30.5/43.7/14.3 21.8/25.7(***)
-# Nehalem 30.5/42.2/14.6 9.8/11.8
-# Atom 63.9/79.0/32.1 64.0/84.8(***)
-#
-# (*)	"Hyper-threading" in this context refers to cache shared
-#	among multiple cores rather than specifically to Intel HTT. As
-#	the vast majority of contemporary cores share cache, the slower
-#	code path is commonplace. In other words, "with-hyper-threading-
-#	off" results are presented mostly for reference purposes.
-#
-# (**) "Core 2" refers to initial 65nm design, a.k.a. Conroe.
-#
-# (***)	The less impressive improvement on Core 2 and Atom is due to
-#	slow pshufb, yet it's a respectable +40%/78% improvement on Core 2
-#	(as implied, over the "hyper-threading-safe" code path).
-#
-# <appro@openssl.org>
-
-$flavour = shift;
-$output = shift;
-if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
-
-$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
-
-$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
-( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
-( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
-die "can't locate x86_64-xlate.pl";
-
-open OUT,"| \"$^X\" $xlate $flavour $output";
-*STDOUT=*OUT;
-
-$PREFIX="vpaes";
-
-$code.=<<___;
-.text
-
-##
-## _aes_encrypt_core
-##
-## AES-encrypt %xmm0.
-##
-## Inputs:
-## %xmm0 = input
-## %xmm9-%xmm15 as in _vpaes_preheat
-## (%rdx) = scheduled keys
-##
-## Output in %xmm0
-## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
-## Preserves %xmm6 - %xmm8 so you get some local vectors
-##
-##
-.type _vpaes_encrypt_core,\@abi-omnipotent
-.align 16
-_vpaes_encrypt_core:
- mov %rdx, %r9
- mov \$16, %r11
- mov 240(%rdx),%eax
- movdqa %xmm9, %xmm1
- movdqa .Lk_ipt(%rip), %xmm2 # iptlo
- pandn %xmm0, %xmm1
- movdqu (%r9), %xmm5 # round0 key
- psrld \$4, %xmm1
- pand %xmm9, %xmm0
- pshufb %xmm0, %xmm2
- movdqa .Lk_ipt+16(%rip), %xmm0 # ipthi
- pshufb %xmm1, %xmm0
- pxor %xmm5, %xmm2
- pxor %xmm2, %xmm0
- add \$16, %r9
- lea .Lk_mc_backward(%rip),%r10
- jmp .Lenc_entry
-
-.align 16
-.Lenc_loop:
- # middle of middle round
- movdqa %xmm13, %xmm4 # 4 : sb1u
- pshufb %xmm2, %xmm4 # 4 = sb1u
- pxor %xmm5, %xmm4 # 4 = sb1u + k
- movdqa %xmm12, %xmm0 # 0 : sb1t
- pshufb %xmm3, %xmm0 # 0 = sb1t
- pxor %xmm4, %xmm0 # 0 = A
- movdqa %xmm15, %xmm5 # 4 : sb2u
- pshufb %xmm2, %xmm5 # 4 = sb2u
- movdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
- movdqa %xmm14, %xmm2 # 2 : sb2t
- pshufb %xmm3, %xmm2 # 2 = sb2t
- pxor %xmm5, %xmm2 # 2 = 2A
- movdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
- movdqa %xmm0, %xmm3 # 3 = A
- pshufb %xmm1, %xmm0 # 0 = B
- add \$16, %r9 # next key
- pxor %xmm2, %xmm0 # 0 = 2A+B
- pshufb %xmm4, %xmm3 # 3 = D
- add \$16, %r11 # next mc
- pxor %xmm0, %xmm3 # 3 = 2A+B+D
- pshufb %xmm1, %xmm0 # 0 = 2B+C
- and \$0x30, %r11 # ... mod 4
- pxor %xmm3, %xmm0 # 0 = 2A+3B+C+D
- sub \$1,%rax # nr--
-
-.Lenc_entry:
- # top of round
- movdqa %xmm9, %xmm1 # 1 : i
- pandn %xmm0, %xmm1 # 1 = i<<4
- psrld \$4, %xmm1 # 1 = i
- pand %xmm9, %xmm0 # 0 = k
- movdqa %xmm11, %xmm5 # 2 : a/k
- pshufb %xmm0, %xmm5 # 2 = a/k
- pxor %xmm1, %xmm0 # 0 = j
- movdqa %xmm10, %xmm3 # 3 : 1/i
- pshufb %xmm1, %xmm3 # 3 = 1/i
- pxor %xmm5, %xmm3 # 3 = iak = 1/i + a/k
- movdqa %xmm10, %xmm4 # 4 : 1/j
- pshufb %xmm0, %xmm4 # 4 = 1/j
- pxor %xmm5, %xmm4 # 4 = jak = 1/j + a/k
- movdqa %xmm10, %xmm2 # 2 : 1/iak
- pshufb %xmm3, %xmm2 # 2 = 1/iak
- pxor %xmm0, %xmm2 # 2 = io
- movdqa %xmm10, %xmm3 # 3 : 1/jak
- movdqu (%r9), %xmm5
- pshufb %xmm4, %xmm3 # 3 = 1/jak
- pxor %xmm1, %xmm3 # 3 = jo
- jnz .Lenc_loop
-
- # middle of last round
- movdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
- movdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
- pshufb %xmm2, %xmm4 # 4 = sbou
- pxor %xmm5, %xmm4 # 4 = sb1u + k
- pshufb %xmm3, %xmm0 # 0 = sb1t
- movdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
- pxor %xmm4, %xmm0 # 0 = A
- pshufb %xmm1, %xmm0
- ret
-.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
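
The block comment above spells out the register contract of _vpaes_encrypt_core; the trick underneath is that every table used by the routine is a 16-entry pshufb lookup indexed by one nibble of each state byte. A minimal C model of that primitive follows (the lo/hi tables are hypothetical stand-ins, not the real .Lk_* constants):

    #include <stdint.h>

    /* Emulate SSSE3 pshufb: the low 4 bits of each index select a byte of
     * the 16-byte table; a set top bit yields zero instead. */
    static void pshufb16(uint8_t dst[16], const uint8_t tbl[16],
                         const uint8_t idx[16])
    {
        for (int i = 0; i < 16; i++)
            dst[i] = (idx[i] & 0x80) ? 0 : tbl[idx[i] & 0x0f];
    }

    /* vpaes splits each state byte into nibbles so every table fits in one
     * xmm register, then XORs the two half-lookups (the pand/psrld +
     * pshufb pairs followed by pxor in the code above). */
    static void nibble_lookup(uint8_t out[16], const uint8_t in[16],
                              const uint8_t lo_tbl[16],
                              const uint8_t hi_tbl[16])
    {
        uint8_t lo[16], hi[16], t0[16], t1[16];
        for (int i = 0; i < 16; i++) {
            lo[i] = in[i] & 0x0f;
            hi[i] = in[i] >> 4;
        }
        pshufb16(t0, lo_tbl, lo);
        pshufb16(t1, hi_tbl, hi);
        for (int i = 0; i < 16; i++)
            out[i] = t0[i] ^ t1[i];
    }

Because pshufb reads all 16 table bytes from a register rather than from data-dependent memory addresses, the lookup is constant-time, which is the point of the whole module.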
-
-##
-## Decryption core
-##
-## Same API as encryption core.
-##
-.type _vpaes_decrypt_core,\@abi-omnipotent
-.align 16
-_vpaes_decrypt_core:
- mov %rdx, %r9 # load key
- mov 240(%rdx),%eax
- movdqa %xmm9, %xmm1
- movdqa .Lk_dipt(%rip), %xmm2 # iptlo
- pandn %xmm0, %xmm1
- mov %rax, %r11
- psrld \$4, %xmm1
- movdqu (%r9), %xmm5 # round0 key
- shl \$4, %r11
- pand %xmm9, %xmm0
- pshufb %xmm0, %xmm2
- movdqa .Lk_dipt+16(%rip), %xmm0 # ipthi
- xor \$0x30, %r11
- lea .Lk_dsbd(%rip),%r10
- pshufb %xmm1, %xmm0
- and \$0x30, %r11
- pxor %xmm5, %xmm2
- movdqa .Lk_mc_forward+48(%rip), %xmm5
- pxor %xmm2, %xmm0
- add \$16, %r9
- add %r10, %r11
- jmp .Ldec_entry
-
-.align 16
-.Ldec_loop:
-##
-## Inverse mix columns
-##
- movdqa -0x20(%r10),%xmm4 # 4 : sb9u
- pshufb %xmm2, %xmm4 # 4 = sb9u
- pxor %xmm0, %xmm4
- movdqa -0x10(%r10),%xmm0 # 0 : sb9t
- pshufb %xmm3, %xmm0 # 0 = sb9t
- pxor %xmm4, %xmm0 # 0 = ch
- add \$16, %r9 # next round key
-
- pshufb %xmm5, %xmm0 # MC ch
- movdqa 0x00(%r10),%xmm4 # 4 : sbdu
- pshufb %xmm2, %xmm4 # 4 = sbdu
- pxor %xmm0, %xmm4 # 4 = ch
- movdqa 0x10(%r10),%xmm0 # 0 : sbdt
- pshufb %xmm3, %xmm0 # 0 = sbdt
- pxor %xmm4, %xmm0 # 0 = ch
- sub \$1,%rax # nr--
-
- pshufb %xmm5, %xmm0 # MC ch
- movdqa 0x20(%r10),%xmm4 # 4 : sbbu
- pshufb %xmm2, %xmm4 # 4 = sbbu
- pxor %xmm0, %xmm4 # 4 = ch
- movdqa 0x30(%r10),%xmm0 # 0 : sbbt
- pshufb %xmm3, %xmm0 # 0 = sbbt
- pxor %xmm4, %xmm0 # 0 = ch
-
- pshufb %xmm5, %xmm0 # MC ch
- movdqa 0x40(%r10),%xmm4 # 4 : sbeu
- pshufb %xmm2, %xmm4 # 4 = sbeu
- pxor %xmm0, %xmm4 # 4 = ch
- movdqa 0x50(%r10),%xmm0 # 0 : sbet
- pshufb %xmm3, %xmm0 # 0 = sbet
- pxor %xmm4, %xmm0 # 0 = ch
-
- palignr \$12, %xmm5, %xmm5
-
-.Ldec_entry:
- # top of round
- movdqa %xmm9, %xmm1 # 1 : i
- pandn %xmm0, %xmm1 # 1 = i<<4
- psrld \$4, %xmm1 # 1 = i
- pand %xmm9, %xmm0 # 0 = k
- movdqa %xmm11, %xmm2 # 2 : a/k
- pshufb %xmm0, %xmm2 # 2 = a/k
- pxor %xmm1, %xmm0 # 0 = j
- movdqa %xmm10, %xmm3 # 3 : 1/i
- pshufb %xmm1, %xmm3 # 3 = 1/i
- pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
- movdqa %xmm10, %xmm4 # 4 : 1/j
- pshufb %xmm0, %xmm4 # 4 = 1/j
- pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
- movdqa %xmm10, %xmm2 # 2 : 1/iak
- pshufb %xmm3, %xmm2 # 2 = 1/iak
- pxor %xmm0, %xmm2 # 2 = io
- movdqa %xmm10, %xmm3 # 3 : 1/jak
- pshufb %xmm4, %xmm3 # 3 = 1/jak
- pxor %xmm1, %xmm3 # 3 = jo
- movdqu (%r9), %xmm0
- jnz .Ldec_loop
-
- # middle of last round
- movdqa 0x60(%r10), %xmm4 # 3 : sbou
- pshufb %xmm2, %xmm4 # 4 = sbou
- pxor %xmm0, %xmm4 # 4 = sb1u + k
- movdqa 0x70(%r10), %xmm0 # 0 : sbot
- movdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
- pshufb %xmm3, %xmm0 # 0 = sb1t
- pxor %xmm4, %xmm0 # 0 = A
- pshufb %xmm2, %xmm0
- ret
-.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
-
-########################################################
-## ##
-## AES key schedule ##
-## ##
-########################################################
-.type _vpaes_schedule_core,\@abi-omnipotent
-.align 16
-_vpaes_schedule_core:
- # rdi = key
- # rsi = size in bits
- # rdx = buffer
- # rcx = direction. 0=encrypt, 1=decrypt
-
- call _vpaes_preheat # load the tables
- movdqa .Lk_rcon(%rip), %xmm8 # load rcon
- movdqu (%rdi), %xmm0 # load key (unaligned)
-
- # input transform
- movdqa %xmm0, %xmm3
- lea .Lk_ipt(%rip), %r11
- call _vpaes_schedule_transform
- movdqa %xmm0, %xmm7
-
- lea .Lk_sr(%rip),%r10
- test %rcx, %rcx
- jnz .Lschedule_am_decrypting
-
- # encrypting, output zeroth round key after transform
- movdqu %xmm0, (%rdx)
- jmp .Lschedule_go
-
-.Lschedule_am_decrypting:
- # decrypting, output zeroth round key after shiftrows
- movdqa (%r8,%r10),%xmm1
- pshufb %xmm1, %xmm3
- movdqu %xmm3, (%rdx)
- xor \$0x30, %r8
-
-.Lschedule_go:
- cmp \$192, %esi
- ja .Lschedule_256
- je .Lschedule_192
-	# 128: fall through
-
-##
-## .schedule_128
-##
-## 128-bit specific part of key schedule.
-##
-## This schedule is really simple, because all its parts
-## are accomplished by the subroutines.
-##
-.Lschedule_128:
- mov \$10, %esi
-
-.Loop_schedule_128:
- call _vpaes_schedule_round
- dec %rsi
- jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle # write output
- jmp .Loop_schedule_128
-
-##
-## .aes_schedule_192
-##
-## 192-bit specific part of key schedule.
-##
-## The main body of this schedule is the same as the 128-bit
-## schedule, but with more smearing. The long, high side is
-## stored in %xmm7 as before, and the short, low side is in
-## the high bits of %xmm6.
-##
-## This schedule is somewhat nastier, however, because each
-## round produces 192 bits of key material, or 1.5 round keys.
-## Therefore, on each cycle we do 2 rounds and produce 3 round
-## keys.
-##
-.align 16
-.Lschedule_192:
- movdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
- call _vpaes_schedule_transform # input transform
- movdqa %xmm0, %xmm6 # save short part
- pxor %xmm4, %xmm4 # clear 4
- movhlps %xmm4, %xmm6 # clobber low side with zeros
- mov \$4, %esi
-
-.Loop_schedule_192:
- call _vpaes_schedule_round
- palignr \$8,%xmm6,%xmm0
- call _vpaes_schedule_mangle # save key n
- call _vpaes_schedule_192_smear
- call _vpaes_schedule_mangle # save key n+1
- call _vpaes_schedule_round
- dec %rsi
- jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle # save key n+2
- call _vpaes_schedule_192_smear
- jmp .Loop_schedule_192
-
-##
-## .aes_schedule_256
-##
-## 256-bit specific part of key schedule.
-##
-## The structure here is very similar to the 128-bit
-## schedule, but with an additional "low side" in
-## %xmm6. The low side's rounds are the same as the
-## high side's, except no rcon and no rotation.
-##
-.align 16
-.Lschedule_256:
- movdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
- call _vpaes_schedule_transform # input transform
- mov \$7, %esi
-
-.Loop_schedule_256:
- call _vpaes_schedule_mangle # output low result
- movdqa %xmm0, %xmm6 # save cur_lo in xmm6
-
- # high round
- call _vpaes_schedule_round
- dec %rsi
- jz .Lschedule_mangle_last
- call _vpaes_schedule_mangle
-
- # low round. swap xmm7 and xmm6
- pshufd \$0xFF, %xmm0, %xmm0
- movdqa %xmm7, %xmm5
- movdqa %xmm6, %xmm7
- call _vpaes_schedule_low_round
- movdqa %xmm5, %xmm7
-
- jmp .Loop_schedule_256
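
The three schedule paths above differ mainly in how many words the key recurrence spans. For orientation, here is a plain-basis textbook expansion in C; the vpaes code computes the same schedule in a transformed basis, so this is a reference model rather than the vector algorithm, and the brute-force S-box exists only to keep the sketch self-contained:

    #include <stdint.h>

    /* GF(2^8) multiply modulo x^8 + x^4 + x^3 + x + 1 (0x11b). */
    static uint8_t gmul(uint8_t a, uint8_t b)
    {
        uint8_t p = 0;
        while (b) {
            if (b & 1) p ^= a;
            a = (uint8_t)((a << 1) ^ ((a & 0x80) ? 0x1b : 0));
            b >>= 1;
        }
        return p;
    }

    /* AES S-box computed on the fly: brute-force inverse plus affine map. */
    static uint8_t sbox(uint8_t x)
    {
        uint8_t inv = 0, s;
        if (x)
            for (int c = 1; c < 256; c++)
                if (gmul(x, (uint8_t)c) == 1) { inv = (uint8_t)c; break; }
        s = inv;
        for (int i = 1; i <= 4; i++)
            s ^= (uint8_t)((inv << i) | (inv >> (8 - i)));
        return s ^ 0x63;
    }

    static uint32_t sub_word(uint32_t w)        /* S-box on each byte */
    {
        return (uint32_t)sbox((uint8_t)(w >> 24)) << 24 |
               (uint32_t)sbox((uint8_t)(w >> 16)) << 16 |
               (uint32_t)sbox((uint8_t)(w >>  8)) <<  8 |
               (uint32_t)sbox((uint8_t)w);
    }

    /* Key words packed big-endian. nk = 4/6/8 gives nr = 10/12/14, i.e.
     * 11/13/15 round keys; with nk = 6 the recurrence crosses round-key
     * boundaries at 1.5 keys per step, which is why the 192-bit path
     * above runs two rounds per loop iteration and emits three keys. */
    static void key_expand(uint32_t w[60], const uint32_t key[], int nk)
    {
        int nr = nk + 6;
        uint8_t rc = 1;
        for (int i = 0; i < nk; i++) w[i] = key[i];
        for (int i = nk; i < 4 * (nr + 1); i++) {
            uint32_t t = w[i - 1];
            if (i % nk == 0) {
                t = sub_word((t << 8) | (t >> 24)) ^ ((uint32_t)rc << 24);
                rc = gmul(rc, 2);
            } else if (nk == 8 && i % nk == 4) {
                t = sub_word(t);        /* the extra "low round" of AES-256 */
            }
            w[i] = w[i - nk] ^ t;
        }
    }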
-
-
-##
-## .aes_schedule_mangle_last
-##
-## Mangler for last round of key schedule
-## Mangles %xmm0
-## when encrypting, outputs out(%xmm0) ^ 63
-## when decrypting, outputs unskew(%xmm0)
-##
-## Always called right before return... jumps to cleanup and exits
-##
-.align 16
-.Lschedule_mangle_last:
- # schedule last round key from xmm0
- lea .Lk_deskew(%rip),%r11 # prepare to deskew
- test %rcx, %rcx
- jnz .Lschedule_mangle_last_dec
-
- # encrypting
- movdqa (%r8,%r10),%xmm1
- pshufb %xmm1, %xmm0 # output permute
- lea .Lk_opt(%rip), %r11 # prepare to output transform
- add \$32, %rdx
-
-.Lschedule_mangle_last_dec:
- add \$-16, %rdx
- pxor .Lk_s63(%rip), %xmm0
- call _vpaes_schedule_transform # output transform
- movdqu %xmm0, (%rdx) # save last key
-
- # cleanup
- pxor %xmm0, %xmm0
- pxor %xmm1, %xmm1
- pxor %xmm2, %xmm2
- pxor %xmm3, %xmm3
- pxor %xmm4, %xmm4
- pxor %xmm5, %xmm5
- pxor %xmm6, %xmm6
- pxor %xmm7, %xmm7
- ret
-.size _vpaes_schedule_core,.-_vpaes_schedule_core
-
-##
-## .aes_schedule_192_smear
-##
-## Smear the short, low side in the 192-bit key schedule.
-##
-## Inputs:
-## %xmm7: high side, b a x y
-## %xmm6: low side, d c 0 0
-## %xmm13: 0
-##
-## Outputs:
-## %xmm6: b+c+d b+c 0 0
-## %xmm0: b+c+d b+c b a
-##
-.type _vpaes_schedule_192_smear,\@abi-omnipotent
-.align 16
-_vpaes_schedule_192_smear:
- pshufd \$0x80, %xmm6, %xmm0 # d c 0 0 -> c 0 0 0
- pxor %xmm0, %xmm6 # -> c+d c 0 0
- pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
- pxor %xmm0, %xmm6 # -> b+c+d b+c b a
- movdqa %xmm6, %xmm0
- pxor %xmm1, %xmm1
- movhlps %xmm1, %xmm6 # clobber low side with zeros
- ret
-.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
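
The pshufd/pxor/movhlps sequence of _vpaes_schedule_192_smear reduces to a handful of dword XORs. A C transcription using the same high-to-low (b a x y) notation as the comment above, where '+' is XOR:

    #include <stdint.h>

    typedef struct { uint32_t d[4]; } vec128;  /* d[3] is the highest dword */

    /* xmm6 = (d, c, 0, 0), xmm7 = (b, a, x, y)
     * -> xmm6 = (b+c+d, b+c, 0, 0), xmm0 = (b+c+d, b+c, b, a) */
    static void smear192(vec128 *x6, const vec128 *x7, vec128 *x0)
    {
        x6->d[3] ^= x6->d[2];        /* pshufd $0x80 + pxor: fold c into d */
        x6->d[3] ^= x7->d[3];        /* pshufd $0xFE + pxor: fold in b ... */
        x6->d[2] ^= x7->d[3];
        x6->d[1] ^= x7->d[3];
        x6->d[0] ^= x7->d[2];        /* ... and a                          */
        *x0 = *x6;                   /* movdqa %xmm6,%xmm0                 */
        x6->d[1] = x6->d[0] = 0;     /* movhlps: clobber low side w/ zeros */
    }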
-
-##
-## .aes_schedule_round
-##
-## Runs one main round of the key schedule on %xmm0, %xmm7
-##
-## Specifically, runs subbytes on the high dword of %xmm0
-## then rotates it by one byte and xors into the low dword of
-## %xmm7.
-##
-## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
-## next rcon.
-##
-## Smears the dwords of %xmm7 by xoring the low into the
-## second low, result into third, result into highest.
-##
-## Returns results in %xmm7 = %xmm0.
-## Clobbers %xmm1-%xmm4, %r11.
-##
-.type _vpaes_schedule_round,\@abi-omnipotent
-.align 16
-_vpaes_schedule_round:
- # extract rcon from xmm8
- pxor %xmm1, %xmm1
- palignr \$15, %xmm8, %xmm1
- palignr \$15, %xmm8, %xmm8
- pxor %xmm1, %xmm7
-
- # rotate
- pshufd \$0xFF, %xmm0, %xmm0
- palignr \$1, %xmm0, %xmm0
-
- # fall through...
-
- # low round: same as high round, but no rotation and no rcon.
-_vpaes_schedule_low_round:
- # smear xmm7
- movdqa %xmm7, %xmm1
- pslldq \$4, %xmm7
- pxor %xmm1, %xmm7
- movdqa %xmm7, %xmm1
- pslldq \$8, %xmm7
- pxor %xmm1, %xmm7
- pxor .Lk_s63(%rip), %xmm7
-
- # subbytes
- movdqa %xmm9, %xmm1
- pandn %xmm0, %xmm1
- psrld \$4, %xmm1 # 1 = i
- pand %xmm9, %xmm0 # 0 = k
- movdqa %xmm11, %xmm2 # 2 : a/k
- pshufb %xmm0, %xmm2 # 2 = a/k
- pxor %xmm1, %xmm0 # 0 = j
- movdqa %xmm10, %xmm3 # 3 : 1/i
- pshufb %xmm1, %xmm3 # 3 = 1/i
- pxor %xmm2, %xmm3 # 3 = iak = 1/i + a/k
- movdqa %xmm10, %xmm4 # 4 : 1/j
- pshufb %xmm0, %xmm4 # 4 = 1/j
- pxor %xmm2, %xmm4 # 4 = jak = 1/j + a/k
- movdqa %xmm10, %xmm2 # 2 : 1/iak
- pshufb %xmm3, %xmm2 # 2 = 1/iak
- pxor %xmm0, %xmm2 # 2 = io
- movdqa %xmm10, %xmm3 # 3 : 1/jak
- pshufb %xmm4, %xmm3 # 3 = 1/jak
- pxor %xmm1, %xmm3 # 3 = jo
- movdqa %xmm13, %xmm4 # 4 : sbou
- pshufb %xmm2, %xmm4 # 4 = sbou
- movdqa %xmm12, %xmm0 # 0 : sbot
- pshufb %xmm3, %xmm0 # 0 = sb1t
- pxor %xmm4, %xmm0 # 0 = sbox output
-
- # add in smeared stuff
- pxor %xmm7, %xmm0
- movdqa %xmm0, %xmm7
- ret
-.size _vpaes_schedule_round,.-_vpaes_schedule_round
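
The "smear xmm7" prologue shared by both round entry points is a two-step prefix XOR over the four dwords of the register. With dwords stored low-to-high in w[0..3], the pslldq/pxor pairs come down to:

    #include <stdint.h>

    static void smear(uint32_t w[4])
    {
        /* pslldq $4 + pxor: (w0, w0^w1, w1^w2, w2^w3) */
        w[3] ^= w[2]; w[2] ^= w[1]; w[1] ^= w[0];
        /* pslldq $8 + pxor: (w0, w0^w1, w0^w1^w2, w0^w1^w2^w3) */
        w[3] ^= w[1]; w[2] ^= w[0];
    }

The subbytes half that follows is the same nibble-lookup pattern modeled earlier for the encrypt core.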
-
-##
-## .aes_schedule_transform
-##
-## Linear-transform %xmm0 according to tables at (%r11)
-##
-## Requires that %xmm9 = 0x0F0F... as in preheat
-## Output in %xmm0
-## Clobbers %xmm1, %xmm2
-##
-.type _vpaes_schedule_transform,\@abi-omnipotent
-.align 16
-_vpaes_schedule_transform:
- movdqa %xmm9, %xmm1
- pandn %xmm0, %xmm1
- psrld \$4, %xmm1
- pand %xmm9, %xmm0
- movdqa (%r11), %xmm2 # lo
- pshufb %xmm0, %xmm2
- movdqa 16(%r11), %xmm0 # hi
- pshufb %xmm1, %xmm0
- pxor %xmm2, %xmm0
- ret
-.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
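
Two 16-entry tables suffice in _vpaes_schedule_transform because the map is GF(2^8)-linear: T(16*hi + lo) = T(16*hi) ^ T(lo). In C, with placeholder tables standing in for the pair at (%r11):

    #include <stdint.h>

    static void schedule_transform(uint8_t v[16],
                                   const uint8_t lo_tbl[16],   /* (%r11)   */
                                   const uint8_t hi_tbl[16])   /* 16(%r11) */
    {
        for (int i = 0; i < 16; i++)
            v[i] = lo_tbl[v[i] & 0x0f] ^ hi_tbl[v[i] >> 4];
    }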
-
-##
-## .aes_schedule_mangle
-##
-## Mangle xmm0 from (basis-transformed) standard version
-## to our version.
-##
-## On encrypt,
-## xor with 0x63
-## multiply by circulant 0,1,1,1
-## apply shiftrows transform
-##
-## On decrypt,
-## xor with 0x63
-## multiply by "inverse mixcolumns" circulant E,B,D,9
-## deskew
-## apply shiftrows transform
-##
-##
-## Writes out to (%rdx), and increments or decrements it
-## Keeps track of round number mod 4 in %r8
-## Preserves xmm0
-## Clobbers xmm1-xmm5
-##
-.type _vpaes_schedule_mangle,\@abi-omnipotent
-.align 16
-_vpaes_schedule_mangle:
- movdqa %xmm0, %xmm4 # save xmm0 for later
- movdqa .Lk_mc_forward(%rip),%xmm5
- test %rcx, %rcx
- jnz .Lschedule_mangle_dec
-
- # encrypting
- add \$16, %rdx
- pxor .Lk_s63(%rip),%xmm4
- pshufb %xmm5, %xmm4
- movdqa %xmm4, %xmm3
- pshufb %xmm5, %xmm4
- pxor %xmm4, %xmm3
- pshufb %xmm5, %xmm4
- pxor %xmm4, %xmm3
-
- jmp .Lschedule_mangle_both
-.align 16
-.Lschedule_mangle_dec:
- # inverse mix columns
- lea .Lk_dksd(%rip),%r11
- movdqa %xmm9, %xmm1
- pandn %xmm4, %xmm1
- psrld \$4, %xmm1 # 1 = hi
- pand %xmm9, %xmm4 # 4 = lo
-
- movdqa 0x00(%r11), %xmm2
- pshufb %xmm4, %xmm2
- movdqa 0x10(%r11), %xmm3
- pshufb %xmm1, %xmm3
- pxor %xmm2, %xmm3
- pshufb %xmm5, %xmm3
-
- movdqa 0x20(%r11), %xmm2
- pshufb %xmm4, %xmm2
- pxor %xmm3, %xmm2
- movdqa 0x30(%r11), %xmm3
- pshufb %xmm1, %xmm3
- pxor %xmm2, %xmm3
- pshufb %xmm5, %xmm3
-
- movdqa 0x40(%r11), %xmm2
- pshufb %xmm4, %xmm2
- pxor %xmm3, %xmm2
- movdqa 0x50(%r11), %xmm3
- pshufb %xmm1, %xmm3
- pxor %xmm2, %xmm3
- pshufb %xmm5, %xmm3
-
- movdqa 0x60(%r11), %xmm2
- pshufb %xmm4, %xmm2
- pxor %xmm3, %xmm2
- movdqa 0x70(%r11), %xmm3
- pshufb %xmm1, %xmm3
- pxor %xmm2, %xmm3
-
- add \$-16, %rdx
-
-.Lschedule_mangle_both:
- movdqa (%r8,%r10),%xmm1
- pshufb %xmm1,%xmm3
- add \$-16, %r8
- and \$0x30, %r8
- movdqu %xmm3, (%rdx)
- ret
-.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
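
On the encrypt side, each pshufb with .Lk_mc_forward rotates every 4-byte column by one byte, so XORing three successive rotations multiplies the column by the circulant (0,1,1,1) named in the comment. A C model with columns as little-endian 32-bit words (the .Lk_s63 XOR and the final shiftrows permute are left out):

    #include <stdint.h>

    static uint32_t rot_col(uint32_t w)      /* one .Lk_mc_forward pshufb */
    {
        return (w >> 8) | (w << 24);         /* byte i takes byte i+1     */
    }

    static void mangle_enc(uint32_t col[4])
    {
        for (int i = 0; i < 4; i++) {
            uint32_t r1 = rot_col(col[i]);
            uint32_t r2 = rot_col(r1);
            uint32_t r3 = rot_col(r2);
            col[i] = r1 ^ r2 ^ r3;           /* circulant (0,1,1,1)       */
        }
    }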
-
-#
-# Interface to OpenSSL
-#
-.globl ${PREFIX}_set_encrypt_key
-.type ${PREFIX}_set_encrypt_key,\@function,3
-.align 16
-${PREFIX}_set_encrypt_key:
-___
-$code.=<<___ if ($win64);
- lea -0xb8(%rsp),%rsp
- movaps %xmm6,0x10(%rsp)
- movaps %xmm7,0x20(%rsp)
- movaps %xmm8,0x30(%rsp)
- movaps %xmm9,0x40(%rsp)
- movaps %xmm10,0x50(%rsp)
- movaps %xmm11,0x60(%rsp)
- movaps %xmm12,0x70(%rsp)
- movaps %xmm13,0x80(%rsp)
- movaps %xmm14,0x90(%rsp)
- movaps %xmm15,0xa0(%rsp)
-.Lenc_key_body:
-___
-$code.=<<___;
- mov %esi,%eax
- shr \$5,%eax
- add \$5,%eax
- mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
-
- mov \$0,%ecx
- mov \$0x30,%r8d
- call _vpaes_schedule_core
-___
-$code.=<<___ if ($win64);
- movaps 0x10(%rsp),%xmm6
- movaps 0x20(%rsp),%xmm7
- movaps 0x30(%rsp),%xmm8
- movaps 0x40(%rsp),%xmm9
- movaps 0x50(%rsp),%xmm10
- movaps 0x60(%rsp),%xmm11
- movaps 0x70(%rsp),%xmm12
- movaps 0x80(%rsp),%xmm13
- movaps 0x90(%rsp),%xmm14
- movaps 0xa0(%rsp),%xmm15
- lea 0xb8(%rsp),%rsp
-.Lenc_key_epilogue:
-___
-$code.=<<___;
- xor %eax,%eax
- ret
-.size ${PREFIX}_set_encrypt_key,.-${PREFIX}_set_encrypt_key
-
-.globl ${PREFIX}_set_decrypt_key
-.type ${PREFIX}_set_decrypt_key,\@function,3
-.align 16
-${PREFIX}_set_decrypt_key:
-___
-$code.=<<___ if ($win64);
- lea -0xb8(%rsp),%rsp
- movaps %xmm6,0x10(%rsp)
- movaps %xmm7,0x20(%rsp)
- movaps %xmm8,0x30(%rsp)
- movaps %xmm9,0x40(%rsp)
- movaps %xmm10,0x50(%rsp)
- movaps %xmm11,0x60(%rsp)
- movaps %xmm12,0x70(%rsp)
- movaps %xmm13,0x80(%rsp)
- movaps %xmm14,0x90(%rsp)
- movaps %xmm15,0xa0(%rsp)
-.Ldec_key_body:
-___
-$code.=<<___;
- mov %esi,%eax
- shr \$5,%eax
- add \$5,%eax
- mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
- shl \$4,%eax
- lea 16(%rdx,%rax),%rdx
-
- mov \$1,%ecx
- mov %esi,%r8d
- shr \$1,%r8d
- and \$32,%r8d
- xor \$32,%r8d # nbits==192?0:32
- call _vpaes_schedule_core
-___
-$code.=<<___ if ($win64);
- movaps 0x10(%rsp),%xmm6
- movaps 0x20(%rsp),%xmm7
- movaps 0x30(%rsp),%xmm8
- movaps 0x40(%rsp),%xmm9
- movaps 0x50(%rsp),%xmm10
- movaps 0x60(%rsp),%xmm11
- movaps 0x70(%rsp),%xmm12
- movaps 0x80(%rsp),%xmm13
- movaps 0x90(%rsp),%xmm14
- movaps 0xa0(%rsp),%xmm15
- lea 0xb8(%rsp),%rsp
-.Ldec_key_epilogue:
-___
-$code.=<<___;
- xor %eax,%eax
- ret
-.size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
-
-.globl ${PREFIX}_encrypt
-.type ${PREFIX}_encrypt,\@function,3
-.align 16
-${PREFIX}_encrypt:
-___
-$code.=<<___ if ($win64);
- lea -0xb8(%rsp),%rsp
- movaps %xmm6,0x10(%rsp)
- movaps %xmm7,0x20(%rsp)
- movaps %xmm8,0x30(%rsp)
- movaps %xmm9,0x40(%rsp)
- movaps %xmm10,0x50(%rsp)
- movaps %xmm11,0x60(%rsp)
- movaps %xmm12,0x70(%rsp)
- movaps %xmm13,0x80(%rsp)
- movaps %xmm14,0x90(%rsp)
- movaps %xmm15,0xa0(%rsp)
-.Lenc_body:
-___
-$code.=<<___;
- movdqu (%rdi),%xmm0
- call _vpaes_preheat
- call _vpaes_encrypt_core
- movdqu %xmm0,(%rsi)
-___
-$code.=<<___ if ($win64);
- movaps 0x10(%rsp),%xmm6
- movaps 0x20(%rsp),%xmm7
- movaps 0x30(%rsp),%xmm8
- movaps 0x40(%rsp),%xmm9
- movaps 0x50(%rsp),%xmm10
- movaps 0x60(%rsp),%xmm11
- movaps 0x70(%rsp),%xmm12
- movaps 0x80(%rsp),%xmm13
- movaps 0x90(%rsp),%xmm14
- movaps 0xa0(%rsp),%xmm15
- lea 0xb8(%rsp),%rsp
-.Lenc_epilogue:
-___
-$code.=<<___;
- ret
-.size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
-
-.globl ${PREFIX}_decrypt
-.type ${PREFIX}_decrypt,\@function,3
-.align 16
-${PREFIX}_decrypt:
-___
-$code.=<<___ if ($win64);
- lea -0xb8(%rsp),%rsp
- movaps %xmm6,0x10(%rsp)
- movaps %xmm7,0x20(%rsp)
- movaps %xmm8,0x30(%rsp)
- movaps %xmm9,0x40(%rsp)
- movaps %xmm10,0x50(%rsp)
- movaps %xmm11,0x60(%rsp)
- movaps %xmm12,0x70(%rsp)
- movaps %xmm13,0x80(%rsp)
- movaps %xmm14,0x90(%rsp)
- movaps %xmm15,0xa0(%rsp)
-.Ldec_body:
-___
-$code.=<<___;
- movdqu (%rdi),%xmm0
- call _vpaes_preheat
- call _vpaes_decrypt_core
- movdqu %xmm0,(%rsi)
-___
-$code.=<<___ if ($win64);
- movaps 0x10(%rsp),%xmm6
- movaps 0x20(%rsp),%xmm7
- movaps 0x30(%rsp),%xmm8
- movaps 0x40(%rsp),%xmm9
- movaps 0x50(%rsp),%xmm10
- movaps 0x60(%rsp),%xmm11
- movaps 0x70(%rsp),%xmm12
- movaps 0x80(%rsp),%xmm13
- movaps 0x90(%rsp),%xmm14
- movaps 0xa0(%rsp),%xmm15
- lea 0xb8(%rsp),%rsp
-.Ldec_epilogue:
-___
-$code.=<<___;
- ret
-.size ${PREFIX}_decrypt,.-${PREFIX}_decrypt
-___
-{
-my ($inp,$out,$len,$key,$ivp,$enc)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9");
-# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
-# size_t length, const AES_KEY *key,
-# unsigned char *ivp,const int enc);
-$code.=<<___;
-.globl ${PREFIX}_cbc_encrypt
-.type ${PREFIX}_cbc_encrypt,\@function,6
-.align 16
-${PREFIX}_cbc_encrypt:
- xchg $key,$len
-___
-($len,$key)=($key,$len);
-$code.=<<___;
- sub \$16,$len
- jc .Lcbc_abort
-___
-$code.=<<___ if ($win64);
- lea -0xb8(%rsp),%rsp
- movaps %xmm6,0x10(%rsp)
- movaps %xmm7,0x20(%rsp)
- movaps %xmm8,0x30(%rsp)
- movaps %xmm9,0x40(%rsp)
- movaps %xmm10,0x50(%rsp)
- movaps %xmm11,0x60(%rsp)
- movaps %xmm12,0x70(%rsp)
- movaps %xmm13,0x80(%rsp)
- movaps %xmm14,0x90(%rsp)
- movaps %xmm15,0xa0(%rsp)
-.Lcbc_body:
-___
-$code.=<<___;
- movdqu ($ivp),%xmm6 # load IV
- sub $inp,$out
- call _vpaes_preheat
- cmp \$0,${enc}d
- je .Lcbc_dec_loop
- jmp .Lcbc_enc_loop
-.align 16
-.Lcbc_enc_loop:
- movdqu ($inp),%xmm0
- pxor %xmm6,%xmm0
- call _vpaes_encrypt_core
- movdqa %xmm0,%xmm6
- movdqu %xmm0,($out,$inp)
- lea 16($inp),$inp
- sub \$16,$len
- jnc .Lcbc_enc_loop
- jmp .Lcbc_done
-.align 16
-.Lcbc_dec_loop:
- movdqu ($inp),%xmm0
- movdqa %xmm0,%xmm7
- call _vpaes_decrypt_core
- pxor %xmm6,%xmm0
- movdqa %xmm7,%xmm6
- movdqu %xmm0,($out,$inp)
- lea 16($inp),$inp
- sub \$16,$len
- jnc .Lcbc_dec_loop
-.Lcbc_done:
- movdqu %xmm6,($ivp) # save IV
-___
-$code.=<<___ if ($win64);
- movaps 0x10(%rsp),%xmm6
- movaps 0x20(%rsp),%xmm7
- movaps 0x30(%rsp),%xmm8
- movaps 0x40(%rsp),%xmm9
- movaps 0x50(%rsp),%xmm10
- movaps 0x60(%rsp),%xmm11
- movaps 0x70(%rsp),%xmm12
- movaps 0x80(%rsp),%xmm13
- movaps 0x90(%rsp),%xmm14
- movaps 0xa0(%rsp),%xmm15
- lea 0xb8(%rsp),%rsp
-.Lcbc_epilogue:
-___
-$code.=<<___;
-.Lcbc_abort:
- ret
-.size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
-___
-}
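
Structurally the two CBC loops above are the usual chain. A C sketch with the cipher core abstracted to a callback; it works in place for brevity (the asm writes to out = in + displacement) and, per the header note, trailing partial blocks are simply not handled:

    #include <stdint.h>
    #include <string.h>

    typedef void (*block_fn)(uint8_t blk[16], const void *key);

    static void cbc_encrypt(uint8_t *buf, size_t len, const void *key,
                            uint8_t iv[16], block_fn enc)
    {
        for (size_t off = 0; off + 16 <= len; off += 16) {
            for (int i = 0; i < 16; i++)
                buf[off + i] ^= iv[i];        /* pxor %xmm6,%xmm0    */
            enc(buf + off, key);              /* _vpaes_encrypt_core */
            memcpy(iv, buf + off, 16);        /* movdqa %xmm0,%xmm6  */
        }
    }

    static void cbc_decrypt(uint8_t *buf, size_t len, const void *key,
                            uint8_t iv[16], block_fn dec)
    {
        uint8_t ct[16];
        for (size_t off = 0; off + 16 <= len; off += 16) {
            memcpy(ct, buf + off, 16);        /* movdqa %xmm0,%xmm7  */
            dec(buf + off, key);              /* _vpaes_decrypt_core */
            for (int i = 0; i < 16; i++)
                buf[off + i] ^= iv[i];        /* pxor %xmm6,%xmm0    */
            memcpy(iv, ct, 16);               /* movdqa %xmm7,%xmm6  */
        }
    }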
-$code.=<<___;
-##
-## _aes_preheat
-##
-## Fills register %r10 -> .aes_consts (so you can -fPIC)
-## and %xmm9-%xmm15 as specified below.
-##
-.type _vpaes_preheat,\@abi-omnipotent
-.align 16
-_vpaes_preheat:
- lea .Lk_s0F(%rip), %r10
- movdqa -0x20(%r10), %xmm10 # .Lk_inv
- movdqa -0x10(%r10), %xmm11 # .Lk_inv+16
- movdqa 0x00(%r10), %xmm9 # .Lk_s0F
- movdqa 0x30(%r10), %xmm13 # .Lk_sb1
- movdqa 0x40(%r10), %xmm12 # .Lk_sb1+16
- movdqa 0x50(%r10), %xmm15 # .Lk_sb2
- movdqa 0x60(%r10), %xmm14 # .Lk_sb2+16
- ret
-.size _vpaes_preheat,.-_vpaes_preheat
-########################################################
-## ##
-## Constants ##
-## ##
-########################################################
-.type _vpaes_consts,\@object
-.align 64
-_vpaes_consts:
-.Lk_inv: # inv, inva
- .quad 0x0E05060F0D080180, 0x040703090A0B0C02
- .quad 0x01040A060F0B0780, 0x030D0E0C02050809
-
-.Lk_s0F: # s0F
- .quad 0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F0F0F0F0F
-
-.Lk_ipt: # input transform (lo, hi)
- .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
- .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
-
-.Lk_sb1: # sb1u, sb1t
- .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
- .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
-.Lk_sb2: # sb2u, sb2t
- .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
- .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
-.Lk_sbo: # sbou, sbot
- .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
- .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
-
-.Lk_mc_forward: # mc_forward
- .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
- .quad 0x080B0A0904070605, 0x000302010C0F0E0D
- .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
- .quad 0x000302010C0F0E0D, 0x080B0A0904070605
-
-.Lk_mc_backward:# mc_backward
- .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
- .quad 0x020100030E0D0C0F, 0x0A09080B06050407
- .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
- .quad 0x0A09080B06050407, 0x020100030E0D0C0F
-
-.Lk_sr: # sr
- .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
- .quad 0x030E09040F0A0500, 0x0B06010C07020D08
- .quad 0x0F060D040B020900, 0x070E050C030A0108
- .quad 0x0B0E0104070A0D00, 0x0306090C0F020508
-
-.Lk_rcon: # rcon
- .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
-
-.Lk_s63: # s63: all equal to 0x63 transformed
- .quad 0x5B5B5B5B5B5B5B5B, 0x5B5B5B5B5B5B5B5B
-
-.Lk_opt: # output transform
- .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
- .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
-
-.Lk_deskew: # deskew tables: inverts the sbox's "skew"
- .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
- .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
-
-##
-## Decryption stuff
-## Key schedule constants
-##
-.Lk_dksd: # decryption key schedule: invskew x*D
- .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
- .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
-.Lk_dksb: # decryption key schedule: invskew x*B
- .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
- .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
-.Lk_dkse: # decryption key schedule: invskew x*E + 0x63
- .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
- .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
-.Lk_dks9: # decryption key schedule: invskew x*9
- .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
- .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
-
-##
-## Decryption stuff
-## Round function constants
-##
-.Lk_dipt: # decryption input transform
- .quad 0x0F505B040B545F00, 0x154A411E114E451A
- .quad 0x86E383E660056500, 0x12771772F491F194
-
-.Lk_dsb9: # decryption sbox output *9*u, *9*t
- .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
- .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
-.Lk_dsbd: # decryption sbox output *D*u, *D*t
- .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
- .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
-.Lk_dsbb: # decryption sbox output *B*u, *B*t
- .quad 0xD022649296B44200, 0x602646F6B0F2D404
- .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
-.Lk_dsbe: # decryption sbox output *E*u, *E*t
- .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
- .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
-.Lk_dsbo: # decryption sbox final output
- .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
- .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
-.asciz "Vector Permutation AES for x86_64/SSSE3, Mike Hamburg (Stanford University)"
-.align 64
-.size _vpaes_consts,.-_vpaes_consts
-___
-
-if ($win64) {
-# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
-# CONTEXT *context,DISPATCHER_CONTEXT *disp)
-$rec="%rcx";
-$frame="%rdx";
-$context="%r8";
-$disp="%r9";
-
-$code.=<<___;
-.extern __imp_RtlVirtualUnwind
-.type se_handler,\@abi-omnipotent
-.align 16
-se_handler:
- push %rsi
- push %rdi
- push %rbx
- push %rbp
- push %r12
- push %r13
- push %r14
- push %r15
- pushfq
- sub \$64,%rsp
-
- mov 120($context),%rax # pull context->Rax
- mov 248($context),%rbx # pull context->Rip
-
- mov 8($disp),%rsi # disp->ImageBase
- mov 56($disp),%r11 # disp->HandlerData
-
- mov 0(%r11),%r10d # HandlerData[0]
- lea (%rsi,%r10),%r10 # prologue label
- cmp %r10,%rbx # context->Rip<prologue label
- jb .Lin_prologue
-
- mov 152($context),%rax # pull context->Rsp
-
- mov 4(%r11),%r10d # HandlerData[1]
- lea (%rsi,%r10),%r10 # epilogue label
- cmp %r10,%rbx # context->Rip>=epilogue label
- jae .Lin_prologue
-
- lea 16(%rax),%rsi # %xmm save area
- lea 512($context),%rdi # &context.Xmm6
- mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
- .long 0xa548f3fc # cld; rep movsq
- lea 0xb8(%rax),%rax # adjust stack pointer
-
-.Lin_prologue:
- mov 8(%rax),%rdi
- mov 16(%rax),%rsi
- mov %rax,152($context) # restore context->Rsp
- mov %rsi,168($context) # restore context->Rsi
- mov %rdi,176($context) # restore context->Rdi
-
- mov 40($disp),%rdi # disp->ContextRecord
- mov $context,%rsi # context
- mov \$`1232/8`,%ecx # sizeof(CONTEXT)
- .long 0xa548f3fc # cld; rep movsq
-
- mov $disp,%rsi
- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
- mov 8(%rsi),%rdx # arg2, disp->ImageBase
- mov 0(%rsi),%r8 # arg3, disp->ControlPc
- mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
- mov 40(%rsi),%r10 # disp->ContextRecord
- lea 56(%rsi),%r11 # &disp->HandlerData
- lea 24(%rsi),%r12 # &disp->EstablisherFrame
- mov %r10,32(%rsp) # arg5
- mov %r11,40(%rsp) # arg6
- mov %r12,48(%rsp) # arg7
- mov %rcx,56(%rsp) # arg8, (NULL)
- call *__imp_RtlVirtualUnwind(%rip)
-
- mov \$1,%eax # ExceptionContinueSearch
- add \$64,%rsp
- popfq
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbp
- pop %rbx
- pop %rdi
- pop %rsi
- ret
-.size se_handler,.-se_handler
-
-.section .pdata
-.align 4
- .rva .LSEH_begin_${PREFIX}_set_encrypt_key
- .rva .LSEH_end_${PREFIX}_set_encrypt_key
- .rva .LSEH_info_${PREFIX}_set_encrypt_key
-
- .rva .LSEH_begin_${PREFIX}_set_decrypt_key
- .rva .LSEH_end_${PREFIX}_set_decrypt_key
- .rva .LSEH_info_${PREFIX}_set_decrypt_key
-
- .rva .LSEH_begin_${PREFIX}_encrypt
- .rva .LSEH_end_${PREFIX}_encrypt
- .rva .LSEH_info_${PREFIX}_encrypt
-
- .rva .LSEH_begin_${PREFIX}_decrypt
- .rva .LSEH_end_${PREFIX}_decrypt
- .rva .LSEH_info_${PREFIX}_decrypt
-
- .rva .LSEH_begin_${PREFIX}_cbc_encrypt
- .rva .LSEH_end_${PREFIX}_cbc_encrypt
- .rva .LSEH_info_${PREFIX}_cbc_encrypt
-
-.section .xdata
-.align 8
-.LSEH_info_${PREFIX}_set_encrypt_key:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lenc_key_body,.Lenc_key_epilogue # HandlerData[]
-.LSEH_info_${PREFIX}_set_decrypt_key:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Ldec_key_body,.Ldec_key_epilogue # HandlerData[]
-.LSEH_info_${PREFIX}_encrypt:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lenc_body,.Lenc_epilogue # HandlerData[]
-.LSEH_info_${PREFIX}_decrypt:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Ldec_body,.Ldec_epilogue # HandlerData[]
-.LSEH_info_${PREFIX}_cbc_encrypt:
- .byte 9,0,0,0
- .rva se_handler
- .rva .Lcbc_body,.Lcbc_epilogue # HandlerData[]
-___
-}
-
-$code =~ s/\`([^\`]*)\`/eval($1)/gem;
-
-print $code;
-
-close STDOUT;