author     Parménides GV <parmegv@sdf.org>  2014-06-13 13:24:13 +0200
committer  Parménides GV <parmegv@sdf.org>  2014-06-13 13:24:13 +0200
commit     69b10487fcd63dfe1e94fa97c9f3fd9b035646b4 (patch)
tree       d4960893a4444634d404c7fbe4fa3e8778d30179 /app/openssl/crypto/aes/asm/aes-s390x.pl
parent     9f6cfff38ae87922adc022300e1e2fd1c0d4c3e4 (diff)
parent     e45929e220fe49e30235a1d4d36c1a413547f8bf (diff)
Merge branch 'develop'
Diffstat (limited to 'app/openssl/crypto/aes/asm/aes-s390x.pl')
-rw-r--r--  app/openssl/crypto/aes/asm/aes-s390x.pl  1054
1 file changed, 976 insertions(+), 78 deletions(-)
diff --git a/app/openssl/crypto/aes/asm/aes-s390x.pl b/app/openssl/crypto/aes/asm/aes-s390x.pl index 7e018892..e75dcd03 100644 --- a/app/openssl/crypto/aes/asm/aes-s390x.pl +++ b/app/openssl/crypto/aes/asm/aes-s390x.pl @@ -44,12 +44,57 @@ # Unlike previous version hardware support detection takes place only # at the moment of key schedule setup, which is denoted in key->rounds. # This is done, because deferred key setup can't be made MT-safe, not -# for key lengthes longer than 128 bits. +# for keys longer than 128 bits. # # Add AES_cbc_encrypt, which gives incredible performance improvement, # it was measured to be ~6.6x. It's less than previously mentioned 8x, # because software implementation was optimized. +# May 2010. +# +# Add AES_ctr32_encrypt. If hardware-assisted, it provides up to 4.3x +# performance improvement over "generic" counter mode routine relying +# on single-block, also hardware-assisted, AES_encrypt. "Up to" refers +# to the fact that exact throughput value depends on current stack +# frame alignment within 4KB page. In worst case you get ~75% of the +# maximum, but *on average* it would be as much as ~98%. Meaning that +# worst case is unlike, it's like hitting ravine on plateau. + +# November 2010. +# +# Adapt for -m31 build. If kernel supports what's called "highgprs" +# feature on Linux [see /proc/cpuinfo], it's possible to use 64-bit +# instructions and achieve "64-bit" performance even in 31-bit legacy +# application context. The feature is not specific to any particular +# processor, as long as it's "z-CPU". Latter implies that the code +# remains z/Architecture specific. On z990 it was measured to perform +# 2x better than code generated by gcc 4.3. + +# December 2010. +# +# Add support for z196 "cipher message with counter" instruction. +# Note however that it's disengaged, because it was measured to +# perform ~12% worse than vanilla km-based code... + +# February 2011. +# +# Add AES_xts_[en|de]crypt. This includes support for z196 km-xts-aes +# instructions, which deliver ~70% improvement at 8KB block size over +# vanilla km-based code, 37% - at most like 512-bytes block size. 
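The May 2010 note above compares AES_ctr32_encrypt against a "generic" counter-mode routine built on the single-block, hardware-assisted AES_encrypt. For reference, a minimal C sketch of that generic construction, assuming only the public AES_encrypt/AES_KEY interface from <openssl/aes.h>; the _ref helper names are illustrative and not part of this patch:

```c
#include <stddef.h>
#include <string.h>
#include <openssl/aes.h>

/* Increment only the low 32 bits of the big-endian counter block,
 * which is what the "ctr32" in the routine's name refers to. */
static void ctr32_inc(unsigned char ctr[16])
{
    for (int i = 15; i >= 12; i--)
        if (++ctr[i] != 0)
            break;
}

/* Generic counter mode: one block-cipher call per 16-byte block. */
void aes_ctr32_encrypt_ref(const unsigned char *in, unsigned char *out,
                           size_t blocks, const AES_KEY *key,
                           const unsigned char ivec[16])
{
    unsigned char ctr[16], ks[16];

    memcpy(ctr, ivec, sizeof(ctr));
    while (blocks--) {
        AES_encrypt(ctr, ks, key);       /* keystream block */
        for (int i = 0; i < 16; i++)
            out[i] = in[i] ^ ks[i];      /* XOR keystream into the data */
        ctr32_inc(ctr);
        in  += 16;
        out += 16;
    }
}
```

The assembly added in this patch provides the same 32-bit-counter semantics, but prepares whole batches of counter blocks on a page-aligned stack buffer and feeds them to the km (or, if it were enabled, kmctr) instruction instead of calling the block cipher once per block.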
+ +$flavour = shift; + +if ($flavour =~ /3[12]/) { + $SIZE_T=4; + $g=""; +} else { + $SIZE_T=8; + $g="g"; +} + +while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} +open STDOUT,">$output"; + $softonly=0; # allow hardware support $t0="%r0"; $mask="%r0"; @@ -69,6 +114,8 @@ $rounds="%r13"; $ra="%r14"; $sp="%r15"; +$stdframe=16*$SIZE_T+4*8; + sub _data_word() { my $i; while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; } @@ -210,7 +257,7 @@ $code.=<<___ if (!$softonly); .Lesoft: ___ $code.=<<___; - stmg %r3,$ra,24($sp) + stm${g} %r3,$ra,3*$SIZE_T($sp) llgf $s0,0($inp) llgf $s1,4($inp) @@ -220,20 +267,20 @@ $code.=<<___; larl $tbl,AES_Te bras $ra,_s390x_AES_encrypt - lg $out,24($sp) + l${g} $out,3*$SIZE_T($sp) st $s0,0($out) st $s1,4($out) st $s2,8($out) st $s3,12($out) - lmg %r6,$ra,48($sp) + lm${g} %r6,$ra,6*$SIZE_T($sp) br $ra .size AES_encrypt,.-AES_encrypt .type _s390x_AES_encrypt,\@function .align 16 _s390x_AES_encrypt: - stg $ra,152($sp) + st${g} $ra,15*$SIZE_T($sp) x $s0,0($key) x $s1,4($key) x $s2,8($key) @@ -397,7 +444,7 @@ _s390x_AES_encrypt: or $s2,$i3 or $s3,$t3 - lg $ra,152($sp) + l${g} $ra,15*$SIZE_T($sp) xr $s0,$t0 xr $s1,$t2 x $s2,24($key) @@ -536,7 +583,7 @@ $code.=<<___ if (!$softonly); .Ldsoft: ___ $code.=<<___; - stmg %r3,$ra,24($sp) + stm${g} %r3,$ra,3*$SIZE_T($sp) llgf $s0,0($inp) llgf $s1,4($inp) @@ -546,20 +593,20 @@ $code.=<<___; larl $tbl,AES_Td bras $ra,_s390x_AES_decrypt - lg $out,24($sp) + l${g} $out,3*$SIZE_T($sp) st $s0,0($out) st $s1,4($out) st $s2,8($out) st $s3,12($out) - lmg %r6,$ra,48($sp) + lm${g} %r6,$ra,6*$SIZE_T($sp) br $ra .size AES_decrypt,.-AES_decrypt .type _s390x_AES_decrypt,\@function .align 16 _s390x_AES_decrypt: - stg $ra,152($sp) + st${g} $ra,15*$SIZE_T($sp) x $s0,0($key) x $s1,4($key) x $s2,8($key) @@ -703,7 +750,7 @@ _s390x_AES_decrypt: nr $i1,$mask nr $i2,$mask - lg $ra,152($sp) + l${g} $ra,15*$SIZE_T($sp) or $s1,$t1 l $t0,16($key) l $t1,20($key) @@ -732,14 +779,15 @@ ___ $code.=<<___; # void AES_set_encrypt_key(const unsigned char *in, int bits, # AES_KEY *key) { -.globl AES_set_encrypt_key -.type AES_set_encrypt_key,\@function +.globl private_AES_set_encrypt_key +.type private_AES_set_encrypt_key,\@function .align 16 -AES_set_encrypt_key: +private_AES_set_encrypt_key: +_s390x_AES_set_encrypt_key: lghi $t0,0 - clgr $inp,$t0 + cl${g}r $inp,$t0 je .Lminus1 - clgr $key,$t0 + cl${g}r $key,$t0 je .Lminus1 lghi $t0,128 @@ -789,7 +837,8 @@ $code.=<<___ if (!$softonly); je 1f lg %r1,24($inp) stg %r1,24($key) -1: st $bits,236($key) # save bits +1: st $bits,236($key) # save bits [for debugging purposes] + lgr $t0,%r5 st %r5,240($key) # save km code lghi %r2,0 br %r14 @@ -797,7 +846,7 @@ ___ $code.=<<___; .align 16 .Lekey_internal: - stmg %r6,%r13,48($sp) # all non-volatile regs + stm${g} %r4,%r13,4*$SIZE_T($sp) # all non-volatile regs and $key larl $tbl,AES_Te+2048 @@ -857,8 +906,9 @@ $code.=<<___; la $key,16($key) # key+=4 la $t3,4($t3) # i++ brct $rounds,.L128_loop + lghi $t0,10 lghi %r2,0 - lmg %r6,%r13,48($sp) + lm${g} %r4,%r13,4*$SIZE_T($sp) br $ra .align 16 @@ -905,8 +955,9 @@ $code.=<<___; st $s2,32($key) st $s3,36($key) brct $rounds,.L192_continue + lghi $t0,12 lghi %r2,0 - lmg %r6,%r13,48($sp) + lm${g} %r4,%r13,4*$SIZE_T($sp) br $ra .align 16 @@ -967,8 +1018,9 @@ $code.=<<___; st $s2,40($key) st $s3,44($key) brct $rounds,.L256_continue + lghi $t0,14 lghi %r2,0 - lmg %r6,%r13,48($sp) + lm${g} %r4,%r13,4*$SIZE_T($sp) br $ra .align 16 @@ -1011,42 +1063,34 @@ $code.=<<___; .Lminus1: lghi %r2,-1 br $ra -.size 
AES_set_encrypt_key,.-AES_set_encrypt_key +.size private_AES_set_encrypt_key,.-private_AES_set_encrypt_key # void AES_set_decrypt_key(const unsigned char *in, int bits, # AES_KEY *key) { -.globl AES_set_decrypt_key -.type AES_set_decrypt_key,\@function +.globl private_AES_set_decrypt_key +.type private_AES_set_decrypt_key,\@function .align 16 -AES_set_decrypt_key: - stg $key,32($sp) # I rely on AES_set_encrypt_key to - stg $ra,112($sp) # save non-volatile registers! - bras $ra,AES_set_encrypt_key - lg $key,32($sp) - lg $ra,112($sp) +private_AES_set_decrypt_key: + #st${g} $key,4*$SIZE_T($sp) # I rely on AES_set_encrypt_key to + st${g} $ra,14*$SIZE_T($sp) # save non-volatile registers and $key! + bras $ra,_s390x_AES_set_encrypt_key + #l${g} $key,4*$SIZE_T($sp) + l${g} $ra,14*$SIZE_T($sp) ltgr %r2,%r2 bnzr $ra ___ $code.=<<___ if (!$softonly); - l $t0,240($key) + #l $t0,240($key) lhi $t1,16 cr $t0,$t1 jl .Lgo oill $t0,0x80 # set "decrypt" bit st $t0,240($key) br $ra - -.align 16 -.Ldkey_internal: - stg $key,32($sp) - stg $ra,40($sp) - bras $ra,.Lekey_internal - lg $key,32($sp) - lg $ra,40($sp) ___ $code.=<<___; - -.Lgo: llgf $rounds,240($key) +.align 16 +.Lgo: lgr $rounds,$t0 #llgf $rounds,240($key) la $i1,0($key) sllg $i2,$rounds,4 la $i2,0($i2,$key) @@ -1123,13 +1167,14 @@ $code.=<<___; la $key,4($key) brct $rounds,.Lmix - lmg %r6,%r13,48($sp)# as was saved by AES_set_encrypt_key! + lm${g} %r6,%r13,6*$SIZE_T($sp)# as was saved by AES_set_encrypt_key! lghi %r2,0 br $ra -.size AES_set_decrypt_key,.-AES_set_decrypt_key +.size private_AES_set_decrypt_key,.-private_AES_set_decrypt_key ___ -#void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, +######################################################################## +# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out, # size_t length, const AES_KEY *key, # unsigned char *ivec, const int enc) { @@ -1163,7 +1208,7 @@ $code.=<<___ if (!$softonly); l %r0,240($key) # load kmc code lghi $key,15 # res=len%16, len-=res; ngr $key,$len - slgr $len,$key + sl${g}r $len,$key la %r1,16($sp) # parameter block - ivec || key jz .Lkmc_truncated .long 0xb92f0042 # kmc %r4,%r2 @@ -1181,34 +1226,34 @@ $code.=<<___ if (!$softonly); tmll %r0,0x80 jnz .Lkmc_truncated_dec lghi %r1,0 - stg %r1,128($sp) - stg %r1,136($sp) + stg %r1,16*$SIZE_T($sp) + stg %r1,16*$SIZE_T+8($sp) bras %r1,1f - mvc 128(1,$sp),0($inp) + mvc 16*$SIZE_T(1,$sp),0($inp) 1: ex $key,0(%r1) la %r1,16($sp) # restore parameter block - la $inp,128($sp) + la $inp,16*$SIZE_T($sp) lghi $len,16 .long 0xb92f0042 # kmc %r4,%r2 j .Lkmc_done .align 16 .Lkmc_truncated_dec: - stg $out,64($sp) - la $out,128($sp) + st${g} $out,4*$SIZE_T($sp) + la $out,16*$SIZE_T($sp) lghi $len,16 .long 0xb92f0042 # kmc %r4,%r2 - lg $out,64($sp) + l${g} $out,4*$SIZE_T($sp) bras %r1,2f - mvc 0(1,$out),128($sp) + mvc 0(1,$out),16*$SIZE_T($sp) 2: ex $key,0(%r1) j .Lkmc_done .align 16 .Lcbc_software: ___ $code.=<<___; - stmg $key,$ra,40($sp) + stm${g} $key,$ra,5*$SIZE_T($sp) lhi %r0,0 - cl %r0,164($sp) + cl %r0,`$stdframe+$SIZE_T-4`($sp) je .Lcbc_decrypt larl $tbl,AES_Te @@ -1219,10 +1264,10 @@ $code.=<<___; llgf $s3,12($ivp) lghi $t0,16 - slgr $len,$t0 + sl${g}r $len,$t0 brc 4,.Lcbc_enc_tail # if borrow .Lcbc_enc_loop: - stmg $inp,$out,16($sp) + stm${g} $inp,$out,2*$SIZE_T($sp) x $s0,0($inp) x $s1,4($inp) x $s2,8($inp) @@ -1231,7 +1276,7 @@ $code.=<<___; bras $ra,_s390x_AES_encrypt - lmg $inp,$key,16($sp) + lm${g} $inp,$key,2*$SIZE_T($sp) st $s0,0($out) st $s1,4($out) st $s2,8($out) @@ -1240,33 +1285,33 @@ 
$code.=<<___; la $inp,16($inp) la $out,16($out) lghi $t0,16 - ltgr $len,$len + lt${g}r $len,$len jz .Lcbc_enc_done - slgr $len,$t0 + sl${g}r $len,$t0 brc 4,.Lcbc_enc_tail # if borrow j .Lcbc_enc_loop .align 16 .Lcbc_enc_done: - lg $ivp,48($sp) + l${g} $ivp,6*$SIZE_T($sp) st $s0,0($ivp) st $s1,4($ivp) st $s2,8($ivp) st $s3,12($ivp) - lmg %r7,$ra,56($sp) + lm${g} %r7,$ra,7*$SIZE_T($sp) br $ra .align 16 .Lcbc_enc_tail: aghi $len,15 lghi $t0,0 - stg $t0,128($sp) - stg $t0,136($sp) + stg $t0,16*$SIZE_T($sp) + stg $t0,16*$SIZE_T+8($sp) bras $t1,3f - mvc 128(1,$sp),0($inp) + mvc 16*$SIZE_T(1,$sp),0($inp) 3: ex $len,0($t1) lghi $len,0 - la $inp,128($sp) + la $inp,16*$SIZE_T($sp) j .Lcbc_enc_loop .align 16 @@ -1275,10 +1320,10 @@ $code.=<<___; lg $t0,0($ivp) lg $t1,8($ivp) - stmg $t0,$t1,128($sp) + stmg $t0,$t1,16*$SIZE_T($sp) .Lcbc_dec_loop: - stmg $inp,$out,16($sp) + stm${g} $inp,$out,2*$SIZE_T($sp) llgf $s0,0($inp) llgf $s1,4($inp) llgf $s2,8($inp) @@ -1287,7 +1332,7 @@ $code.=<<___; bras $ra,_s390x_AES_decrypt - lmg $inp,$key,16($sp) + lm${g} $inp,$key,2*$SIZE_T($sp) sllg $s0,$s0,32 sllg $s2,$s2,32 lr $s0,$s1 @@ -1295,15 +1340,15 @@ $code.=<<___; lg $t0,0($inp) lg $t1,8($inp) - xg $s0,128($sp) - xg $s2,136($sp) + xg $s0,16*$SIZE_T($sp) + xg $s2,16*$SIZE_T+8($sp) lghi $s1,16 - slgr $len,$s1 + sl${g}r $len,$s1 brc 4,.Lcbc_dec_tail # if borrow brc 2,.Lcbc_dec_done # if zero stg $s0,0($out) stg $s2,8($out) - stmg $t0,$t1,128($sp) + stmg $t0,$t1,16*$SIZE_T($sp) la $inp,16($inp) la $out,16($out) @@ -1313,7 +1358,7 @@ $code.=<<___; stg $s0,0($out) stg $s2,8($out) .Lcbc_dec_exit: - lmg $ivp,$ra,48($sp) + lm${g} %r6,$ra,6*$SIZE_T($sp) stmg $t0,$t1,0($ivp) br $ra @@ -1321,19 +1366,872 @@ $code.=<<___; .align 16 .Lcbc_dec_tail: aghi $len,15 - stg $s0,128($sp) - stg $s2,136($sp) + stg $s0,16*$SIZE_T($sp) + stg $s2,16*$SIZE_T+8($sp) bras $s1,4f - mvc 0(1,$out),128($sp) + mvc 0(1,$out),16*$SIZE_T($sp) 4: ex $len,0($s1) j .Lcbc_dec_exit .size AES_cbc_encrypt,.-AES_cbc_encrypt -.comm OPENSSL_s390xcap_P,8,8 +___ +} +######################################################################## +# void AES_ctr32_encrypt(const unsigned char *in, unsigned char *out, +# size_t blocks, const AES_KEY *key, +# const unsigned char *ivec) +{ +my $inp="%r2"; +my $out="%r4"; # blocks and out are swapped +my $len="%r3"; +my $key="%r5"; my $iv0="%r5"; +my $ivp="%r6"; +my $fp ="%r7"; + +$code.=<<___; +.globl AES_ctr32_encrypt +.type AES_ctr32_encrypt,\@function +.align 16 +AES_ctr32_encrypt: + xgr %r3,%r4 # flip %r3 and %r4, $out and $len + xgr %r4,%r3 + xgr %r3,%r4 + llgfr $len,$len # safe in ctr32 subroutine even in 64-bit case +___ +$code.=<<___ if (!$softonly); + l %r0,240($key) + lhi %r1,16 + clr %r0,%r1 + jl .Lctr32_software + + stm${g} %r6,$s3,6*$SIZE_T($sp) + + slgr $out,$inp + la %r1,0($key) # %r1 is permanent copy of $key + lg $iv0,0($ivp) # load ivec + lg $ivp,8($ivp) + + # prepare and allocate stack frame at the top of 4K page + # with 1K reserved for eventual signal handling + lghi $s0,-1024-256-16# guarantee at least 256-bytes buffer + lghi $s1,-4096 + algr $s0,$sp + lgr $fp,$sp + ngr $s0,$s1 # align at page boundary + slgr $fp,$s0 # total buffer size + lgr $s2,$sp + lghi $s1,1024+16 # sl[g]fi is extended-immediate facility + slgr $fp,$s1 # deduct reservation to get usable buffer size + # buffer size is at lest 256 and at most 3072+256-16 + + la $sp,1024($s0) # alloca + srlg $fp,$fp,4 # convert bytes to blocks, minimum 16 + st${g} $s2,0($sp) # back-chain + st${g} $fp,$SIZE_T($sp) + + slgr $len,$fp + brc 
1,.Lctr32_hw_switch # not zero, no borrow + algr $fp,$len # input is shorter than allocated buffer + lghi $len,0 + st${g} $fp,$SIZE_T($sp) + +.Lctr32_hw_switch: +___ +$code.=<<___ if (0); ######### kmctr code was measured to be ~12% slower + larl $s0,OPENSSL_s390xcap_P + lg $s0,8($s0) + tmhh $s0,0x0004 # check for message_security-assist-4 + jz .Lctr32_km_loop + + llgfr $s0,%r0 + lgr $s1,%r1 + lghi %r0,0 + la %r1,16($sp) + .long 0xb92d2042 # kmctr %r4,%r2,%r2 + + llihh %r0,0x8000 # check if kmctr supports the function code + srlg %r0,%r0,0($s0) + ng %r0,16($sp) + lgr %r0,$s0 + lgr %r1,$s1 + jz .Lctr32_km_loop + +####### kmctr code + algr $out,$inp # restore $out + lgr $s1,$len # $s1 undertakes $len + j .Lctr32_kmctr_loop +.align 16 +.Lctr32_kmctr_loop: + la $s2,16($sp) + lgr $s3,$fp +.Lctr32_kmctr_prepare: + stg $iv0,0($s2) + stg $ivp,8($s2) + la $s2,16($s2) + ahi $ivp,1 # 32-bit increment, preserves upper half + brct $s3,.Lctr32_kmctr_prepare + + #la $inp,0($inp) # inp + sllg $len,$fp,4 # len + #la $out,0($out) # out + la $s2,16($sp) # iv + .long 0xb92da042 # kmctr $out,$s2,$inp + brc 1,.-4 # pay attention to "partial completion" + + slgr $s1,$fp + brc 1,.Lctr32_kmctr_loop # not zero, no borrow + algr $fp,$s1 + lghi $s1,0 + brc 4+1,.Lctr32_kmctr_loop # not zero + + l${g} $sp,0($sp) + lm${g} %r6,$s3,6*$SIZE_T($sp) + br $ra +.align 16 +___ +$code.=<<___; +.Lctr32_km_loop: + la $s2,16($sp) + lgr $s3,$fp +.Lctr32_km_prepare: + stg $iv0,0($s2) + stg $ivp,8($s2) + la $s2,16($s2) + ahi $ivp,1 # 32-bit increment, preserves upper half + brct $s3,.Lctr32_km_prepare + + la $s0,16($sp) # inp + sllg $s1,$fp,4 # len + la $s2,16($sp) # out + .long 0xb92e00a8 # km %r10,%r8 + brc 1,.-4 # pay attention to "partial completion" + + la $s2,16($sp) + lgr $s3,$fp + slgr $s2,$inp +.Lctr32_km_xor: + lg $s0,0($inp) + lg $s1,8($inp) + xg $s0,0($s2,$inp) + xg $s1,8($s2,$inp) + stg $s0,0($out,$inp) + stg $s1,8($out,$inp) + la $inp,16($inp) + brct $s3,.Lctr32_km_xor + + slgr $len,$fp + brc 1,.Lctr32_km_loop # not zero, no borrow + algr $fp,$len + lghi $len,0 + brc 4+1,.Lctr32_km_loop # not zero + + l${g} $s0,0($sp) + l${g} $s1,$SIZE_T($sp) + la $s2,16($sp) +.Lctr32_km_zap: + stg $s0,0($s2) + stg $s0,8($s2) + la $s2,16($s2) + brct $s1,.Lctr32_km_zap + + la $sp,0($s0) + lm${g} %r6,$s3,6*$SIZE_T($sp) + br $ra +.align 16 +.Lctr32_software: +___ +$code.=<<___; + stm${g} $key,$ra,5*$SIZE_T($sp) + sl${g}r $inp,$out + larl $tbl,AES_Te + llgf $t1,12($ivp) + +.Lctr32_loop: + stm${g} $inp,$out,2*$SIZE_T($sp) + llgf $s0,0($ivp) + llgf $s1,4($ivp) + llgf $s2,8($ivp) + lgr $s3,$t1 + st $t1,16*$SIZE_T($sp) + lgr %r4,$key + + bras $ra,_s390x_AES_encrypt + + lm${g} $inp,$ivp,2*$SIZE_T($sp) + llgf $t1,16*$SIZE_T($sp) + x $s0,0($inp,$out) + x $s1,4($inp,$out) + x $s2,8($inp,$out) + x $s3,12($inp,$out) + stm $s0,$s3,0($out) + + la $out,16($out) + ahi $t1,1 # 32-bit increment + brct $len,.Lctr32_loop + + lm${g} %r6,$ra,6*$SIZE_T($sp) + br $ra +.size AES_ctr32_encrypt,.-AES_ctr32_encrypt +___ +} + +######################################################################## +# void AES_xts_encrypt(const char *inp,char *out,size_t len, +# const AES_KEY *key1, const AES_KEY *key2, +# const unsigned char iv[16]); +# +{ +my $inp="%r2"; +my $out="%r4"; # len and out are swapped +my $len="%r3"; +my $key1="%r5"; # $i1 +my $key2="%r6"; # $i2 +my $fp="%r7"; # $i3 +my $tweak=16*$SIZE_T+16; # or $stdframe-16, bottom of the frame... 
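The _s390x_xts_km helper that follows advances the XTS tweak once per 16-byte block with the lghi 0x87 / srag / ngr / algr / alcgr / xgr sequence, i.e. multiplication by x in GF(2^128) reduced by x^128 + x^7 + x^2 + x + 1. A hedged C sketch of that update, assuming the tweak is held as two little-endian 64-bit halves just as the assembly loads them with lrvg (the function name is illustrative):

```c
#include <stdint.h>

/* Multiply the 128-bit XTS tweak by x in GF(2^128), reducing with the
 * polynomial 0x87.  lo/hi are the little-endian low and high 64-bit
 * halves of the tweak, matching the register pair used below. */
static void xts_tweak_times_x(uint64_t *lo, uint64_t *hi)
{
    uint64_t mask   = (uint64_t)0 - (*hi >> 63); /* srag: broadcast top bit */
    uint64_t rem    = mask & 0x87;               /* ngr: reduction constant  */
    uint64_t msb_lo = *lo >> 63;                 /* carry from low half      */

    *lo = (*lo << 1) ^ rem;                      /* algr + xgr               */
    *hi = (*hi << 1) | msb_lo;                   /* alcgr picks up the carry */
}
```

The branch-free mask mirrors the srag/ngr pair: when the top bit of the high half is set, 0x87 is folded back into the shifted low half; otherwise the tweak is simply doubled.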
+ +$code.=<<___; +.type _s390x_xts_km,\@function +.align 16 +_s390x_xts_km: +___ +$code.=<<___ if(1); + llgfr $s0,%r0 # put aside the function code + lghi $s1,0x7f + nr $s1,%r0 + lghi %r0,0 # query capability vector + la %r1,$tweak-16($sp) + .long 0xb92e0042 # km %r4,%r2 + llihh %r1,0x8000 + srlg %r1,%r1,32($s1) # check for 32+function code + ng %r1,$tweak-16($sp) + lgr %r0,$s0 # restore the function code + la %r1,0($key1) # restore $key1 + jz .Lxts_km_vanilla + + lmg $i2,$i3,$tweak($sp) # put aside the tweak value + algr $out,$inp + + oill %r0,32 # switch to xts function code + aghi $s1,-18 # + sllg $s1,$s1,3 # (function code - 18)*8, 0 or 16 + la %r1,$tweak-16($sp) + slgr %r1,$s1 # parameter block position + lmg $s0,$s3,0($key1) # load 256 bits of key material, + stmg $s0,$s3,0(%r1) # and copy it to parameter block. + # yes, it contains junk and overlaps + # with the tweak in 128-bit case. + # it's done to avoid conditional + # branch. + stmg $i2,$i3,$tweak($sp) # "re-seat" the tweak value + + .long 0xb92e0042 # km %r4,%r2 + brc 1,.-4 # pay attention to "partial completion" + + lrvg $s0,$tweak+0($sp) # load the last tweak + lrvg $s1,$tweak+8($sp) + stmg %r0,%r3,$tweak-32($sp) # wipe copy of the key + + nill %r0,0xffdf # switch back to original function code + la %r1,0($key1) # restore pointer to $key1 + slgr $out,$inp + + llgc $len,2*$SIZE_T-1($sp) + nill $len,0x0f # $len%=16 + br $ra + +.align 16 +.Lxts_km_vanilla: +___ +$code.=<<___; + # prepare and allocate stack frame at the top of 4K page + # with 1K reserved for eventual signal handling + lghi $s0,-1024-256-16# guarantee at least 256-bytes buffer + lghi $s1,-4096 + algr $s0,$sp + lgr $fp,$sp + ngr $s0,$s1 # align at page boundary + slgr $fp,$s0 # total buffer size + lgr $s2,$sp + lghi $s1,1024+16 # sl[g]fi is extended-immediate facility + slgr $fp,$s1 # deduct reservation to get usable buffer size + # buffer size is at lest 256 and at most 3072+256-16 + + la $sp,1024($s0) # alloca + nill $fp,0xfff0 # round to 16*n + st${g} $s2,0($sp) # back-chain + nill $len,0xfff0 # redundant + st${g} $fp,$SIZE_T($sp) + + slgr $len,$fp + brc 1,.Lxts_km_go # not zero, no borrow + algr $fp,$len # input is shorter than allocated buffer + lghi $len,0 + st${g} $fp,$SIZE_T($sp) + +.Lxts_km_go: + lrvg $s0,$tweak+0($s2) # load the tweak value in little-endian + lrvg $s1,$tweak+8($s2) + + la $s2,16($sp) # vector of ascending tweak values + slgr $s2,$inp + srlg $s3,$fp,4 + j .Lxts_km_start + +.Lxts_km_loop: + la $s2,16($sp) + slgr $s2,$inp + srlg $s3,$fp,4 +.Lxts_km_prepare: + lghi $i1,0x87 + srag $i2,$s1,63 # broadcast upper bit + ngr $i1,$i2 # rem + algr $s0,$s0 + alcgr $s1,$s1 + xgr $s0,$i1 +.Lxts_km_start: + lrvgr $i1,$s0 # flip byte order + lrvgr $i2,$s1 + stg $i1,0($s2,$inp) + stg $i2,8($s2,$inp) + xg $i1,0($inp) + xg $i2,8($inp) + stg $i1,0($out,$inp) + stg $i2,8($out,$inp) + la $inp,16($inp) + brct $s3,.Lxts_km_prepare + + slgr $inp,$fp # rewind $inp + la $s2,0($out,$inp) + lgr $s3,$fp + .long 0xb92e00aa # km $s2,$s2 + brc 1,.-4 # pay attention to "partial completion" + + la $s2,16($sp) + slgr $s2,$inp + srlg $s3,$fp,4 +.Lxts_km_xor: + lg $i1,0($out,$inp) + lg $i2,8($out,$inp) + xg $i1,0($s2,$inp) + xg $i2,8($s2,$inp) + stg $i1,0($out,$inp) + stg $i2,8($out,$inp) + la $inp,16($inp) + brct $s3,.Lxts_km_xor + + slgr $len,$fp + brc 1,.Lxts_km_loop # not zero, no borrow + algr $fp,$len + lghi $len,0 + brc 4+1,.Lxts_km_loop # not zero + + l${g} $i1,0($sp) # back-chain + llgf $fp,`2*$SIZE_T-4`($sp) # bytes used + la $i2,16($sp) + srlg $fp,$fp,4 
+.Lxts_km_zap: + stg $i1,0($i2) + stg $i1,8($i2) + la $i2,16($i2) + brct $fp,.Lxts_km_zap + + la $sp,0($i1) + llgc $len,2*$SIZE_T-1($i1) + nill $len,0x0f # $len%=16 + bzr $ra + + # generate one more tweak... + lghi $i1,0x87 + srag $i2,$s1,63 # broadcast upper bit + ngr $i1,$i2 # rem + algr $s0,$s0 + alcgr $s1,$s1 + xgr $s0,$i1 + + ltr $len,$len # clear zero flag + br $ra +.size _s390x_xts_km,.-_s390x_xts_km + +.globl AES_xts_encrypt +.type AES_xts_encrypt,\@function +.align 16 +AES_xts_encrypt: + xgr %r3,%r4 # flip %r3 and %r4, $out and $len + xgr %r4,%r3 + xgr %r3,%r4 +___ +$code.=<<___ if ($SIZE_T==4); + llgfr $len,$len +___ +$code.=<<___; + st${g} $len,1*$SIZE_T($sp) # save copy of $len + srag $len,$len,4 # formally wrong, because it expands + # sign byte, but who can afford asking + # to process more than 2^63-1 bytes? + # I use it, because it sets condition + # code... + bcr 8,$ra # abort if zero (i.e. less than 16) +___ +$code.=<<___ if (!$softonly); + llgf %r0,240($key2) + lhi %r1,16 + clr %r0,%r1 + jl .Lxts_enc_software + + st${g} $ra,5*$SIZE_T($sp) + stm${g} %r6,$s3,6*$SIZE_T($sp) + + sllg $len,$len,4 # $len&=~15 + slgr $out,$inp + + # generate the tweak value + l${g} $s3,$stdframe($sp) # pointer to iv + la $s2,$tweak($sp) + lmg $s0,$s1,0($s3) + lghi $s3,16 + stmg $s0,$s1,0($s2) + la %r1,0($key2) # $key2 is not needed anymore + .long 0xb92e00aa # km $s2,$s2, generate the tweak + brc 1,.-4 # can this happen? + + l %r0,240($key1) + la %r1,0($key1) # $key1 is not needed anymore + bras $ra,_s390x_xts_km + jz .Lxts_enc_km_done + + aghi $inp,-16 # take one step back + la $i3,0($out,$inp) # put aside real $out +.Lxts_enc_km_steal: + llgc $i1,16($inp) + llgc $i2,0($out,$inp) + stc $i1,0($out,$inp) + stc $i2,16($out,$inp) + la $inp,1($inp) + brct $len,.Lxts_enc_km_steal + + la $s2,0($i3) + lghi $s3,16 + lrvgr $i1,$s0 # flip byte order + lrvgr $i2,$s1 + xg $i1,0($s2) + xg $i2,8($s2) + stg $i1,0($s2) + stg $i2,8($s2) + .long 0xb92e00aa # km $s2,$s2 + brc 1,.-4 # can this happen? 
+ lrvgr $i1,$s0 # flip byte order + lrvgr $i2,$s1 + xg $i1,0($i3) + xg $i2,8($i3) + stg $i1,0($i3) + stg $i2,8($i3) + +.Lxts_enc_km_done: + stg $sp,$tweak+0($sp) # wipe tweak + stg $sp,$tweak+8($sp) + l${g} $ra,5*$SIZE_T($sp) + lm${g} %r6,$s3,6*$SIZE_T($sp) + br $ra +.align 16 +.Lxts_enc_software: +___ +$code.=<<___; + stm${g} %r6,$ra,6*$SIZE_T($sp) + + slgr $out,$inp + + l${g} $s3,$stdframe($sp) # ivp + llgf $s0,0($s3) # load iv + llgf $s1,4($s3) + llgf $s2,8($s3) + llgf $s3,12($s3) + stm${g} %r2,%r5,2*$SIZE_T($sp) + la $key,0($key2) + larl $tbl,AES_Te + bras $ra,_s390x_AES_encrypt # generate the tweak + lm${g} %r2,%r5,2*$SIZE_T($sp) + stm $s0,$s3,$tweak($sp) # save the tweak + j .Lxts_enc_enter + +.align 16 +.Lxts_enc_loop: + lrvg $s1,$tweak+0($sp) # load the tweak in little-endian + lrvg $s3,$tweak+8($sp) + lghi %r1,0x87 + srag %r0,$s3,63 # broadcast upper bit + ngr %r1,%r0 # rem + algr $s1,$s1 + alcgr $s3,$s3 + xgr $s1,%r1 + lrvgr $s1,$s1 # flip byte order + lrvgr $s3,$s3 + srlg $s0,$s1,32 # smash the tweak to 4x32-bits + stg $s1,$tweak+0($sp) # save the tweak + llgfr $s1,$s1 + srlg $s2,$s3,32 + stg $s3,$tweak+8($sp) + llgfr $s3,$s3 + la $inp,16($inp) # $inp+=16 +.Lxts_enc_enter: + x $s0,0($inp) # ^=*($inp) + x $s1,4($inp) + x $s2,8($inp) + x $s3,12($inp) + stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing + la $key,0($key1) + bras $ra,_s390x_AES_encrypt + lm${g} %r2,%r5,2*$SIZE_T($sp) + x $s0,$tweak+0($sp) # ^=tweak + x $s1,$tweak+4($sp) + x $s2,$tweak+8($sp) + x $s3,$tweak+12($sp) + st $s0,0($out,$inp) + st $s1,4($out,$inp) + st $s2,8($out,$inp) + st $s3,12($out,$inp) + brct${g} $len,.Lxts_enc_loop + + llgc $len,`2*$SIZE_T-1`($sp) + nill $len,0x0f # $len%16 + jz .Lxts_enc_done + + la $i3,0($inp,$out) # put aside real $out +.Lxts_enc_steal: + llgc %r0,16($inp) + llgc %r1,0($out,$inp) + stc %r0,0($out,$inp) + stc %r1,16($out,$inp) + la $inp,1($inp) + brct $len,.Lxts_enc_steal + la $out,0($i3) # restore real $out + + # generate last tweak... + lrvg $s1,$tweak+0($sp) # load the tweak in little-endian + lrvg $s3,$tweak+8($sp) + lghi %r1,0x87 + srag %r0,$s3,63 # broadcast upper bit + ngr %r1,%r0 # rem + algr $s1,$s1 + alcgr $s3,$s3 + xgr $s1,%r1 + lrvgr $s1,$s1 # flip byte order + lrvgr $s3,$s3 + srlg $s0,$s1,32 # smash the tweak to 4x32-bits + stg $s1,$tweak+0($sp) # save the tweak + llgfr $s1,$s1 + srlg $s2,$s3,32 + stg $s3,$tweak+8($sp) + llgfr $s3,$s3 + + x $s0,0($out) # ^=*(inp)|stolen cipther-text + x $s1,4($out) + x $s2,8($out) + x $s3,12($out) + st${g} $out,4*$SIZE_T($sp) + la $key,0($key1) + bras $ra,_s390x_AES_encrypt + l${g} $out,4*$SIZE_T($sp) + x $s0,`$tweak+0`($sp) # ^=tweak + x $s1,`$tweak+4`($sp) + x $s2,`$tweak+8`($sp) + x $s3,`$tweak+12`($sp) + st $s0,0($out) + st $s1,4($out) + st $s2,8($out) + st $s3,12($out) + +.Lxts_enc_done: + stg $sp,$tweak+0($sp) # wipe tweak + stg $sp,$twesk+8($sp) + lm${g} %r6,$ra,6*$SIZE_T($sp) + br $ra +.size AES_xts_encrypt,.-AES_xts_encrypt +___ +# void AES_xts_decrypt(const char *inp,char *out,size_t len, +# const AES_KEY *key1, const AES_KEY *key2, +# const unsigned char iv[16]); +# +$code.=<<___; +.globl AES_xts_decrypt +.type AES_xts_decrypt,\@function +.align 16 +AES_xts_decrypt: + xgr %r3,%r4 # flip %r3 and %r4, $out and $len + xgr %r4,%r3 + xgr %r3,%r4 +___ +$code.=<<___ if ($SIZE_T==4); + llgfr $len,$len +___ +$code.=<<___; + st${g} $len,1*$SIZE_T($sp) # save copy of $len + aghi $len,-16 + bcr 4,$ra # abort if less than zero. 
formally + # wrong, because $len is unsigned, + # but who can afford asking to + # process more than 2^63-1 bytes? + tmll $len,0x0f + jnz .Lxts_dec_proceed + aghi $len,16 +.Lxts_dec_proceed: +___ +$code.=<<___ if (!$softonly); + llgf %r0,240($key2) + lhi %r1,16 + clr %r0,%r1 + jl .Lxts_dec_software + + st${g} $ra,5*$SIZE_T($sp) + stm${g} %r6,$s3,6*$SIZE_T($sp) + + nill $len,0xfff0 # $len&=~15 + slgr $out,$inp + + # generate the tweak value + l${g} $s3,$stdframe($sp) # pointer to iv + la $s2,$tweak($sp) + lmg $s0,$s1,0($s3) + lghi $s3,16 + stmg $s0,$s1,0($s2) + la %r1,0($key2) # $key2 is not needed past this point + .long 0xb92e00aa # km $s2,$s2, generate the tweak + brc 1,.-4 # can this happen? + + l %r0,240($key1) + la %r1,0($key1) # $key1 is not needed anymore + + ltgr $len,$len + jz .Lxts_dec_km_short + bras $ra,_s390x_xts_km + jz .Lxts_dec_km_done + + lrvgr $s2,$s0 # make copy in reverse byte order + lrvgr $s3,$s1 + j .Lxts_dec_km_2ndtweak + +.Lxts_dec_km_short: + llgc $len,`2*$SIZE_T-1`($sp) + nill $len,0x0f # $len%=16 + lrvg $s0,$tweak+0($sp) # load the tweak + lrvg $s1,$tweak+8($sp) + lrvgr $s2,$s0 # make copy in reverse byte order + lrvgr $s3,$s1 + +.Lxts_dec_km_2ndtweak: + lghi $i1,0x87 + srag $i2,$s1,63 # broadcast upper bit + ngr $i1,$i2 # rem + algr $s0,$s0 + alcgr $s1,$s1 + xgr $s0,$i1 + lrvgr $i1,$s0 # flip byte order + lrvgr $i2,$s1 + + xg $i1,0($inp) + xg $i2,8($inp) + stg $i1,0($out,$inp) + stg $i2,8($out,$inp) + la $i2,0($out,$inp) + lghi $i3,16 + .long 0xb92e0066 # km $i2,$i2 + brc 1,.-4 # can this happen? + lrvgr $i1,$s0 + lrvgr $i2,$s1 + xg $i1,0($out,$inp) + xg $i2,8($out,$inp) + stg $i1,0($out,$inp) + stg $i2,8($out,$inp) + + la $i3,0($out,$inp) # put aside real $out +.Lxts_dec_km_steal: + llgc $i1,16($inp) + llgc $i2,0($out,$inp) + stc $i1,0($out,$inp) + stc $i2,16($out,$inp) + la $inp,1($inp) + brct $len,.Lxts_dec_km_steal + + lgr $s0,$s2 + lgr $s1,$s3 + xg $s0,0($i3) + xg $s1,8($i3) + stg $s0,0($i3) + stg $s1,8($i3) + la $s0,0($i3) + lghi $s1,16 + .long 0xb92e0088 # km $s0,$s0 + brc 1,.-4 # can this happen? 
+ xg $s2,0($i3) + xg $s3,8($i3) + stg $s2,0($i3) + stg $s3,8($i3) +.Lxts_dec_km_done: + stg $sp,$tweak+0($sp) # wipe tweak + stg $sp,$tweak+8($sp) + l${g} $ra,5*$SIZE_T($sp) + lm${g} %r6,$s3,6*$SIZE_T($sp) + br $ra +.align 16 +.Lxts_dec_software: +___ +$code.=<<___; + stm${g} %r6,$ra,6*$SIZE_T($sp) + + srlg $len,$len,4 + slgr $out,$inp + + l${g} $s3,$stdframe($sp) # ivp + llgf $s0,0($s3) # load iv + llgf $s1,4($s3) + llgf $s2,8($s3) + llgf $s3,12($s3) + stm${g} %r2,%r5,2*$SIZE_T($sp) + la $key,0($key2) + larl $tbl,AES_Te + bras $ra,_s390x_AES_encrypt # generate the tweak + lm${g} %r2,%r5,2*$SIZE_T($sp) + larl $tbl,AES_Td + lt${g}r $len,$len + stm $s0,$s3,$tweak($sp) # save the tweak + jz .Lxts_dec_short + j .Lxts_dec_enter + +.align 16 +.Lxts_dec_loop: + lrvg $s1,$tweak+0($sp) # load the tweak in little-endian + lrvg $s3,$tweak+8($sp) + lghi %r1,0x87 + srag %r0,$s3,63 # broadcast upper bit + ngr %r1,%r0 # rem + algr $s1,$s1 + alcgr $s3,$s3 + xgr $s1,%r1 + lrvgr $s1,$s1 # flip byte order + lrvgr $s3,$s3 + srlg $s0,$s1,32 # smash the tweak to 4x32-bits + stg $s1,$tweak+0($sp) # save the tweak + llgfr $s1,$s1 + srlg $s2,$s3,32 + stg $s3,$tweak+8($sp) + llgfr $s3,$s3 +.Lxts_dec_enter: + x $s0,0($inp) # tweak^=*(inp) + x $s1,4($inp) + x $s2,8($inp) + x $s3,12($inp) + stm${g} %r2,%r3,2*$SIZE_T($sp) # only two registers are changing + la $key,0($key1) + bras $ra,_s390x_AES_decrypt + lm${g} %r2,%r5,2*$SIZE_T($sp) + x $s0,$tweak+0($sp) # ^=tweak + x $s1,$tweak+4($sp) + x $s2,$tweak+8($sp) + x $s3,$tweak+12($sp) + st $s0,0($out,$inp) + st $s1,4($out,$inp) + st $s2,8($out,$inp) + st $s3,12($out,$inp) + la $inp,16($inp) + brct${g} $len,.Lxts_dec_loop + + llgc $len,`2*$SIZE_T-1`($sp) + nill $len,0x0f # $len%16 + jz .Lxts_dec_done + + # generate pair of tweaks... 
+ lrvg $s1,$tweak+0($sp) # load the tweak in little-endian + lrvg $s3,$tweak+8($sp) + lghi %r1,0x87 + srag %r0,$s3,63 # broadcast upper bit + ngr %r1,%r0 # rem + algr $s1,$s1 + alcgr $s3,$s3 + xgr $s1,%r1 + lrvgr $i2,$s1 # flip byte order + lrvgr $i3,$s3 + stmg $i2,$i3,$tweak($sp) # save the 1st tweak + j .Lxts_dec_2ndtweak + +.align 16 +.Lxts_dec_short: + llgc $len,`2*$SIZE_T-1`($sp) + nill $len,0x0f # $len%16 + lrvg $s1,$tweak+0($sp) # load the tweak in little-endian + lrvg $s3,$tweak+8($sp) +.Lxts_dec_2ndtweak: + lghi %r1,0x87 + srag %r0,$s3,63 # broadcast upper bit + ngr %r1,%r0 # rem + algr $s1,$s1 + alcgr $s3,$s3 + xgr $s1,%r1 + lrvgr $s1,$s1 # flip byte order + lrvgr $s3,$s3 + srlg $s0,$s1,32 # smash the tweak to 4x32-bits + stg $s1,$tweak-16+0($sp) # save the 2nd tweak + llgfr $s1,$s1 + srlg $s2,$s3,32 + stg $s3,$tweak-16+8($sp) + llgfr $s3,$s3 + + x $s0,0($inp) # tweak_the_2nd^=*(inp) + x $s1,4($inp) + x $s2,8($inp) + x $s3,12($inp) + stm${g} %r2,%r3,2*$SIZE_T($sp) + la $key,0($key1) + bras $ra,_s390x_AES_decrypt + lm${g} %r2,%r5,2*$SIZE_T($sp) + x $s0,$tweak-16+0($sp) # ^=tweak_the_2nd + x $s1,$tweak-16+4($sp) + x $s2,$tweak-16+8($sp) + x $s3,$tweak-16+12($sp) + st $s0,0($out,$inp) + st $s1,4($out,$inp) + st $s2,8($out,$inp) + st $s3,12($out,$inp) + + la $i3,0($out,$inp) # put aside real $out +.Lxts_dec_steal: + llgc %r0,16($inp) + llgc %r1,0($out,$inp) + stc %r0,0($out,$inp) + stc %r1,16($out,$inp) + la $inp,1($inp) + brct $len,.Lxts_dec_steal + la $out,0($i3) # restore real $out + + lm $s0,$s3,$tweak($sp) # load the 1st tweak + x $s0,0($out) # tweak^=*(inp)|stolen cipher-text + x $s1,4($out) + x $s2,8($out) + x $s3,12($out) + st${g} $out,4*$SIZE_T($sp) + la $key,0($key1) + bras $ra,_s390x_AES_decrypt + l${g} $out,4*$SIZE_T($sp) + x $s0,$tweak+0($sp) # ^=tweak + x $s1,$tweak+4($sp) + x $s2,$tweak+8($sp) + x $s3,$tweak+12($sp) + st $s0,0($out) + st $s1,4($out) + st $s2,8($out) + st $s3,12($out) + stg $sp,$tweak-16+0($sp) # wipe 2nd tweak + stg $sp,$tweak-16+8($sp) +.Lxts_dec_done: + stg $sp,$tweak+0($sp) # wipe tweak + stg $sp,$twesk+8($sp) + lm${g} %r6,$ra,6*$SIZE_T($sp) + br $ra +.size AES_xts_decrypt,.-AES_xts_decrypt ___ } $code.=<<___; .string "AES for s390x, CRYPTOGAMS by <appro\@openssl.org>" +.comm OPENSSL_s390xcap_P,16,8 ___ $code =~ s/\`([^\`]*)\`/eval $1/gem; print $code; +close STDOUT; # force flush |
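When the length is not a multiple of 16, the XTS paths above finish with ciphertext stealing in the .Lxts_enc_km_steal, .Lxts_enc_steal and .Lxts_dec_steal loops. A sketch of the encryption-side tail handling in C, with illustrative names only, assuming the last full block has already been encrypted and tail = len % 16 plaintext bytes remain:

```c
#include <stddef.h>

/* Ciphertext stealing for the final partial block.  last_ct holds the
 * ciphertext of the last full block, tail_pt the remaining 1..15
 * plaintext bytes, tail_ct the final partial output block. */
static void xts_enc_steal_tail(unsigned char last_ct[16],
                               const unsigned char *tail_pt,
                               unsigned char *tail_ct,
                               size_t tail /* len % 16, nonzero */)
{
    for (size_t i = 0; i < tail; i++) {
        tail_ct[i] = last_ct[i];  /* steal leading ciphertext bytes for the tail */
        last_ct[i] = tail_pt[i];  /* splice the tail plaintext into the last block */
    }
    /* The caller then re-encrypts last_ct under the next tweak, exactly
     * as the code after the steal loops does with km or the software
     * _s390x_AES_encrypt path. */
}
```

The assembly performs the same byte swap in place in the output buffer and then re-encrypts the spliced block under the freshly generated tweak, which is why both km paths end with one more km call after their steal loops.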