author    Arne Schwabe <arne@rfc2549.org>    2012-04-16 19:21:14 +0200
committer Arne Schwabe <arne@rfc2549.org>    2012-04-16 19:21:14 +0200
commit    3e4d8f433239c40311037616b1b8833a06651ae0 (patch)
tree      98ab7fce0d011d34677b0beb762d389cb5c39199 /openssl/crypto/rc4/asm
Initial import
Diffstat (limited to 'openssl/crypto/rc4/asm')
-rw-r--r--  openssl/crypto/rc4/asm/rc4-586.pl     270
-rw-r--r--  openssl/crypto/rc4/asm/rc4-ia64.pl    755
-rw-r--r--  openssl/crypto/rc4/asm/rc4-s390x.pl   205
-rwxr-xr-x  openssl/crypto/rc4/asm/rc4-x86_64.pl  504
4 files changed, 1734 insertions, 0 deletions
diff --git a/openssl/crypto/rc4/asm/rc4-586.pl b/openssl/crypto/rc4/asm/rc4-586.pl
new file mode 100644
index 00000000..38a44a70
--- /dev/null
+++ b/openssl/crypto/rc4/asm/rc4-586.pl
@@ -0,0 +1,270 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# At some point it became apparent that the original SSLeay RC4
+# assembler implementation performs suboptimally on the latest IA-32
+# microarchitectures. After re-tuning, performance has changed as
+# follows:
+#
+# Pentium -10%
+# Pentium III +12%
+# AMD +50%(*)
+# P4 +250%(**)
+#
+# (*) This number is actually a trade-off:-) It's possible to
+# achieve +72%, but at the cost of -48% off PIII performance.
+# In other words, code performing a further 13% faster on AMD
+# would perform almost 2 times slower on Intel PIII...
+# For reference! This code delivers ~80% of rc4-amd64.pl
+# performance on the same Opteron machine.
+# (**) This number requires the compressed key schedule set up by
+# RC4_set_key [see commentary below for further details].
+#
+# <appro@fy.chalmers.se>
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+&asm_init($ARGV[0],"rc4-586.pl");
+
+$xx="eax";
+$yy="ebx";
+$tx="ecx";
+$ty="edx";
+$inp="esi";
+$out="ebp";
+$dat="edi";
+
+sub RC4_loop {
+ my $i=shift;
+ my $func = ($i==0)?*mov:*or;
+
+ &add (&LB($yy),&LB($tx));
+ &mov ($ty,&DWP(0,$dat,$yy,4));
+ &mov (&DWP(0,$dat,$yy,4),$tx);
+ &mov (&DWP(0,$dat,$xx,4),$ty);
+ &add ($ty,$tx);
+ &inc (&LB($xx));
+ &and ($ty,0xff);
+ &ror ($out,8) if ($i!=0);
+ if ($i<3) {
+ &mov ($tx,&DWP(0,$dat,$xx,4));
+ } else {
+ &mov ($tx,&wparam(3)); # reload [re-biased] out
+ }
+ &$func ($out,&DWP(0,$dat,$ty,4));
+}
+
+# void RC4(RC4_KEY *key,size_t len,const unsigned char *inp,unsigned char *out);
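+# (caller sketch, names hypothetical: RC4(&key, sizeof(buf), buf, buf);
+# in-place use with inp == out should be safe, since $out is only
+# re-biased against $inp and each chunk is read before it is written)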
+&function_begin("RC4");
+ &mov ($dat,&wparam(0)); # load key schedule pointer
+ &mov ($ty, &wparam(1)); # load len
+ &mov ($inp,&wparam(2)); # load inp
+ &mov ($out,&wparam(3)); # load out
+
+ &xor ($xx,$xx); # avoid partial register stalls
+ &xor ($yy,$yy);
+
+ &cmp ($ty,0); # safety net
+ &je (&label("abort"));
+
+ &mov (&LB($xx),&BP(0,$dat)); # load key->x
+ &mov (&LB($yy),&BP(4,$dat)); # load key->y
+ &add ($dat,8);
+
+ &lea ($tx,&DWP(0,$inp,$ty));
+ &sub ($out,$inp); # re-bias out
+ &mov (&wparam(1),$tx); # save input+len
+
+ &inc (&LB($xx));
+
+ # detect compressed key schedule...
+ &cmp (&DWP(256,$dat),-1);
+ &je (&label("RC4_CHAR"));
+
+ &mov ($tx,&DWP(0,$dat,$xx,4));
+
+ &and ($ty,-4); # how many 4-byte chunks?
+ &jz (&label("loop1"));
+
+ &lea ($ty,&DWP(-4,$inp,$ty));
+ &mov (&wparam(2),$ty); # save input+(len/4)*4-4
+ &mov (&wparam(3),$out); # $out as accumulator in this loop
+
+ &set_label("loop4",16);
+ for ($i=0;$i<4;$i++) { RC4_loop($i); }
+ &ror ($out,8);
+ &xor ($out,&DWP(0,$inp));
+ &cmp ($inp,&wparam(2)); # compare to input+(len/4)*4-4
+ &mov (&DWP(0,$tx,$inp),$out);# $tx holds re-biased out here
+ &lea ($inp,&DWP(4,$inp));
+ &mov ($tx,&DWP(0,$dat,$xx,4));
+ &jb (&label("loop4"));
+
+ &cmp ($inp,&wparam(1)); # compare to input+len
+ &je (&label("done"));
+ &mov ($out,&wparam(3)); # restore $out
+
+ &set_label("loop1",16);
+ &add (&LB($yy),&LB($tx));
+ &mov ($ty,&DWP(0,$dat,$yy,4));
+ &mov (&DWP(0,$dat,$yy,4),$tx);
+ &mov (&DWP(0,$dat,$xx,4),$ty);
+ &add ($ty,$tx);
+ &inc (&LB($xx));
+ &and ($ty,0xff);
+ &mov ($ty,&DWP(0,$dat,$ty,4));
+ &xor (&LB($ty),&BP(0,$inp));
+ &lea ($inp,&DWP(1,$inp));
+ &mov ($tx,&DWP(0,$dat,$xx,4));
+ &cmp ($inp,&wparam(1)); # compare to input+len
+ &mov (&BP(-1,$out,$inp),&LB($ty));
+ &jb (&label("loop1"));
+
+ &jmp (&label("done"));
+
+# this is essentially Intel P4 specific codepath...
+&set_label("RC4_CHAR",16);
+ &movz ($tx,&BP(0,$dat,$xx));
+ # strangely enough, an unrolled loop performs over 20% slower...
+ &set_label("cloop1");
+ &add (&LB($yy),&LB($tx));
+ &movz ($ty,&BP(0,$dat,$yy));
+ &mov (&BP(0,$dat,$yy),&LB($tx));
+ &mov (&BP(0,$dat,$xx),&LB($ty));
+ &add (&LB($ty),&LB($tx));
+ &movz ($ty,&BP(0,$dat,$ty));
+ &add (&LB($xx),1);
+ &xor (&LB($ty),&BP(0,$inp));
+ &lea ($inp,&DWP(1,$inp));
+ &movz ($tx,&BP(0,$dat,$xx));
+ &cmp ($inp,&wparam(1));
+ &mov (&BP(-1,$out,$inp),&LB($ty));
+ &jb (&label("cloop1"));
+
+&set_label("done");
+ &dec (&LB($xx));
+ &mov (&BP(-4,$dat),&LB($yy)); # save key->y
+ &mov (&BP(-8,$dat),&LB($xx)); # save key->x
+&set_label("abort");
+&function_end("RC4");
+
+########################################################################
+
+$inp="esi";
+$out="edi";
+$idi="ebp";
+$ido="ecx";
+$idx="edx";
+
+&external_label("OPENSSL_ia32cap_P");
+
+# void RC4_set_key(RC4_KEY *key,int len,const unsigned char *data);
+&function_begin("RC4_set_key");
+ &mov ($out,&wparam(0)); # load key
+ &mov ($idi,&wparam(1)); # load len
+ &mov ($inp,&wparam(2)); # load data
+ &picmeup($idx,"OPENSSL_ia32cap_P");
+
+ &lea ($out,&DWP(2*4,$out)); # &key->data
+ &lea ($inp,&DWP(0,$inp,$idi)); # $inp to point at the end
+ &neg ($idi);
+ &xor ("eax","eax");
+ &mov (&DWP(-4,$out),$idi); # borrow key->y
+
+ &bt (&DWP(0,$idx),20); # check for bit#20
+ &jc (&label("c1stloop"));
+
+&set_label("w1stloop",16);
+ &mov (&DWP(0,$out,"eax",4),"eax"); # key->data[i]=i;
+ &add (&LB("eax"),1); # i++;
+ &jnc (&label("w1stloop"));
+
+ &xor ($ido,$ido);
+ &xor ($idx,$idx);
+
+&set_label("w2ndloop",16);
+ &mov ("eax",&DWP(0,$out,$ido,4));
+ &add (&LB($idx),&BP(0,$inp,$idi));
+ &add (&LB($idx),&LB("eax"));
+ &add ($idi,1);
+ &mov ("ebx",&DWP(0,$out,$idx,4));
+ &jnz (&label("wnowrap"));
+ &mov ($idi,&DWP(-4,$out));
+ &set_label("wnowrap");
+ &mov (&DWP(0,$out,$idx,4),"eax");
+ &mov (&DWP(0,$out,$ido,4),"ebx");
+ &add (&LB($ido),1);
+ &jnc (&label("w2ndloop"));
+&jmp (&label("exit"));
+
+# Unlike all other x86 [and x86_64] implementations, the Intel P4 core
+# [including EM64T] was found to perform poorly with the above "32-bit"
+# key schedule, a.k.a. RC4_INT. The performance improvement for IA-32
+# hand-coded assembler turned out to be 3.5x when re-coded for the
+# compressed 8-bit one, a.k.a. RC4_CHAR! It's however inappropriate to
+# just switch to the 8-bit schedule for x86[_64], because non-P4
+# implementations then suffer significant performance losses, e.g. PIII
+# exhibits >2x deterioration, and so does Opteron. In order to assure
+# optimal all-round performance, we detect P4 at run-time and set up a
+# compressed key schedule, which is recognized by the RC4 procedure.
+
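+# As a hedged C model of that convention (pointer casts illustrative
+# only): after the 256 one-byte entries are filled, RC4_set_key stores
+# -1 at byte offset 256 of key->data, and RC4 tests the very same dword.
+# With the 32-bit schedule the dword at byte offset 256 is data[64], a
+# permuted S[] value of at most 255, so it can never read as -1:
+#
+#	unsigned char *base = (unsigned char *)key->data;
+#	*(unsigned int *)(base + 256) = (unsigned int)-1;  /* set-up side */
+#	/* ... */
+#	if (*(unsigned int *)(base + 256) == (unsigned int)-1)
+#		/* use the compressed RC4_CHAR path */;
+#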
+&set_label("c1stloop",16);
+ &mov (&BP(0,$out,"eax"),&LB("eax")); # key->data[i]=i;
+ &add (&LB("eax"),1); # i++;
+ &jnc (&label("c1stloop"));
+
+ &xor ($ido,$ido);
+ &xor ($idx,$idx);
+ &xor ("ebx","ebx");
+
+&set_label("c2ndloop",16);
+ &mov (&LB("eax"),&BP(0,$out,$ido));
+ &add (&LB($idx),&BP(0,$inp,$idi));
+ &add (&LB($idx),&LB("eax"));
+ &add ($idi,1);
+ &mov (&LB("ebx"),&BP(0,$out,$idx));
+ &jnz (&label("cnowrap"));
+ &mov ($idi,&DWP(-4,$out));
+ &set_label("cnowrap");
+ &mov (&BP(0,$out,$idx),&LB("eax"));
+ &mov (&BP(0,$out,$ido),&LB("ebx"));
+ &add (&LB($ido),1);
+ &jnc (&label("c2ndloop"));
+
+ &mov (&DWP(256,$out),-1); # mark schedule as compressed
+
+&set_label("exit");
+ &xor ("eax","eax");
+ &mov (&DWP(-8,$out),"eax"); # key->x=0;
+ &mov (&DWP(-4,$out),"eax"); # key->y=0;
+&function_end("RC4_set_key");
+
+# const char *RC4_options(void);
+&function_begin_B("RC4_options");
+ &call (&label("pic_point"));
+&set_label("pic_point");
+ &blindpop("eax");
+ &lea ("eax",&DWP(&label("opts")."-".&label("pic_point"),"eax"));
+ &picmeup("edx","OPENSSL_ia32cap_P");
+ &bt (&DWP(0,"edx"),20);
+ &jnc (&label("skip"));
+ &add ("eax",12);
+ &set_label("skip");
+ &ret ();
+&set_label("opts",64);
+&asciz ("rc4(4x,int)");
+&asciz ("rc4(1x,char)");
+&asciz ("RC4 for x86, CRYPTOGAMS by <appro\@openssl.org>");
+&align (64);
+&function_end_B("RC4_options");
+
+&asm_finish();
+
diff --git a/openssl/crypto/rc4/asm/rc4-ia64.pl b/openssl/crypto/rc4/asm/rc4-ia64.pl
new file mode 100644
index 00000000..49cd5b5e
--- /dev/null
+++ b/openssl/crypto/rc4/asm/rc4-ia64.pl
@@ -0,0 +1,755 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by David Mosberger <David.Mosberger@acm.org> based on the
+# Itanium optimized Crypto code which was released by HP Labs at
+# http://www.hpl.hp.com/research/linux/crypto/.
+#
+# Copyright (c) 2005 Hewlett-Packard Development Company, L.P.
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+
+# This is a little helper program which generates a software-pipelined
+# loop for RC4 encryption. The basic algorithm looks like this:
+#
+# for (counter = 0; counter < len; ++counter)
+# {
+# in = inp[counter];
+# SI = S[I];
+# J = (SI + J) & 0xff;
+# SJ = S[J];
+# T = (SI + SJ) & 0xff;
+# S[I] = SJ, S[J] = SI;
+# ST = S[T];
+# outp[counter] = in ^ ST;
+# I = (I + 1) & 0xff;
+# }
+#
+# Pipelining this loop isn't easy, because the stores to the S[] array
+# need to be observed in the right order. The loop generated by the
+# code below has the following pipeline diagram:
+#
+# cycle
+# | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |10 |11 |12 |13 |14 |15 |16 |17 |
+# iter
+# 1: xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
+# 2: xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
+# 3: xxx LDI xxx xxx xxx LDJ xxx SWP xxx LDT xxx xxx
+#
+# where:
+# LDI = load of S[I]
+# LDJ = load of S[J]
+# SWP = swap of S[I] and S[J]
+# LDT = load of S[T]
+#
+# Note that in the above diagram, the major trouble-spot is that LDI
+# of the 2nd iteration is performed BEFORE the SWP of the first
+# iteration. Fortunately, this is easy to detect (I of the 1st
+# iteration will be equal to J of the 2nd iteration) and when this
+# happens, we simply forward the proper value from the 1st iteration
+# to the 2nd one. The proper value in this case is simply the value
+# of S[I] from the first iteration (thanks to the fact that SWP
+# simply swaps the contents of S[I] and S[J]).
+#
+# Another potential trouble-spot is in cycle 7, where SWP of the 1st
+# iteration issues at the same time as the LDI of the 3rd iteration.
+# However, thanks to IA-64 execution semantics, this can be taken
+# care of simply by placing LDI later in the instruction-group than
+# SWP. IA-64 CPUs will automatically forward the value if they
+# detect that the SWP and LDI are accessing the same memory-location.
+
+# The core-loop that can be pipelined then looks like this (annotated
+# with McKinley/Madison issue port & latency numbers, assuming L1
+# cache hits for the most part):
+
+# operation: instruction: issue-ports: latency
+# ------------------ ----------------------------- ------------- -------
+
+# Data = *inp++ ld1 data = [inp], 1 M0-M1 1 cyc c0
+# shladd Iptr = I, KeyTable, 3 M0-M3, I0, I1 1 cyc
+# I = (I + 1) & 0xff padd1 nextI = I, one M0-M3, I0, I1 3 cyc
+# ;;
+# SI = S[I] ld8 SI = [Iptr] M0-M1 1 cyc c1 * after SWAP!
+# ;;
+# cmp.eq.unc pBypass = I, J * after J is valid!
+# J = SI + J add J = J, SI M0-M3, I0, I1 1 cyc c2
+# (pBypass) br.cond.spnt Bypass
+# ;;
+# ---------------------------------------------------------------------------------------
+# J = J & 0xff zxt1 J = J I0, I1, 1 cyc c3
+# ;;
+# shladd Jptr = J, KeyTable, 3 M0-M3, I0, I1 1 cyc c4
+# ;;
+# SJ = S[J] ld8 SJ = [Jptr] M0-M1 1 cyc c5
+# ;;
+# ---------------------------------------------------------------------------------------
+# T = (SI + SJ) add T = SI, SJ M0-M3, I0, I1 1 cyc c6
+# ;;
+# T = T & 0xff zxt1 T = T I0, I1 1 cyc
+# S[I] = SJ st8 [Iptr] = SJ M2-M3 c7
+# S[J] = SI st8 [Jptr] = SI M2-M3
+# ;;
+# shladd Tptr = T, KeyTable, 3 M0-M3, I0, I1 1 cyc c8
+# ;;
+# ---------------------------------------------------------------------------------------
+# T = S[T] ld8 T = [Tptr] M0-M1 1 cyc c9
+# ;;
+# data ^= T xor data = data, T M0-M3, I0, I1 1 cyc c10
+# ;;
+# *out++ = Data ^ T dep word = word, data, 8, POS I0, I1 1 cyc c11
+# ;;
+# ---------------------------------------------------------------------------------------
+
+# There are several points worth making here:
+
+# - Note that due to the bypass/forwarding-path, the first two
+# phases of the loop are strangely mingled together. In
+# particular, note that the first stage of the pipeline is
+# using the value of "J", as calculated by the second stage.
+# - Each bundle-pair will have exactly 6 instructions.
+# - Pipelined, the loop can execute in 3 cycles/iteration and
+# 4 stages. However, McKinley/Madison can issue "st1" to
+# the same bank at a rate of at most one per 4 cycles. Thus,
+# instead of storing each byte, we accumulate them in a word
+# and then write them back at once with a single "st8" (this
+# implies that the setup code needs to ensure that the output
+# buffer is properly aligned, if need be, by encoding the
+# first few bytes separately); see the sketch after this list.
+# - There is no space for a "br.ctop" instruction. For this
+# reason we can't use the modulo-loop support of IA-64 and have
+# to do a traditional, purely software-pipelined loop.
+# - We can't replace any of the remaining "add/zxt1" pairs with
+# "padd1" because the latency for that instruction is too high
+# and would push the loop to the point where more bypasses
+# would be needed, which we don't have space for.
+# - The above loop runs at around 3.26 cycles/byte, or roughly
+# 440 MByte/sec on a 1.5GHz Madison. This is well below the
+# system bus bandwidth and hence with judicious use of
+# "lfetch" this loop can run at (almost) peak speed even when
+# the input and output data reside in memory. The
+# max. latency that can be tolerated is (PREFETCH_DISTANCE *
+# L2_LINE_SIZE * 3 cyc), or about 384 cycles assuming (at
+# least) 1-ahead prefetching of 128 byte cache-lines. Note
+# that we do NOT prefetch into L1, since that would only
+# interfere with the S[] table values stored there. This is
+# acceptable because there is a 10 cycle latency between
+# load and first use of the input data.
+# - We use a branch to out-of-line bypass-code because of cycle-pressure:
+# we calculate the next J, check for the need to activate the
+# bypass path, and activate the bypass path ALL IN THE SAME
+# CYCLE. If we didn't have these constraints, we could do
+# the bypass with a simple conditional move instruction.
+# Fortunately, the bypass paths get activated relatively
+# infrequently, so the extra branches don't cost all that much
+# (about 0.04 cycles/byte, measured on a 16396 byte file with
+# random input data).
+#
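+# As a hedged C model of the st8-accumulation point above (helper name
+# hypothetical; BYTE_POS(n) = 8*n on little-endian, see below):
+#
+#	#include <stdint.h>
+#	#include <string.h>
+#
+#	static void flush8(uint8_t *out, const uint8_t byte[8])
+#	{
+#		uint64_t word = 0;
+#		for (int n = 0; n < 8; n++)	/* "dep" deposits byte n */
+#			word |= (uint64_t)byte[n] << (8 * n);
+#		memcpy(out, &word, 8);	/* one st8 instead of eight st1 */
+#	}
+#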
+
+$phases = 4; # number of stages/phases in the pipelined-loop
+$unroll_count = 6; # number of times we unrolled it
+$pComI = (1 << 0);
+$pComJ = (1 << 1);
+$pComT = (1 << 2);
+$pOut = (1 << 3);
+
+$NData = 4;
+$NIP = 3;
+$NJP = 2;
+$NI = 2;
+$NSI = 3;
+$NSJ = 2;
+$NT = 2;
+$NOutWord = 2;
+
+#
+# $threshold is the minimum length before we attempt to use the
+# big software-pipelined loop. It MUST be greater-or-equal
+# to:
+# PHASES * (UNROLL_COUNT + 1) + 7
+#
+# The "+ 7" comes from the fact we may have to encode up to
+# 7 bytes separately before the output pointer is aligned.
+#
+$threshold = (3 * ($phases * ($unroll_count + 1)) + 7);
+
+sub I {
+ local *code = shift;
+ local $format = shift;
+ $code .= sprintf ("\t\t".$format."\n", @_);
+}
+
+sub P {
+ local *code = shift;
+ local $format = shift;
+ $code .= sprintf ($format."\n", @_);
+}
+
+sub STOP {
+ local *code = shift;
+ $code .=<<___;
+ ;;
+___
+}
+
+sub emit_body {
+ local *c = shift;
+ local *bypass = shift;
+ local ($iteration, $p) = @_;
+
+ local $i0 = $iteration;
+ local $i1 = $iteration - 1;
+ local $i2 = $iteration - 2;
+ local $i3 = $iteration - 3;
+ local $iw0 = ($iteration - 3) / 8;
+ local $iw1 = ($iteration > 3) ? ($iteration - 4) / 8 : 1;
+ local $byte_num = ($iteration - 3) % 8;
+ local $label = $iteration + 1;
+ local $pAny = ($p & 0xf) == 0xf;
+ local $pByp = (($p & $pComI) && ($iteration > 0));
+
+ $c.=<<___;
+//////////////////////////////////////////////////
+___
+
+ if (($p & 0xf) == 0) {
+ $c.="#ifdef HOST_IS_BIG_ENDIAN\n";
+ &I(\$c,"shr.u OutWord[%u] = OutWord[%u], 32;;",
+ $iw1 % $NOutWord, $iw1 % $NOutWord);
+ $c.="#endif\n";
+ &I(\$c, "st4 [OutPtr] = OutWord[%u], 4", $iw1 % $NOutWord);
+ return;
+ }
+
+ # Cycle 0
+ &I(\$c, "{ .mmi") if ($pAny);
+ &I(\$c, "ld1 Data[%u] = [InPtr], 1", $i0 % $NData) if ($p & $pComI);
+ &I(\$c, "padd1 I[%u] = One, I[%u]", $i0 % $NI, $i1 % $NI)if ($p & $pComI);
+ &I(\$c, "zxt1 J = J") if ($p & $pComJ);
+ &I(\$c, "}") if ($pAny);
+ &I(\$c, "{ .mmi") if ($pAny);
+ &I(\$c, "LKEY T[%u] = [T[%u]]", $i1 % $NT, $i1 % $NT) if ($p & $pOut);
+ &I(\$c, "add T[%u] = SI[%u], SJ[%u]",
+ $i0 % $NT, $i2 % $NSI, $i1 % $NSJ) if ($p & $pComT);
+ &I(\$c, "KEYADDR(IPr[%u], I[%u])", $i0 % $NIP, $i1 % $NI) if ($p & $pComI);
+ &I(\$c, "}") if ($pAny);
+ &STOP(\$c);
+
+ # Cycle 1
+ &I(\$c, "{ .mmi") if ($pAny);
+ &I(\$c, "SKEY [IPr[%u]] = SJ[%u]", $i2 % $NIP, $i1%$NSJ)if ($p & $pComT);
+ &I(\$c, "SKEY [JP[%u]] = SI[%u]", $i1 % $NJP, $i2%$NSI) if ($p & $pComT);
+ &I(\$c, "zxt1 T[%u] = T[%u]", $i0 % $NT, $i0 % $NT) if ($p & $pComT);
+ &I(\$c, "}") if ($pAny);
+ &I(\$c, "{ .mmi") if ($pAny);
+ &I(\$c, "LKEY SI[%u] = [IPr[%u]]", $i0 % $NSI, $i0%$NIP)if ($p & $pComI);
+ &I(\$c, "KEYADDR(JP[%u], J)", $i0 % $NJP) if ($p & $pComJ);
+ &I(\$c, "xor Data[%u] = Data[%u], T[%u]",
+ $i3 % $NData, $i3 % $NData, $i1 % $NT) if ($p & $pOut);
+ &I(\$c, "}") if ($pAny);
+ &STOP(\$c);
+
+ # Cycle 2
+ &I(\$c, "{ .mmi") if ($pAny);
+ &I(\$c, "LKEY SJ[%u] = [JP[%u]]", $i0 % $NSJ, $i0%$NJP) if ($p & $pComJ);
+ &I(\$c, "cmp.eq pBypass, p0 = I[%u], J", $i1 % $NI) if ($pByp);
+ &I(\$c, "dep OutWord[%u] = Data[%u], OutWord[%u], BYTE_POS(%u), 8",
+ $iw0%$NOutWord, $i3%$NData, $iw1%$NOutWord, $byte_num) if ($p & $pOut);
+ &I(\$c, "}") if ($pAny);
+ &I(\$c, "{ .mmb") if ($pAny);
+ &I(\$c, "add J = J, SI[%u]", $i0 % $NSI) if ($p & $pComI);
+ &I(\$c, "KEYADDR(T[%u], T[%u])", $i0 % $NT, $i0 % $NT) if ($p & $pComT);
+ &P(\$c, "(pBypass)\tbr.cond.spnt.many .rc4Bypass%u",$label)if ($pByp);
+ &I(\$c, "}") if ($pAny);
+ &STOP(\$c);
+
+ &P(\$c, ".rc4Resume%u:", $label) if ($pByp);
+ if ($byte_num == 0 && $iteration >= $phases) {
+ &I(\$c, "st8 [OutPtr] = OutWord[%u], 8",
+ $iw1 % $NOutWord) if ($p & $pOut);
+ if ($iteration == (1 + $unroll_count) * $phases - 1) {
+ if ($unroll_count == 6) {
+ &I(\$c, "mov OutWord[%u] = OutWord[%u]",
+ $iw1 % $NOutWord, $iw0 % $NOutWord);
+ }
+ &I(\$c, "lfetch.nt1 [InPrefetch], %u",
+ $unroll_count * $phases);
+ &I(\$c, "lfetch.excl.nt1 [OutPrefetch], %u",
+ $unroll_count * $phases);
+ &I(\$c, "br.cloop.sptk.few .rc4Loop");
+ }
+ }
+
+ if ($pByp) {
+ &P(\$bypass, ".rc4Bypass%u:", $label);
+ &I(\$bypass, "sub J = J, SI[%u]", $i0 % $NSI);
+ &I(\$bypass, "nop 0");
+ &I(\$bypass, "nop 0");
+ &I(\$bypass, ";;");
+ &I(\$bypass, "add J = J, SI[%u]", $i1 % $NSI);
+ &I(\$bypass, "mov SI[%u] = SI[%u]", $i0 % $NSI, $i1 % $NSI);
+ &I(\$bypass, "br.sptk.many .rc4Resume%u\n", $label);
+ &I(\$bypass, ";;");
+ }
+}
+
+$code=<<___;
+.ident \"rc4-ia64.s, version 3.0\"
+.ident \"Copyright (c) 2005 Hewlett-Packard Development Company, L.P.\"
+
+#define LCSave r8
+#define PRSave r9
+
+/* Inputs become invalid once rotation begins! */
+
+#define StateTable in0
+#define DataLen in1
+#define InputBuffer in2
+#define OutputBuffer in3
+
+#define KTable r14
+#define J r15
+#define InPtr r16
+#define OutPtr r17
+#define InPrefetch r18
+#define OutPrefetch r19
+#define One r20
+#define LoopCount r21
+#define Remainder r22
+#define IFinal r23
+#define EndPtr r24
+
+#define tmp0 r25
+#define tmp1 r26
+
+#define pBypass p6
+#define pDone p7
+#define pSmall p8
+#define pAligned p9
+#define pUnaligned p10
+
+#define pComputeI pPhase[0]
+#define pComputeJ pPhase[1]
+#define pComputeT pPhase[2]
+#define pOutput pPhase[3]
+
+#define RetVal r8
+#define L_OK p7
+#define L_NOK p8
+
+#define _NINPUTS 4
+#define _NOUTPUT 0
+
+#define _NROTATE 24
+#define _NLOCALS (_NROTATE - _NINPUTS - _NOUTPUT)
+
+#ifndef SZ
+# define SZ 4 // this must be set to sizeof(RC4_INT)
+#endif
+
+#if SZ == 1
+# define LKEY ld1
+# define SKEY st1
+# define KEYADDR(dst, i) add dst = i, KTable
+#elif SZ == 2
+# define LKEY ld2
+# define SKEY st2
+# define KEYADDR(dst, i) shladd dst = i, 1, KTable
+#elif SZ == 4
+# define LKEY ld4
+# define SKEY st4
+# define KEYADDR(dst, i) shladd dst = i, 2, KTable
+#else
+# define LKEY ld8
+# define SKEY st8
+# define KEYADDR(dst, i) shladd dst = i, 3, KTable
+#endif
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+# define ADDP addp4
+#else
+# define ADDP add
+#endif
+
+/* Define a macro for the bit number of the n-th byte: */
+
+#if defined(_HPUX_SOURCE) || defined(B_ENDIAN)
+# define HOST_IS_BIG_ENDIAN
+# define BYTE_POS(n) (56 - (8 * (n)))
+#else
+# define BYTE_POS(n) (8 * (n))
+#endif
+
+/*
+ We must perform the first phase of the pipeline explicitly since
+ we will always load from the state table the first time. The br.cexit
+ will never be taken, regardless of the number of bytes, because
+ the epilogue count is 4.
+*/
+/* MODSCHED_RC4 macro was split to _PROLOGUE and _LOOP, because HP-UX
+ assembler failed on original macro with syntax error. <appro> */
+#define MODSCHED_RC4_PROLOGUE \\
+ { \\
+ ld1 Data[0] = [InPtr], 1; \\
+ add IFinal = 1, I[1]; \\
+ KEYADDR(IPr[0], I[1]); \\
+ } ;; \\
+ { \\
+ LKEY SI[0] = [IPr[0]]; \\
+ mov pr.rot = 0x10000; \\
+ mov ar.ec = 4; \\
+ } ;; \\
+ { \\
+ add J = J, SI[0]; \\
+ zxt1 I[0] = IFinal; \\
+ br.cexit.spnt.few .+16; /* never taken */ \\
+ } ;;
+#define MODSCHED_RC4_LOOP(label) \\
+label: \\
+ { .mmi; \\
+ (pComputeI) ld1 Data[0] = [InPtr], 1; \\
+ (pComputeI) add IFinal = 1, I[1]; \\
+ (pComputeJ) zxt1 J = J; \\
+ }{ .mmi; \\
+ (pOutput) LKEY T[1] = [T[1]]; \\
+ (pComputeT) add T[0] = SI[2], SJ[1]; \\
+ (pComputeI) KEYADDR(IPr[0], I[1]); \\
+ } ;; \\
+ { .mmi; \\
+ (pComputeT) SKEY [IPr[2]] = SJ[1]; \\
+ (pComputeT) SKEY [JP[1]] = SI[2]; \\
+ (pComputeT) zxt1 T[0] = T[0]; \\
+ }{ .mmi; \\
+ (pComputeI) LKEY SI[0] = [IPr[0]]; \\
+ (pComputeJ) KEYADDR(JP[0], J); \\
+ (pComputeI) cmp.eq.unc pBypass, p0 = I[1], J; \\
+ } ;; \\
+ { .mmi; \\
+ (pComputeJ) LKEY SJ[0] = [JP[0]]; \\
+ (pOutput) xor Data[3] = Data[3], T[1]; \\
+ nop 0x0; \\
+ }{ .mmi; \\
+ (pComputeT) KEYADDR(T[0], T[0]); \\
+ (pBypass) mov SI[0] = SI[1]; \\
+ (pComputeI) zxt1 I[0] = IFinal; \\
+ } ;; \\
+ { .mmb; \\
+ (pOutput) st1 [OutPtr] = Data[3], 1; \\
+ (pComputeI) add J = J, SI[0]; \\
+ br.ctop.sptk.few label; \\
+ } ;;
+
+ .text
+
+ .align 32
+
+ .type RC4, \@function
+ .global RC4
+
+ .proc RC4
+ .prologue
+
+RC4:
+ {
+ .mmi
+ alloc r2 = ar.pfs, _NINPUTS, _NLOCALS, _NOUTPUT, _NROTATE
+
+ .rotr Data[4], I[2], IPr[3], SI[3], JP[2], SJ[2], T[2], \\
+ OutWord[2]
+ .rotp pPhase[4]
+
+ ADDP InPrefetch = 0, InputBuffer
+ ADDP KTable = 0, StateTable
+ }
+ {
+ .mmi
+ ADDP InPtr = 0, InputBuffer
+ ADDP OutPtr = 0, OutputBuffer
+ mov RetVal = r0
+ }
+ ;;
+ {
+ .mmi
+ lfetch.nt1 [InPrefetch], 0x80
+ ADDP OutPrefetch = 0, OutputBuffer
+ }
+ { // Return 0 if the input length is nonsensical
+ .mib
+ ADDP StateTable = 0, StateTable
+ cmp.ge.unc L_NOK, L_OK = r0, DataLen
+ (L_NOK) br.ret.sptk.few rp
+ }
+ ;;
+ {
+ .mib
+ cmp.eq.or L_NOK, L_OK = r0, InPtr
+ cmp.eq.or L_NOK, L_OK = r0, OutPtr
+ nop 0x0
+ }
+ {
+ .mib
+ cmp.eq.or L_NOK, L_OK = r0, StateTable
+ nop 0x0
+ (L_NOK) br.ret.sptk.few rp
+ }
+ ;;
+ LKEY I[1] = [KTable], SZ
+/* Prefetch the state-table. It contains 256 elements of size SZ */
+
+#if SZ == 1
+ ADDP tmp0 = 1*128, StateTable
+#elif SZ == 2
+ ADDP tmp0 = 3*128, StateTable
+ ADDP tmp1 = 2*128, StateTable
+#elif SZ == 4
+ ADDP tmp0 = 7*128, StateTable
+ ADDP tmp1 = 6*128, StateTable
+#elif SZ == 8
+ ADDP tmp0 = 15*128, StateTable
+ ADDP tmp1 = 14*128, StateTable
+#endif
+ ;;
+#if SZ >= 8
+ lfetch.fault.nt1 [tmp0], -256 // 15
+ lfetch.fault.nt1 [tmp1], -256;;
+ lfetch.fault.nt1 [tmp0], -256 // 13
+ lfetch.fault.nt1 [tmp1], -256;;
+ lfetch.fault.nt1 [tmp0], -256 // 11
+ lfetch.fault.nt1 [tmp1], -256;;
+ lfetch.fault.nt1 [tmp0], -256 // 9
+ lfetch.fault.nt1 [tmp1], -256;;
+#endif
+#if SZ >= 4
+ lfetch.fault.nt1 [tmp0], -256 // 7
+ lfetch.fault.nt1 [tmp1], -256;;
+ lfetch.fault.nt1 [tmp0], -256 // 5
+ lfetch.fault.nt1 [tmp1], -256;;
+#endif
+#if SZ >= 2
+ lfetch.fault.nt1 [tmp0], -256 // 3
+ lfetch.fault.nt1 [tmp1], -256;;
+#endif
+ {
+ .mii
+ lfetch.fault.nt1 [tmp0] // 1
+ add I[1]=1,I[1];;
+ zxt1 I[1]=I[1]
+ }
+ {
+ .mmi
+ lfetch.nt1 [InPrefetch], 0x80
+ lfetch.excl.nt1 [OutPrefetch], 0x80
+ .save pr, PRSave
+ mov PRSave = pr
+ } ;;
+ {
+ .mmi
+ lfetch.excl.nt1 [OutPrefetch], 0x80
+ LKEY J = [KTable], SZ
+ ADDP EndPtr = DataLen, InPtr
+ } ;;
+ {
+ .mmi
+ ADDP EndPtr = -1, EndPtr // Make it point to
+ // last data byte.
+ mov One = 1
+ .save ar.lc, LCSave
+ mov LCSave = ar.lc
+ .body
+ } ;;
+ {
+ .mmb
+ sub Remainder = 0, OutPtr
+ cmp.gtu pSmall, p0 = $threshold, DataLen
+(pSmall) br.cond.dpnt .rc4Remainder // Data too small for
+ // big loop.
+ } ;;
+ {
+ .mmi
+ and Remainder = 0x7, Remainder
+ ;;
+ cmp.eq pAligned, pUnaligned = Remainder, r0
+ nop 0x0
+ } ;;
+ {
+ .mmb
+.pred.rel "mutex",pUnaligned,pAligned
+(pUnaligned) add Remainder = -1, Remainder
+(pAligned) sub Remainder = EndPtr, InPtr
+(pAligned) br.cond.dptk.many .rc4Aligned
+ } ;;
+ {
+ .mmi
+ nop 0x0
+ nop 0x0
+ mov.i ar.lc = Remainder
+ }
+
+/* Do the initial few bytes via the compact, modulo-scheduled loop
+ until the output pointer is 8-byte-aligned. */
+
+ MODSCHED_RC4_PROLOGUE
+ MODSCHED_RC4_LOOP(.RC4AlignLoop)
+
+ {
+ .mib
+ sub Remainder = EndPtr, InPtr
+ zxt1 IFinal = IFinal
+ clrrrb // Clear CFM.rrb.pr so
+ ;; // next "mov pr.rot = N"
+ // does the right thing.
+ }
+ {
+ .mmi
+ mov I[1] = IFinal
+ nop 0x0
+ nop 0x0
+ } ;;
+
+
+.rc4Aligned:
+
+/*
+ Unrolled loop count = (Remainder - ($unroll_count+1)*$phases)/($unroll_count*$phases)
+ */
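+
+/*
+ A hedged C model of the divide-free quotient computed below: the movl
+ constant 0xaaaaaaaaaaaaaaab equals (2^65+1)/3, so taking the high 64
+ bits of the 64x64 product (xmpy.hu) and shifting right by 4 divides
+ by 24 = 6*4 (unroll_count * phases):
+
+	#include <stdint.h>
+	static uint64_t div24(uint64_t n)	// hypothetical helper
+	{
+		unsigned __int128 p = (unsigned __int128)n * 0xaaaaaaaaaaaaaaabULL;
+		return (uint64_t)(p >> 64) >> 4;	// high half, then >>4
+	}
+*/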
+
+ {
+ .mlx
+ add LoopCount = 1 - ($unroll_count + 1)*$phases, Remainder
+ movl Remainder = 0xaaaaaaaaaaaaaaab
+ } ;;
+ {
+ .mmi
+ setf.sig f6 = LoopCount // M2, M3 6 cyc
+ setf.sig f7 = Remainder // M2, M3 6 cyc
+ nop 0x0
+ } ;;
+ {
+ .mfb
+ nop 0x0
+ xmpy.hu f6 = f6, f7
+ nop 0x0
+ } ;;
+ {
+ .mmi
+ getf.sig LoopCount = f6;; // M2 5 cyc
+ nop 0x0
+ shr.u LoopCount = LoopCount, 4
+ } ;;
+ {
+ .mmi
+ nop 0x0
+ nop 0x0
+ mov.i ar.lc = LoopCount
+ } ;;
+
+/* Now comes the unrolled loop: */
+
+.rc4Prologue:
+___
+
+$iteration = 0;
+
+# Generate the prologue:
+$predicates = 1;
+for ($i = 0; $i < $phases; ++$i) {
+ &emit_body (\$code, \$bypass, $iteration++, $predicates);
+ $predicates = ($predicates << 1) | 1;
+}
+
+$code.=<<___;
+.rc4Loop:
+___
+
+# Generate the body:
+for ($i = 0; $i < $unroll_count*$phases; ++$i) {
+ &emit_body (\$code, \$bypass, $iteration++, $predicates);
+}
+
+$code.=<<___;
+.rc4Epilogue:
+___
+
+# Generate the epilogue:
+for ($i = 0; $i < $phases; ++$i) {
+ $predicates <<= 1;
+ &emit_body (\$code, \$bypass, $iteration++, $predicates);
+}
+
+$code.=<<___;
+ {
+ .mmi
+ lfetch.nt1 [EndPtr] // fetch line with last byte
+ mov IFinal = I[1]
+ nop 0x0
+ }
+
+.rc4Remainder:
+ {
+ .mmi
+ sub Remainder = EndPtr, InPtr // Calculate
+ // # of bytes
+ // left - 1
+ nop 0x0
+ nop 0x0
+ } ;;
+ {
+ .mib
+ cmp.eq pDone, p0 = -1, Remainder // done already?
+ mov.i ar.lc = Remainder
+(pDone) br.cond.dptk.few .rc4Complete
+ }
+
+/* Do the remaining bytes via the compact, modulo-scheduled loop */
+
+ MODSCHED_RC4_PROLOGUE
+ MODSCHED_RC4_LOOP(.RC4RestLoop)
+
+.rc4Complete:
+ {
+ .mmi
+ add KTable = -SZ, KTable
+ add IFinal = -1, IFinal
+ mov ar.lc = LCSave
+ } ;;
+ {
+ .mii
+ SKEY [KTable] = J,-SZ
+ zxt1 IFinal = IFinal
+ mov pr = PRSave, 0x1FFFF
+ } ;;
+ {
+ .mib
+ SKEY [KTable] = IFinal
+ add RetVal = 1, r0
+ br.ret.sptk.few rp
+ } ;;
+___
+
+# Last but not least, emit the code for the bypass-code of the unrolled loop:
+
+$code.=$bypass;
+
+$code.=<<___;
+ .endp RC4
+___
+
+print $code;
diff --git a/openssl/crypto/rc4/asm/rc4-s390x.pl b/openssl/crypto/rc4/asm/rc4-s390x.pl
new file mode 100644
index 00000000..96681fa0
--- /dev/null
+++ b/openssl/crypto/rc4/asm/rc4-s390x.pl
@@ -0,0 +1,205 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# February 2009
+#
+# Performance is 2x that of gcc 3.4.6 on z10. The coding "secret" is to
+# "cluster" Address Generation Interlocks, so that one pipeline stall
+# resolves several dependencies.
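+#
+# As a hedged C model of the 8x code path below (helper name
+# hypothetical): eight keystream bytes are accumulated big-endian in one
+# register via shift-and-insert (sllg/ic), then a single 8-byte XOR and
+# store (xgr/stg) handle the I/O; memcpy stands in for the 8-byte
+# loads/stores, with no byte swap needed on this big-endian machine:
+#
+#	uint64_t acc = 0, in;
+#	for (int n = 0; n < 8; n++)
+#		acc = (acc << 8) | next_keystream_byte(key);
+#	memcpy(&in, inp, 8);
+#	in ^= acc;
+#	memcpy(out, &in, 8);
+#	inp += 8; out += 8;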
+
+$rp="%r14";
+$sp="%r15";
+$code=<<___;
+.text
+
+___
+
+# void RC4(RC4_KEY *key,size_t len,const void *inp,void *out)
+{
+$acc="%r0";
+$cnt="%r1";
+$key="%r2";
+$len="%r3";
+$inp="%r4";
+$out="%r5";
+
+@XX=("%r6","%r7");
+@TX=("%r8","%r9");
+$YY="%r10";
+$TY="%r11";
+
+$code.=<<___;
+.globl RC4
+.type RC4,\@function
+.align 64
+RC4:
+ stmg %r6,%r11,48($sp)
+ llgc $XX[0],0($key)
+ llgc $YY,1($key)
+ la $XX[0],1($XX[0])
+ nill $XX[0],0xff
+ srlg $cnt,$len,3
+ ltgr $cnt,$cnt
+ llgc $TX[0],2($XX[0],$key)
+ jz .Lshort
+ j .Loop8
+
+.align 64
+.Loop8:
+___
+for ($i=0;$i<8;$i++) {
+$code.=<<___;
+ la $YY,0($YY,$TX[0]) # $i
+ nill $YY,255
+ la $XX[1],1($XX[0])
+ nill $XX[1],255
+___
+$code.=<<___ if ($i==1);
+ llgc $acc,2($TY,$key)
+___
+$code.=<<___ if ($i>1);
+ sllg $acc,$acc,8
+ ic $acc,2($TY,$key)
+___
+$code.=<<___;
+ llgc $TY,2($YY,$key)
+ stc $TX[0],2($YY,$key)
+ llgc $TX[1],2($XX[1],$key)
+ stc $TY,2($XX[0],$key)
+ cr $XX[1],$YY
+ jne .Lcmov$i
+ la $TX[1],0($TX[0])
+.Lcmov$i:
+ la $TY,0($TY,$TX[0])
+ nill $TY,255
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
+}
+
+$code.=<<___;
+ lg $TX[1],0($inp)
+ sllg $acc,$acc,8
+ la $inp,8($inp)
+ ic $acc,2($TY,$key)
+ xgr $acc,$TX[1]
+ stg $acc,0($out)
+ la $out,8($out)
+ brct $cnt,.Loop8
+
+.Lshort:
+ lghi $acc,7
+ ngr $len,$acc
+ jz .Lexit
+ j .Loop1
+
+.align 16
+.Loop1:
+ la $YY,0($YY,$TX[0])
+ nill $YY,255
+ llgc $TY,2($YY,$key)
+ stc $TX[0],2($YY,$key)
+ stc $TY,2($XX[0],$key)
+ ar $TY,$TX[0]
+ ahi $XX[0],1
+ nill $TY,255
+ nill $XX[0],255
+ llgc $acc,0($inp)
+ la $inp,1($inp)
+ llgc $TY,2($TY,$key)
+ llgc $TX[0],2($XX[0],$key)
+ xr $acc,$TY
+ stc $acc,0($out)
+ la $out,1($out)
+ brct $len,.Loop1
+
+.Lexit:
+ ahi $XX[0],-1
+ stc $XX[0],0($key)
+ stc $YY,1($key)
+ lmg %r6,%r11,48($sp)
+ br $rp
+.size RC4,.-RC4
+.string "RC4 for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+
+___
+}
+
+# void RC4_set_key(RC4_KEY *key,unsigned int len,const void *inp)
+{
+$cnt="%r0";
+$idx="%r1";
+$key="%r2";
+$len="%r3";
+$inp="%r4";
+$acc="%r5";
+$dat="%r6";
+$ikey="%r7";
+$iinp="%r8";
+
+$code.=<<___;
+.globl RC4_set_key
+.type RC4_set_key,\@function
+.align 64
+RC4_set_key:
+ stmg %r6,%r8,48($sp)
+ lhi $cnt,256
+ la $idx,0(%r0)
+ sth $idx,0($key)
+.align 4
+.L1stloop:
+ stc $idx,2($idx,$key)
+ la $idx,1($idx)
+ brct $cnt,.L1stloop
+
+ lghi $ikey,-256
+ lr $cnt,$len
+ la $iinp,0(%r0)
+ la $idx,0(%r0)
+.align 16
+.L2ndloop:
+ llgc $acc,2+256($ikey,$key)
+ llgc $dat,0($iinp,$inp)
+ la $idx,0($idx,$acc)
+ la $ikey,1($ikey)
+ la $idx,0($idx,$dat)
+ nill $idx,255
+ la $iinp,1($iinp)
+ tml $ikey,255
+ llgc $dat,2($idx,$key)
+ stc $dat,2+256-1($ikey,$key)
+ stc $acc,2($idx,$key)
+ jz .Ldone
+ brct $cnt,.L2ndloop
+ lr $cnt,$len
+ la $iinp,0(%r0)
+ j .L2ndloop
+.Ldone:
+ lmg %r6,%r8,48($sp)
+ br $rp
+.size RC4_set_key,.-RC4_set_key
+
+___
+}
+
+# const char *RC4_options()
+$code.=<<___;
+.globl RC4_options
+.type RC4_options,\@function
+.align 16
+RC4_options:
+ larl %r2,.Loptions
+ br %r14
+.size RC4_options,.-RC4_options
+.section .rodata
+.Loptions:
+.align 8
+.string "rc4(8x,char)"
+___
+
+print $code;
diff --git a/openssl/crypto/rc4/asm/rc4-x86_64.pl b/openssl/crypto/rc4/asm/rc4-x86_64.pl
new file mode 100755
index 00000000..677be5fe
--- /dev/null
+++ b/openssl/crypto/rc4/asm/rc4-x86_64.pl
@@ -0,0 +1,504 @@
+#!/usr/bin/env perl
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# 2.22x RC4 tune-up:-) It should be noted though that my hand [as in
+# "hand-coded assembler"] doesn't account for the whole improvement
+# coefficient. It turned out that eliminating RC4_CHAR from the config
+# line results in ~40% improvement (yes, even for the C implementation).
+# Presumably it has everything to do with AMD cache architecture and
+# RAW or whatever penalties. Once again! The module *requires* a config
+# line *without* RC4_CHAR! As for the coding "secret," I bet on partial
+# register arithmetic. For example, instead of 'inc %r8; and $255,%r8'
+# I simply use 'inc %r8b'. Even though the optimization manual discourages
+# operating on partial registers, it turned out to be the best bet.
+# At least for AMD... How IA32E would perform remains to be seen...
+
+# As was shown by Marc Bevand, reordering a couple of load operations
+# results in an even higher performance gain of 3.3x:-) At least on
+# Opteron... For reference, 1x in this case is RC4_CHAR C-code
+# compiled with gcc 3.3.2, which performs at ~54MBps per 1GHz clock.
+# The latter means that if you want to *estimate* what to expect from
+# *your* Opteron, then multiply 54 by 3.3 and by the clock frequency in GHz.
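+# (For example, on a hypothetical 2.2GHz Opteron the estimate works out
+# to roughly 54 * 3.3 * 2.2 ≈ 392MBps.)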
+
+# Intel P4 EM64T core was found to run the AMD64 code really slow...
+# The only way to achieve comparable performance on P4 was to keep
+# RC4_CHAR. Kind of ironic, huh? As it's apparently impossible to
+# compose blended code which would perform within even a 30% margin
+# on both AMD and Intel platforms, I implement both cases. See
+# rc4_skey.c for further details...
+
+# The P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
+# those with add/sub results in a 50% performance improvement in the
+# folded loop...
+
+# As was shown by Zou Nanhai, loop unrolling can improve Intel EM64T
+# performance by >30% [unlike the P4 32-bit case, that is]. But this is
+# provided that loads are reordered even more aggressively! Both code
+# paths, AMD64 and EM64T, reorder loads in essentially the same manner
+# as my IA-64 implementation. On Opteron this resulted in a modest 5%
+# improvement [I had to test it], while final Intel P4 performance
+# now achieves a respectable 432MBps on a 2.8GHz processor. For reference:
+# if executed on a Xeon, the current RC4_CHAR code-path is 2.7x faster than
+# the RC4_INT code-path, while if executed on an Opteron, it's only 25%
+# slower than the RC4_INT one [meaning that if CPU µ-arch detection
+# is not implemented, then this final RC4_CHAR code-path should be
+# preferred, as it provides better *all-round* performance].
+
+# Intel Core2 was observed to perform poorly on both code paths:-( It
+# apparently suffers from some kind of partial register stall, which
+# occurs in 64-bit mode only [a virtually identical 32-bit loop was
+# observed to outperform the 64-bit one by almost 50%]. Adding two movzb
+# instructions to cloop1 boosts its performance by 80%! This loop appears
+# to be an optimal fit for Core2 and therefore the code was modified to
+# skip cloop8 on this CPU.
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour $output";
+
+$dat="%rdi"; # arg1
+$len="%rsi"; # arg2
+$inp="%rdx"; # arg3
+$out="%rcx"; # arg4
+
+@XX=("%r8","%r10");
+@TX=("%r9","%r11");
+$YY="%r12";
+$TY="%r13";
+
+$code=<<___;
+.text
+
+.globl RC4
+.type RC4,\@function,4
+.align 16
+RC4: or $len,$len
+ jne .Lentry
+ ret
+.Lentry:
+ push %rbx
+ push %r12
+ push %r13
+.Lprologue:
+
+ add \$8,$dat
+ movl -8($dat),$XX[0]#d
+ movl -4($dat),$YY#d
+ cmpl \$-1,256($dat)
+ je .LRC4_CHAR
+ inc $XX[0]#b
+ movl ($dat,$XX[0],4),$TX[0]#d
+ test \$-8,$len
+ jz .Lloop1
+ jmp .Lloop8
+.align 16
+.Lloop8:
+___
+for ($i=0;$i<8;$i++) {
+$code.=<<___;
+ add $TX[0]#b,$YY#b
+ mov $XX[0],$XX[1]
+ movl ($dat,$YY,4),$TY#d
+ ror \$8,%rax # ror is redundant when $i=0
+ inc $XX[1]#b
+ movl ($dat,$XX[1],4),$TX[1]#d
+ cmp $XX[1],$YY
+ movl $TX[0]#d,($dat,$YY,4)
+ cmove $TX[0],$TX[1]
+ movl $TY#d,($dat,$XX[0],4)
+ add $TX[0]#b,$TY#b
+ movb ($dat,$TY,4),%al
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
+}
+$code.=<<___;
+ ror \$8,%rax
+ sub \$8,$len
+
+ xor ($inp),%rax
+ add \$8,$inp
+ mov %rax,($out)
+ add \$8,$out
+
+ test \$-8,$len
+ jnz .Lloop8
+ cmp \$0,$len
+ jne .Lloop1
+ jmp .Lexit
+
+.align 16
+.Lloop1:
+ add $TX[0]#b,$YY#b
+ movl ($dat,$YY,4),$TY#d
+ movl $TX[0]#d,($dat,$YY,4)
+ movl $TY#d,($dat,$XX[0],4)
+ add $TY#b,$TX[0]#b
+ inc $XX[0]#b
+ movl ($dat,$TX[0],4),$TY#d
+ movl ($dat,$XX[0],4),$TX[0]#d
+ xorb ($inp),$TY#b
+ inc $inp
+ movb $TY#b,($out)
+ inc $out
+ dec $len
+ jnz .Lloop1
+ jmp .Lexit
+
+.align 16
+.LRC4_CHAR:
+ add \$1,$XX[0]#b
+ movzb ($dat,$XX[0]),$TX[0]#d
+ test \$-8,$len
+ jz .Lcloop1
+ cmpl \$0,260($dat)
+ jnz .Lcloop1
+ jmp .Lcloop8
+.align 16
+.Lcloop8:
+ mov ($inp),%eax
+ mov 4($inp),%ebx
+___
+# unroll 2x4-wise, because 64-bit rotates kill Intel P4...
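+# (a hedged reading: bytes 0-3 rotate through %eax and bytes 4-7 through
+# %ebx using 32-bit "ror $8", i.e. acc = (acc >> 8) | (acc << 24) in C
+# terms, so no 64-bit rotate is ever issued on this code path)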
+for ($i=0;$i<4;$i++) {
+$code.=<<___;
+ add $TX[0]#b,$YY#b
+ lea 1($XX[0]),$XX[1]
+ movzb ($dat,$YY),$TY#d
+ movzb $XX[1]#b,$XX[1]#d
+ movzb ($dat,$XX[1]),$TX[1]#d
+ movb $TX[0]#b,($dat,$YY)
+ cmp $XX[1],$YY
+ movb $TY#b,($dat,$XX[0])
+ jne .Lcmov$i # Intel cmov is sloooow...
+ mov $TX[0],$TX[1]
+.Lcmov$i:
+ add $TX[0]#b,$TY#b
+ xor ($dat,$TY),%al
+ ror \$8,%eax
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
+}
+for ($i=4;$i<8;$i++) {
+$code.=<<___;
+ add $TX[0]#b,$YY#b
+ lea 1($XX[0]),$XX[1]
+ movzb ($dat,$YY),$TY#d
+ movzb $XX[1]#b,$XX[1]#d
+ movzb ($dat,$XX[1]),$TX[1]#d
+ movb $TX[0]#b,($dat,$YY)
+ cmp $XX[1],$YY
+ movb $TY#b,($dat,$XX[0])
+ jne .Lcmov$i # Intel cmov is sloooow...
+ mov $TX[0],$TX[1]
+.Lcmov$i:
+ add $TX[0]#b,$TY#b
+ xor ($dat,$TY),%bl
+ ror \$8,%ebx
+___
+push(@TX,shift(@TX)); push(@XX,shift(@XX)); # "rotate" registers
+}
+$code.=<<___;
+ lea -8($len),$len
+ mov %eax,($out)
+ lea 8($inp),$inp
+ mov %ebx,4($out)
+ lea 8($out),$out
+
+ test \$-8,$len
+ jnz .Lcloop8
+ cmp \$0,$len
+ jne .Lcloop1
+ jmp .Lexit
+___
+$code.=<<___;
+.align 16
+.Lcloop1:
+ add $TX[0]#b,$YY#b
+ movzb ($dat,$YY),$TY#d
+ movb $TX[0]#b,($dat,$YY)
+ movb $TY#b,($dat,$XX[0])
+ add $TX[0]#b,$TY#b
+ add \$1,$XX[0]#b
+ movzb $TY#b,$TY#d
+ movzb $XX[0]#b,$XX[0]#d
+ movzb ($dat,$TY),$TY#d
+ movzb ($dat,$XX[0]),$TX[0]#d
+ xorb ($inp),$TY#b
+ lea 1($inp),$inp
+ movb $TY#b,($out)
+ lea 1($out),$out
+ sub \$1,$len
+ jnz .Lcloop1
+ jmp .Lexit
+
+.align 16
+.Lexit:
+ sub \$1,$XX[0]#b
+ movl $XX[0]#d,-8($dat)
+ movl $YY#d,-4($dat)
+
+ mov (%rsp),%r13
+ mov 8(%rsp),%r12
+ mov 16(%rsp),%rbx
+ add \$24,%rsp
+.Lepilogue:
+ ret
+.size RC4,.-RC4
+___
+
+$idx="%r8";
+$ido="%r9";
+
+$code.=<<___;
+.extern OPENSSL_ia32cap_P
+.globl RC4_set_key
+.type RC4_set_key,\@function,3
+.align 16
+RC4_set_key:
+ lea 8($dat),$dat
+ lea ($inp,$len),$inp
+ neg $len
+ mov $len,%rcx
+ xor %eax,%eax
+ xor $ido,$ido
+ xor %r10,%r10
+ xor %r11,%r11
+
+ mov OPENSSL_ia32cap_P(%rip),$idx#d
+ bt \$20,$idx#d
+ jnc .Lw1stloop
+ bt \$30,$idx#d
+ setc $ido#b
+ mov $ido#d,260($dat)
+ jmp .Lc1stloop
+
+.align 16
+.Lw1stloop:
+ mov %eax,($dat,%rax,4)
+ add \$1,%al
+ jnc .Lw1stloop
+
+ xor $ido,$ido
+ xor $idx,$idx
+.align 16
+.Lw2ndloop:
+ mov ($dat,$ido,4),%r10d
+ add ($inp,$len,1),$idx#b
+ add %r10b,$idx#b
+ add \$1,$len
+ mov ($dat,$idx,4),%r11d
+ cmovz %rcx,$len
+ mov %r10d,($dat,$idx,4)
+ mov %r11d,($dat,$ido,4)
+ add \$1,$ido#b
+ jnc .Lw2ndloop
+ jmp .Lexit_key
+
+.align 16
+.Lc1stloop:
+ mov %al,($dat,%rax)
+ add \$1,%al
+ jnc .Lc1stloop
+
+ xor $ido,$ido
+ xor $idx,$idx
+.align 16
+.Lc2ndloop:
+ mov ($dat,$ido),%r10b
+ add ($inp,$len),$idx#b
+ add %r10b,$idx#b
+ add \$1,$len
+ mov ($dat,$idx),%r11b
+ jnz .Lcnowrap
+ mov %rcx,$len
+.Lcnowrap:
+ mov %r10b,($dat,$idx)
+ mov %r11b,($dat,$ido)
+ add \$1,$ido#b
+ jnc .Lc2ndloop
+ movl \$-1,256($dat)
+
+.align 16
+.Lexit_key:
+ xor %eax,%eax
+ mov %eax,-8($dat)
+ mov %eax,-4($dat)
+ ret
+.size RC4_set_key,.-RC4_set_key
+
+.globl RC4_options
+.type RC4_options,\@abi-omnipotent
+.align 16
+RC4_options:
+ lea .Lopts(%rip),%rax
+ mov OPENSSL_ia32cap_P(%rip),%edx
+ bt \$20,%edx
+ jnc .Ldone
+ add \$12,%rax
+ bt \$30,%edx
+ jnc .Ldone
+ add \$13,%rax
+.Ldone:
+ ret
+.align 64
+.Lopts:
+.asciz "rc4(8x,int)"
+.asciz "rc4(8x,char)"
+.asciz "rc4(1x,char)"
+.asciz "RC4 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 64
+.size RC4_options,.-RC4_options
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type stream_se_handler,\@abi-omnipotent
+.align 16
+stream_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lprologue(%rip),%r10
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lepilogue(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ lea 24(%rax),%rax
+
+ mov -8(%rax),%rbx
+ mov -16(%rax),%r12
+ mov -24(%rax),%r13
+ mov %rbx,144($context) # restore context->Rbx
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ jmp .Lcommon_seh_exit
+.size stream_se_handler,.-stream_se_handler
+
+.type key_se_handler,\@abi-omnipotent
+.align 16
+key_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 152($context),%rax # pull context->Rsp
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+.Lcommon_seh_exit:
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size key_se_handler,.-key_se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_RC4
+ .rva .LSEH_end_RC4
+ .rva .LSEH_info_RC4
+
+ .rva .LSEH_begin_RC4_set_key
+ .rva .LSEH_end_RC4_set_key
+ .rva .LSEH_info_RC4_set_key
+
+.section .xdata
+.align 8
+.LSEH_info_RC4:
+ .byte 9,0,0,0
+ .rva stream_se_handler
+.LSEH_info_RC4_set_key:
+ .byte 9,0,0,0
+ .rva key_se_handler
+___
+}
+
+$code =~ s/#([bwd])/$1/gm;
+
+print $code;
+
+close STDOUT;