Diffstat (limited to 'app/openssl/crypto/modes/asm')
-rw-r--r--  app/openssl/crypto/modes/asm/ghash-armv4.S       248
-rw-r--r--  app/openssl/crypto/modes/asm/ghash-armv4.pl      229
-rw-r--r--  app/openssl/crypto/modes/asm/ghashv8-armx-64.S   115
-rw-r--r--  app/openssl/crypto/modes/asm/ghashv8-armx.S      116
-rw-r--r--  app/openssl/crypto/modes/asm/ghashv8-armx.pl     240
5 files changed, 150 insertions, 798 deletions
diff --git a/app/openssl/crypto/modes/asm/ghash-armv4.S b/app/openssl/crypto/modes/asm/ghash-armv4.S
index 6c453774..d66c4cbf 100644
--- a/app/openssl/crypto/modes/asm/ghash-armv4.S
+++ b/app/openssl/crypto/modes/asm/ghash-armv4.S
@@ -309,213 +309,99 @@ gcm_gmult_4bit:
#if __ARM_ARCH__>=7
.fpu neon
-.global gcm_init_neon
-.type gcm_init_neon,%function
-.align 4
-gcm_init_neon:
- vld1.64 d7,[r1,:64]! @ load H
- vmov.i8 q8,#0xe1
- vld1.64 d6,[r1,:64]
- vshl.i64 d17,#57
- vshr.u64 d16,#63 @ t0=0xc2....01
- vdup.8 q9,d7[7]
- vshr.u64 d26,d6,#63
- vshr.s8 q9,#7 @ broadcast carry bit
- vshl.i64 q3,q3,#1
- vand q8,q8,q9
- vorr d7,d26 @ H<<<=1
- veor q3,q3,q8 @ twisted H
- vstmia r0,{q3}
-
- bx lr @ bx lr
-.size gcm_init_neon,.-gcm_init_neon
-
.global gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
- vld1.64 d7,[r0,:64]! @ load Xi
- vld1.64 d6,[r0,:64]!
- vmov.i64 d29,#0x0000ffffffffffff
- vldmia r1,{d26-d27} @ load twisted H
- vmov.i64 d30,#0x00000000ffffffff
+ sub r1,#16 @ point at H in GCM128_CTX
+ vld1.64 d29,[r0,:64]!@ load Xi
+ vmov.i32 d5,#0xe1 @ our irreducible polynomial
+ vld1.64 d28,[r0,:64]!
+ vshr.u64 d5,#32
+ vldmia r1,{d0-d1} @ load H
+ veor q12,q12
#ifdef __ARMEL__
- vrev64.8 q3,q3
+ vrev64.8 q14,q14
#endif
- vmov.i64 d31,#0x000000000000ffff
- veor d28,d26,d27 @ Karatsuba pre-processing
+ veor q13,q13
+ veor q11,q11
+ mov r1,#16
+ veor q10,q10
mov r3,#16
- b .Lgmult_neon
+ veor d2,d2
+ vdup.8 d4,d28[0] @ broadcast lowest byte
+ b .Linner_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.global gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
- vld1.64 d1,[r0,:64]! @ load Xi
- vld1.64 d0,[r0,:64]!
- vmov.i64 d29,#0x0000ffffffffffff
- vldmia r1,{d26-d27} @ load twisted H
- vmov.i64 d30,#0x00000000ffffffff
+ vld1.64 d21,[r0,:64]! @ load Xi
+ vmov.i32 d5,#0xe1 @ our irreducible polynomial
+ vld1.64 d20,[r0,:64]!
+ vshr.u64 d5,#32
+ vldmia r0,{d0-d1} @ load H
+ veor q12,q12
+ nop
#ifdef __ARMEL__
- vrev64.8 q0,q0
+ vrev64.8 q10,q10
#endif
- vmov.i64 d31,#0x000000000000ffff
- veor d28,d26,d27 @ Karatsuba pre-processing
-
-.Loop_neon:
- vld1.64 d7,[r2]! @ load inp
- vld1.64 d6,[r2]!
+.Louter_neon:
+ vld1.64 d29,[r2]! @ load inp
+ veor q13,q13
+ vld1.64 d28,[r2]!
+ veor q11,q11
+ mov r1,#16
#ifdef __ARMEL__
- vrev64.8 q3,q3
+ vrev64.8 q14,q14
#endif
- veor q3,q0 @ inp^=Xi
-.Lgmult_neon:
- vext.8 d16, d26, d26, #1 @ A1
- vmull.p8 q8, d16, d6 @ F = A1*B
- vext.8 d0, d6, d6, #1 @ B1
- vmull.p8 q0, d26, d0 @ E = A*B1
- vext.8 d18, d26, d26, #2 @ A2
- vmull.p8 q9, d18, d6 @ H = A2*B
- vext.8 d22, d6, d6, #2 @ B2
- vmull.p8 q11, d26, d22 @ G = A*B2
- vext.8 d20, d26, d26, #3 @ A3
- veor q8, q8, q0 @ L = E + F
- vmull.p8 q10, d20, d6 @ J = A3*B
- vext.8 d0, d6, d6, #3 @ B3
- veor q9, q9, q11 @ M = G + H
- vmull.p8 q0, d26, d0 @ I = A*B3
- veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
- vand d17, d17, d29
- vext.8 d22, d6, d6, #4 @ B4
- veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
- vand d19, d19, d30
- vmull.p8 q11, d26, d22 @ K = A*B4
- veor q10, q10, q0 @ N = I + J
- veor d16, d16, d17
- veor d18, d18, d19
- veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
- vand d21, d21, d31
- vext.8 q8, q8, q8, #15
- veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
- vmov.i64 d23, #0
- vext.8 q9, q9, q9, #14
- veor d20, d20, d21
- vmull.p8 q0, d26, d6 @ D = A*B
- vext.8 q11, q11, q11, #12
- vext.8 q10, q10, q10, #13
- veor q8, q8, q9
- veor q10, q10, q11
- veor q0, q0, q8
- veor q0, q0, q10
- veor d6,d6,d7 @ Karatsuba pre-processing
- vext.8 d16, d28, d28, #1 @ A1
- vmull.p8 q8, d16, d6 @ F = A1*B
- vext.8 d2, d6, d6, #1 @ B1
- vmull.p8 q1, d28, d2 @ E = A*B1
- vext.8 d18, d28, d28, #2 @ A2
- vmull.p8 q9, d18, d6 @ H = A2*B
- vext.8 d22, d6, d6, #2 @ B2
- vmull.p8 q11, d28, d22 @ G = A*B2
- vext.8 d20, d28, d28, #3 @ A3
- veor q8, q8, q1 @ L = E + F
- vmull.p8 q10, d20, d6 @ J = A3*B
- vext.8 d2, d6, d6, #3 @ B3
- veor q9, q9, q11 @ M = G + H
- vmull.p8 q1, d28, d2 @ I = A*B3
- veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
- vand d17, d17, d29
- vext.8 d22, d6, d6, #4 @ B4
- veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
- vand d19, d19, d30
- vmull.p8 q11, d28, d22 @ K = A*B4
- veor q10, q10, q1 @ N = I + J
- veor d16, d16, d17
- veor d18, d18, d19
- veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
- vand d21, d21, d31
- vext.8 q8, q8, q8, #15
- veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
- vmov.i64 d23, #0
- vext.8 q9, q9, q9, #14
- veor d20, d20, d21
- vmull.p8 q1, d28, d6 @ D = A*B
- vext.8 q11, q11, q11, #12
- vext.8 q10, q10, q10, #13
- veor q8, q8, q9
- veor q10, q10, q11
- veor q1, q1, q8
- veor q1, q1, q10
- vext.8 d16, d27, d27, #1 @ A1
- vmull.p8 q8, d16, d7 @ F = A1*B
- vext.8 d4, d7, d7, #1 @ B1
- vmull.p8 q2, d27, d4 @ E = A*B1
- vext.8 d18, d27, d27, #2 @ A2
- vmull.p8 q9, d18, d7 @ H = A2*B
- vext.8 d22, d7, d7, #2 @ B2
- vmull.p8 q11, d27, d22 @ G = A*B2
- vext.8 d20, d27, d27, #3 @ A3
- veor q8, q8, q2 @ L = E + F
- vmull.p8 q10, d20, d7 @ J = A3*B
- vext.8 d4, d7, d7, #3 @ B3
- veor q9, q9, q11 @ M = G + H
- vmull.p8 q2, d27, d4 @ I = A*B3
- veor d16, d16, d17 @ t0 = (L) (P0 + P1) << 8
- vand d17, d17, d29
- vext.8 d22, d7, d7, #4 @ B4
- veor d18, d18, d19 @ t1 = (M) (P2 + P3) << 16
- vand d19, d19, d30
- vmull.p8 q11, d27, d22 @ K = A*B4
- veor q10, q10, q2 @ N = I + J
- veor d16, d16, d17
- veor d18, d18, d19
- veor d20, d20, d21 @ t2 = (N) (P4 + P5) << 24
- vand d21, d21, d31
- vext.8 q8, q8, q8, #15
- veor d22, d22, d23 @ t3 = (K) (P6 + P7) << 32
- vmov.i64 d23, #0
- vext.8 q9, q9, q9, #14
- veor d20, d20, d21
- vmull.p8 q2, d27, d7 @ D = A*B
- vext.8 q11, q11, q11, #12
- vext.8 q10, q10, q10, #13
- veor q8, q8, q9
- veor q10, q10, q11
- veor q2, q2, q8
- veor q2, q2, q10
- veor q1,q1,q0 @ Karatsuba post-processing
- veor q1,q1,q2
- veor d1,d1,d2
- veor d4,d4,d3 @ Xh|Xl - 256-bit result
+ veor d2,d2
+ veor q14,q10 @ inp^=Xi
+ veor q10,q10
+ vdup.8 d4,d28[0] @ broadcast lowest byte
+.Linner_neon:
+ subs r1,r1,#1
+ vmull.p8 q9,d1,d4 @ H.lo·Xi[i]
+ vmull.p8 q8,d0,d4 @ H.hi·Xi[i]
+ vext.8 q14,q12,#1 @ IN>>=8
+
+ veor q10,q13 @ modulo-scheduled part
+ vshl.i64 d22,#48
+ vdup.8 d4,d28[0] @ broadcast lowest byte
+ veor d3,d18,d20
+
+ veor d21,d22
+ vuzp.8 q9,q8
+ vsli.8 d2,d3,#1 @ compose the "carry" byte
+ vext.8 q10,q12,#1 @ Z>>=8
- @ equivalent of reduction_avx from ghash-x86_64.pl
- vshl.i64 q9,q0,#57 @ 1st phase
- vshl.i64 q10,q0,#62
- veor q10,q10,q9 @
- vshl.i64 q9,q0,#63
- veor q10, q10, q9 @
- veor d1,d1,d20 @
- veor d4,d4,d21
+ vmull.p8 q11,d2,d5 @ "carry"·0xe1
+ vshr.u8 d2,d3,#7 @ save Z's bottom bit
+ vext.8 q13,q9,q12,#1 @ Qlo>>=8
+ veor q10,q8
+ bne .Linner_neon
- vshr.u64 q10,q0,#1 @ 2nd phase
- veor q2,q2,q0
- veor q0,q0,q10 @
- vshr.u64 q10,q10,#6
- vshr.u64 q0,q0,#1 @
- veor q0,q0,q2 @
- veor q0,q0,q10 @
+ veor q10,q13 @ modulo-scheduled artefact
+ vshl.i64 d22,#48
+ veor d21,d22
+ @ finalization, normalize Z:Zo
+ vand d2,d5 @ suffices to mask the bit
+ vshr.u64 d3,d20,#63
+ vshl.i64 q10,#1
subs r3,#16
- bne .Loop_neon
+ vorr q10,q1 @ Z=Z:Zo<<1
+ bne .Louter_neon
#ifdef __ARMEL__
- vrev64.8 q0,q0
+ vrev64.8 q10,q10
#endif
sub r0,#16
- vst1.64 d1,[r0,:64]! @ write out Xi
- vst1.64 d0,[r0,:64]
+ vst1.64 d21,[r0,:64]! @ write out Xi
+ vst1.64 d20,[r0,:64]
- bx lr @ bx lr
+ .word 0xe12fff1e
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
.asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
diff --git a/app/openssl/crypto/modes/asm/ghash-armv4.pl b/app/openssl/crypto/modes/asm/ghash-armv4.pl
index b79ecbcc..e46f8e34 100644
--- a/app/openssl/crypto/modes/asm/ghash-armv4.pl
+++ b/app/openssl/crypto/modes/asm/ghash-armv4.pl
@@ -35,20 +35,6 @@
# Add NEON implementation featuring polynomial multiplication, i.e. no
# lookup tables involved. On Cortex A8 it was measured to process one
# byte in 15 cycles or 55% faster than integer-only code.
-#
-# April 2014
-#
-# Switch to multiplication algorithm suggested in paper referred
-# below and combine it with reduction algorithm from x86 module.
-# Performance improvement over previous version varies from 65% on
-# Snapdragon S4 to 110% on Cortex A9. In absolute terms Cortex A8
-# processes one byte in 8.45 cycles, A9 - in 10.2, Snapdragon S4 -
-# in 9.33.
-#
-# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
-# Polynomial Multiplication on ARM Processors using the NEON Engine.
-#
-# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
# ====================================================================
# Note about "528B" variant. In ARM case it makes lesser sense to
@@ -317,160 +303,117 @@ $code.=<<___;
.size gcm_gmult_4bit,.-gcm_gmult_4bit
___
{
-my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
-my ($t0,$t1,$t2,$t3)=map("q$_",(8..12));
-my ($Hlo,$Hhi,$Hhl,$k48,$k32,$k16)=map("d$_",(26..31));
+my $cnt=$Htbl; # $Htbl is used once in the very beginning
-sub clmul64x64 {
-my ($r,$a,$b)=@_;
-$code.=<<___;
- vext.8 $t0#lo, $a, $a, #1 @ A1
- vmull.p8 $t0, $t0#lo, $b @ F = A1*B
- vext.8 $r#lo, $b, $b, #1 @ B1
- vmull.p8 $r, $a, $r#lo @ E = A*B1
- vext.8 $t1#lo, $a, $a, #2 @ A2
- vmull.p8 $t1, $t1#lo, $b @ H = A2*B
- vext.8 $t3#lo, $b, $b, #2 @ B2
- vmull.p8 $t3, $a, $t3#lo @ G = A*B2
- vext.8 $t2#lo, $a, $a, #3 @ A3
- veor $t0, $t0, $r @ L = E + F
- vmull.p8 $t2, $t2#lo, $b @ J = A3*B
- vext.8 $r#lo, $b, $b, #3 @ B3
- veor $t1, $t1, $t3 @ M = G + H
- vmull.p8 $r, $a, $r#lo @ I = A*B3
- veor $t0#lo, $t0#lo, $t0#hi @ t0 = (L) (P0 + P1) << 8
- vand $t0#hi, $t0#hi, $k48
- vext.8 $t3#lo, $b, $b, #4 @ B4
- veor $t1#lo, $t1#lo, $t1#hi @ t1 = (M) (P2 + P3) << 16
- vand $t1#hi, $t1#hi, $k32
- vmull.p8 $t3, $a, $t3#lo @ K = A*B4
- veor $t2, $t2, $r @ N = I + J
- veor $t0#lo, $t0#lo, $t0#hi
- veor $t1#lo, $t1#lo, $t1#hi
- veor $t2#lo, $t2#lo, $t2#hi @ t2 = (N) (P4 + P5) << 24
- vand $t2#hi, $t2#hi, $k16
- vext.8 $t0, $t0, $t0, #15
- veor $t3#lo, $t3#lo, $t3#hi @ t3 = (K) (P6 + P7) << 32
- vmov.i64 $t3#hi, #0
- vext.8 $t1, $t1, $t1, #14
- veor $t2#lo, $t2#lo, $t2#hi
- vmull.p8 $r, $a, $b @ D = A*B
- vext.8 $t3, $t3, $t3, #12
- vext.8 $t2, $t2, $t2, #13
- veor $t0, $t0, $t1
- veor $t2, $t2, $t3
- veor $r, $r, $t0
- veor $r, $r, $t2
-___
-}
+my ($Hhi, $Hlo, $Zo, $T, $xi, $mod) = map("d$_",(0..7));
+my ($Qhi, $Qlo, $Z, $R, $zero, $Qpost, $IN) = map("q$_",(8..15));
+
+# Z:Zo keeps 128-bit result shifted by 1 to the right, with bottom bit
+# in Zo. Or should I say "top bit", because GHASH is specified in
+# reverse bit order? Otherwise straightforward 128-bt H by one input
+# byte multiplication and modulo-reduction, times 16.
+
+sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
+sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
+sub Q() { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }
$code.=<<___;
#if __ARM_ARCH__>=7
.fpu neon
-.global gcm_init_neon
-.type gcm_init_neon,%function
-.align 4
-gcm_init_neon:
- vld1.64 $IN#hi,[r1,:64]! @ load H
- vmov.i8 $t0,#0xe1
- vld1.64 $IN#lo,[r1,:64]
- vshl.i64 $t0#hi,#57
- vshr.u64 $t0#lo,#63 @ t0=0xc2....01
- vdup.8 $t1,$IN#hi[7]
- vshr.u64 $Hlo,$IN#lo,#63
- vshr.s8 $t1,#7 @ broadcast carry bit
- vshl.i64 $IN,$IN,#1
- vand $t0,$t0,$t1
- vorr $IN#hi,$Hlo @ H<<<=1
- veor $IN,$IN,$t0 @ twisted H
- vstmia r0,{$IN}
-
- ret @ bx lr
-.size gcm_init_neon,.-gcm_init_neon
-
.global gcm_gmult_neon
.type gcm_gmult_neon,%function
.align 4
gcm_gmult_neon:
- vld1.64 $IN#hi,[$Xi,:64]! @ load Xi
- vld1.64 $IN#lo,[$Xi,:64]!
- vmov.i64 $k48,#0x0000ffffffffffff
- vldmia $Htbl,{$Hlo-$Hhi} @ load twisted H
- vmov.i64 $k32,#0x00000000ffffffff
+ sub $Htbl,#16 @ point at H in GCM128_CTX
+ vld1.64 `&Dhi("$IN")`,[$Xi,:64]!@ load Xi
+ vmov.i32 $mod,#0xe1 @ our irreducible polynomial
+ vld1.64 `&Dlo("$IN")`,[$Xi,:64]!
+ vshr.u64 $mod,#32
+ vldmia $Htbl,{$Hhi-$Hlo} @ load H
+ veor $zero,$zero
#ifdef __ARMEL__
vrev64.8 $IN,$IN
#endif
- vmov.i64 $k16,#0x000000000000ffff
- veor $Hhl,$Hlo,$Hhi @ Karatsuba pre-processing
+ veor $Qpost,$Qpost
+ veor $R,$R
+ mov $cnt,#16
+ veor $Z,$Z
mov $len,#16
- b .Lgmult_neon
+ veor $Zo,$Zo
+ vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte
+ b .Linner_neon
.size gcm_gmult_neon,.-gcm_gmult_neon
.global gcm_ghash_neon
.type gcm_ghash_neon,%function
.align 4
gcm_ghash_neon:
- vld1.64 $Xl#hi,[$Xi,:64]! @ load Xi
- vld1.64 $Xl#lo,[$Xi,:64]!
- vmov.i64 $k48,#0x0000ffffffffffff
- vldmia $Htbl,{$Hlo-$Hhi} @ load twisted H
- vmov.i64 $k32,#0x00000000ffffffff
+ vld1.64 `&Dhi("$Z")`,[$Xi,:64]! @ load Xi
+ vmov.i32 $mod,#0xe1 @ our irreducible polynomial
+ vld1.64 `&Dlo("$Z")`,[$Xi,:64]!
+ vshr.u64 $mod,#32
+ vldmia $Xi,{$Hhi-$Hlo} @ load H
+ veor $zero,$zero
+ nop
#ifdef __ARMEL__
- vrev64.8 $Xl,$Xl
+ vrev64.8 $Z,$Z
#endif
- vmov.i64 $k16,#0x000000000000ffff
- veor $Hhl,$Hlo,$Hhi @ Karatsuba pre-processing
-
-.Loop_neon:
- vld1.64 $IN#hi,[$inp]! @ load inp
- vld1.64 $IN#lo,[$inp]!
+.Louter_neon:
+ vld1.64 `&Dhi($IN)`,[$inp]! @ load inp
+ veor $Qpost,$Qpost
+ vld1.64 `&Dlo($IN)`,[$inp]!
+ veor $R,$R
+ mov $cnt,#16
#ifdef __ARMEL__
vrev64.8 $IN,$IN
#endif
- veor $IN,$Xl @ inp^=Xi
-.Lgmult_neon:
-___
- &clmul64x64 ($Xl,$Hlo,"$IN#lo"); # H.lo·Xi.lo
-$code.=<<___;
- veor $IN#lo,$IN#lo,$IN#hi @ Karatsuba pre-processing
-___
- &clmul64x64 ($Xm,$Hhl,"$IN#lo"); # (H.lo+H.hi)·(Xi.lo+Xi.hi)
- &clmul64x64 ($Xh,$Hhi,"$IN#hi"); # H.hi·Xi.hi
-$code.=<<___;
- veor $Xm,$Xm,$Xl @ Karatsuba post-processing
- veor $Xm,$Xm,$Xh
- veor $Xl#hi,$Xl#hi,$Xm#lo
- veor $Xh#lo,$Xh#lo,$Xm#hi @ Xh|Xl - 256-bit result
-
- @ equivalent of reduction_avx from ghash-x86_64.pl
- vshl.i64 $t1,$Xl,#57 @ 1st phase
- vshl.i64 $t2,$Xl,#62
- veor $t2,$t2,$t1 @
- vshl.i64 $t1,$Xl,#63
- veor $t2, $t2, $t1 @
- veor $Xl#hi,$Xl#hi,$t2#lo @
- veor $Xh#lo,$Xh#lo,$t2#hi
-
- vshr.u64 $t2,$Xl,#1 @ 2nd phase
- veor $Xh,$Xh,$Xl
- veor $Xl,$Xl,$t2 @
- vshr.u64 $t2,$t2,#6
- vshr.u64 $Xl,$Xl,#1 @
- veor $Xl,$Xl,$Xh @
- veor $Xl,$Xl,$t2 @
-
+ veor $Zo,$Zo
+ veor $IN,$Z @ inp^=Xi
+ veor $Z,$Z
+ vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte
+.Linner_neon:
+ subs $cnt,$cnt,#1
+ vmull.p8 $Qlo,$Hlo,$xi @ H.lo·Xi[i]
+ vmull.p8 $Qhi,$Hhi,$xi @ H.hi·Xi[i]
+ vext.8 $IN,$zero,#1 @ IN>>=8
+
+ veor $Z,$Qpost @ modulo-scheduled part
+ vshl.i64 `&Dlo("$R")`,#48
+ vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte
+ veor $T,`&Dlo("$Qlo")`,`&Dlo("$Z")`
+
+ veor `&Dhi("$Z")`,`&Dlo("$R")`
+ vuzp.8 $Qlo,$Qhi
+ vsli.8 $Zo,$T,#1 @ compose the "carry" byte
+ vext.8 $Z,$zero,#1 @ Z>>=8
+
+ vmull.p8 $R,$Zo,$mod @ "carry"·0xe1
+ vshr.u8 $Zo,$T,#7 @ save Z's bottom bit
+ vext.8 $Qpost,$Qlo,$zero,#1 @ Qlo>>=8
+ veor $Z,$Qhi
+ bne .Linner_neon
+
+ veor $Z,$Qpost @ modulo-scheduled artefact
+ vshl.i64 `&Dlo("$R")`,#48
+ veor `&Dhi("$Z")`,`&Dlo("$R")`
+
+ @ finalization, normalize Z:Zo
+ vand $Zo,$mod @ suffices to mask the bit
+ vshr.u64 `&Dhi(&Q("$Zo"))`,`&Dlo("$Z")`,#63
+ vshl.i64 $Z,#1
subs $len,#16
- bne .Loop_neon
+ vorr $Z,`&Q("$Zo")` @ Z=Z:Zo<<1
+ bne .Louter_neon
#ifdef __ARMEL__
- vrev64.8 $Xl,$Xl
+ vrev64.8 $Z,$Z
#endif
sub $Xi,#16
- vst1.64 $Xl#hi,[$Xi,:64]! @ write out Xi
- vst1.64 $Xl#lo,[$Xi,:64]
+ vst1.64 `&Dhi("$Z")`,[$Xi,:64]! @ write out Xi
+ vst1.64 `&Dlo("$Z")`,[$Xi,:64]
- ret @ bx lr
+ bx lr
.size gcm_ghash_neon,.-gcm_ghash_neon
#endif
___
@@ -480,13 +423,7 @@ $code.=<<___;
.align 2
___
-foreach (split("\n",$code)) {
- s/\`([^\`]*)\`/eval $1/geo;
-
- s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
- s/\bret\b/bx lr/go or
- s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
-
- print $_,"\n";
-}
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
+print $code;
close STDOUT; # enforce flush
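
The outer loop added above (.Louter_neon) absorbs the input sixteen bytes at a time: each block is XORed into Xi and the result is multiplied by H. In C terms it is roughly the following, reusing the gf128_mul sketch shown earlier (ghash_blocks is an illustrative name, not part of the OpenSSL API, and only whole blocks are handled):

#include <stddef.h>
#include <stdint.h>

static void ghash_blocks(uint8_t Xi[16], const uint8_t H[16],
                         const uint8_t *inp, size_t len)
{
    uint8_t tmp[16];

    while (len >= 16) {
        for (int i = 0; i < 16; i++)
            tmp[i] = Xi[i] ^ inp[i];       /* inp ^= Xi */
        gf128_mul(Xi, tmp, H);             /* Xi = (Xi ^ block)·H */
        inp += 16;
        len -= 16;
    }
}
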
diff --git a/app/openssl/crypto/modes/asm/ghashv8-armx-64.S b/app/openssl/crypto/modes/asm/ghashv8-armx-64.S
deleted file mode 100644
index b77b6c40..00000000
--- a/app/openssl/crypto/modes/asm/ghashv8-armx-64.S
+++ /dev/null
@@ -1,115 +0,0 @@
-#include "arm_arch.h"
-
-.text
-.arch armv8-a+crypto
-.global gcm_init_v8
-.type gcm_init_v8,%function
-.align 4
-gcm_init_v8:
- ld1 {v17.2d},[x1] //load H
- movi v16.16b,#0xe1
- ext v3.16b,v17.16b,v17.16b,#8
- shl v16.2d,v16.2d,#57
- ushr v18.2d,v16.2d,#63
- ext v16.16b,v18.16b,v16.16b,#8 //t0=0xc2....01
- dup v17.4s,v17.s[1]
- ushr v19.2d,v3.2d,#63
- sshr v17.4s,v17.4s,#31 //broadcast carry bit
- and v19.16b,v19.16b,v16.16b
- shl v3.2d,v3.2d,#1
- ext v19.16b,v19.16b,v19.16b,#8
- and v16.16b,v16.16b,v17.16b
- orr v3.16b,v3.16b,v19.16b //H<<<=1
- eor v3.16b,v3.16b,v16.16b //twisted H
- st1 {v3.2d},[x0]
-
- ret
-.size gcm_init_v8,.-gcm_init_v8
-
-.global gcm_gmult_v8
-.type gcm_gmult_v8,%function
-.align 4
-gcm_gmult_v8:
- ld1 {v17.2d},[x0] //load Xi
- movi v19.16b,#0xe1
- ld1 {v20.2d},[x1] //load twisted H
- shl v19.2d,v19.2d,#57
-#ifndef __ARMEB__
- rev64 v17.16b,v17.16b
-#endif
- ext v21.16b,v20.16b,v20.16b,#8
- mov x3,#0
- ext v3.16b,v17.16b,v17.16b,#8
- mov x12,#0
- eor v21.16b,v21.16b,v20.16b //Karatsuba pre-processing
- mov x2,x0
- b .Lgmult_v8
-.size gcm_gmult_v8,.-gcm_gmult_v8
-
-.global gcm_ghash_v8
-.type gcm_ghash_v8,%function
-.align 4
-gcm_ghash_v8:
- ld1 {v0.2d},[x0] //load [rotated] Xi
- subs x3,x3,#16
- movi v19.16b,#0xe1
- mov x12,#16
- ld1 {v20.2d},[x1] //load twisted H
- csel x12,xzr,x12,eq
- ext v0.16b,v0.16b,v0.16b,#8
- shl v19.2d,v19.2d,#57
- ld1 {v17.2d},[x2],x12 //load [rotated] inp
- ext v21.16b,v20.16b,v20.16b,#8
-#ifndef __ARMEB__
- rev64 v0.16b,v0.16b
- rev64 v17.16b,v17.16b
-#endif
- eor v21.16b,v21.16b,v20.16b //Karatsuba pre-processing
- ext v3.16b,v17.16b,v17.16b,#8
- b .Loop_v8
-
-.align 4
-.Loop_v8:
- ext v18.16b,v0.16b,v0.16b,#8
- eor v3.16b,v3.16b,v0.16b //inp^=Xi
- eor v17.16b,v17.16b,v18.16b //v17.16b is rotated inp^Xi
-
-.Lgmult_v8:
- pmull v0.1q,v20.1d,v3.1d //H.lo·Xi.lo
- eor v17.16b,v17.16b,v3.16b //Karatsuba pre-processing
- pmull2 v2.1q,v20.2d,v3.2d //H.hi·Xi.hi
- subs x3,x3,#16
- pmull v1.1q,v21.1d,v17.1d //(H.lo+H.hi)·(Xi.lo+Xi.hi)
- csel x12,xzr,x12,eq
-
- ext v17.16b,v0.16b,v2.16b,#8 //Karatsuba post-processing
- eor v18.16b,v0.16b,v2.16b
- eor v1.16b,v1.16b,v17.16b
- ld1 {v17.2d},[x2],x12 //load [rotated] inp
- eor v1.16b,v1.16b,v18.16b
- pmull v18.1q,v0.1d,v19.1d //1st phase
-
- ins v2.d[0],v1.d[1]
- ins v1.d[1],v0.d[0]
-#ifndef __ARMEB__
- rev64 v17.16b,v17.16b
-#endif
- eor v0.16b,v1.16b,v18.16b
- ext v3.16b,v17.16b,v17.16b,#8
-
- ext v18.16b,v0.16b,v0.16b,#8 //2nd phase
- pmull v0.1q,v0.1d,v19.1d
- eor v18.16b,v18.16b,v2.16b
- eor v0.16b,v0.16b,v18.16b
- b.hs .Loop_v8
-
-#ifndef __ARMEB__
- rev64 v0.16b,v0.16b
-#endif
- ext v0.16b,v0.16b,v0.16b,#8
- st1 {v0.2d},[x0] //write out Xi
-
- ret
-.size gcm_ghash_v8,.-gcm_ghash_v8
-.asciz "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
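
The deleted ghashv8 routines above lean on the ARMv8 PMULL/PMULL2 instructions and split the 128x128-bit carry-less multiply Karatsuba-style into three 64x64-bit products, as the H.lo·Xi.lo / H.hi·Xi.hi / (H.lo+H.hi)·(Xi.lo+Xi.hi) comments indicate. A rough self-contained C sketch of that decomposition, with clmul64 standing in for PMULL (all names here are illustrative):

#include <stdint.h>

typedef struct { uint64_t lo, hi; } u128;

/* Carry-less 64x64 -> 128-bit multiply; PMULL does this in one instruction. */
static u128 clmul64(uint64_t a, uint64_t b)
{
    u128 r = { 0, 0 };
    for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
            r.lo ^= a << i;
            if (i) r.hi ^= a >> (64 - i);
        }
    }
    return r;
}

/* Karatsuba: three 64x64 multiplies suffice for the 128x128 product.
 * The pieces combine as Xh·x^128 ^ Xm·x^64 ^ Xl before reduction. */
static void karatsuba128(uint64_t Hlo, uint64_t Hhi,
                         uint64_t Xlo, uint64_t Xhi,
                         u128 *Xl, u128 *Xm, u128 *Xh)
{
    *Xl = clmul64(Hlo, Xlo);                 /* H.lo·Xi.lo                */
    *Xh = clmul64(Hhi, Xhi);                 /* H.hi·Xi.hi                */
    *Xm = clmul64(Hlo ^ Hhi, Xlo ^ Xhi);     /* (H.lo+H.hi)·(Xi.lo+Xi.hi) */
    Xm->lo ^= Xl->lo ^ Xh->lo;               /* Karatsuba post-processing */
    Xm->hi ^= Xl->hi ^ Xh->hi;
}
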
diff --git a/app/openssl/crypto/modes/asm/ghashv8-armx.S b/app/openssl/crypto/modes/asm/ghashv8-armx.S
deleted file mode 100644
index f388c54e..00000000
--- a/app/openssl/crypto/modes/asm/ghashv8-armx.S
+++ /dev/null
@@ -1,116 +0,0 @@
-#include "arm_arch.h"
-
-.text
-.fpu neon
-.code 32
-.global gcm_init_v8
-.type gcm_init_v8,%function
-.align 4
-gcm_init_v8:
- vld1.64 {q9},[r1] @ load H
- vmov.i8 q8,#0xe1
- vext.8 q3,q9,q9,#8
- vshl.i64 q8,q8,#57
- vshr.u64 q10,q8,#63
- vext.8 q8,q10,q8,#8 @ t0=0xc2....01
- vdup.32 q9,d18[1]
- vshr.u64 q11,q3,#63
- vshr.s32 q9,q9,#31 @ broadcast carry bit
- vand q11,q11,q8
- vshl.i64 q3,q3,#1
- vext.8 q11,q11,q11,#8
- vand q8,q8,q9
- vorr q3,q3,q11 @ H<<<=1
- veor q3,q3,q8 @ twisted H
- vst1.64 {q3},[r0]
-
- bx lr
-.size gcm_init_v8,.-gcm_init_v8
-
-.global gcm_gmult_v8
-.type gcm_gmult_v8,%function
-.align 4
-gcm_gmult_v8:
- vld1.64 {q9},[r0] @ load Xi
- vmov.i8 q11,#0xe1
- vld1.64 {q12},[r1] @ load twisted H
- vshl.u64 q11,q11,#57
-#ifndef __ARMEB__
- vrev64.8 q9,q9
-#endif
- vext.8 q13,q12,q12,#8
- mov r3,#0
- vext.8 q3,q9,q9,#8
- mov r12,#0
- veor q13,q13,q12 @ Karatsuba pre-processing
- mov r2,r0
- b .Lgmult_v8
-.size gcm_gmult_v8,.-gcm_gmult_v8
-
-.global gcm_ghash_v8
-.type gcm_ghash_v8,%function
-.align 4
-gcm_ghash_v8:
- vld1.64 {q0},[r0] @ load [rotated] Xi
- subs r3,r3,#16
- vmov.i8 q11,#0xe1
- mov r12,#16
- vld1.64 {q12},[r1] @ load twisted H
- moveq r12,#0
- vext.8 q0,q0,q0,#8
- vshl.u64 q11,q11,#57
- vld1.64 {q9},[r2],r12 @ load [rotated] inp
- vext.8 q13,q12,q12,#8
-#ifndef __ARMEB__
- vrev64.8 q0,q0
- vrev64.8 q9,q9
-#endif
- veor q13,q13,q12 @ Karatsuba pre-processing
- vext.8 q3,q9,q9,#8
- b .Loop_v8
-
-.align 4
-.Loop_v8:
- vext.8 q10,q0,q0,#8
- veor q3,q3,q0 @ inp^=Xi
- veor q9,q9,q10 @ q9 is rotated inp^Xi
-
-.Lgmult_v8:
- .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
- veor q9,q9,q3 @ Karatsuba pre-processing
- .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
- subs r3,r3,#16
- .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
- moveq r12,#0
-
- vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
- veor q10,q0,q2
- veor q1,q1,q9
- vld1.64 {q9},[r2],r12 @ load [rotated] inp
- veor q1,q1,q10
- .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase
-
- vmov d4,d3 @ Xh|Xm - 256-bit result
- vmov d3,d0 @ Xm is rotated Xl
-#ifndef __ARMEB__
- vrev64.8 q9,q9
-#endif
- veor q0,q1,q10
- vext.8 q3,q9,q9,#8
-
- vext.8 q10,q0,q0,#8 @ 2nd phase
- .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11
- veor q10,q10,q2
- veor q0,q0,q10
- bhs .Loop_v8
-
-#ifndef __ARMEB__
- vrev64.8 q0,q0
-#endif
- vext.8 q0,q0,q0,#8
- vst1.64 {q0},[r0] @ write out Xi
-
- bx lr
-.size gcm_ghash_v8,.-gcm_ghash_v8
-.asciz "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
diff --git a/app/openssl/crypto/modes/asm/ghashv8-armx.pl b/app/openssl/crypto/modes/asm/ghashv8-armx.pl
deleted file mode 100644
index 69e863e7..00000000
--- a/app/openssl/crypto/modes/asm/ghashv8-armx.pl
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env perl
-#
-# ====================================================================
-# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-# project. The module is, however, dual licensed under OpenSSL and
-# CRYPTOGAMS licenses depending on where you obtain it. For further
-# details see http://www.openssl.org/~appro/cryptogams/.
-# ====================================================================
-#
-# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
-#
-# June 2014
-#
-# Initial version was developed in tight cooperation with Ard
-# Biesheuvel <ard.biesheuvel@linaro.org> from bits-n-pieces from
-# other assembly modules. Just like aesv8-armx.pl this module
-# supports both AArch32 and AArch64 execution modes.
-#
-# Current performance in cycles per processed byte:
-#
-# PMULL[2] 32-bit NEON(*)
-# Apple A7 1.76 5.62
-# Cortex-A5x n/a n/a
-#
-# (*) presented for reference/comparison purposes;
-
-$flavour = shift;
-open STDOUT,">".shift;
-
-$Xi="x0"; # argument block
-$Htbl="x1";
-$inp="x2";
-$len="x3";
-
-$inc="x12";
-
-{
-my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
-my ($t0,$t1,$t2,$t3,$H,$Hhl)=map("q$_",(8..14));
-
-$code=<<___;
-#include "arm_arch.h"
-
-.text
-___
-$code.=".arch armv8-a+crypto\n" if ($flavour =~ /64/);
-$code.=".fpu neon\n.code 32\n" if ($flavour !~ /64/);
-
-$code.=<<___;
-.global gcm_init_v8
-.type gcm_init_v8,%function
-.align 4
-gcm_init_v8:
- vld1.64 {$t1},[x1] @ load H
- vmov.i8 $t0,#0xe1
- vext.8 $IN,$t1,$t1,#8
- vshl.i64 $t0,$t0,#57
- vshr.u64 $t2,$t0,#63
- vext.8 $t0,$t2,$t0,#8 @ t0=0xc2....01
- vdup.32 $t1,${t1}[1]
- vshr.u64 $t3,$IN,#63
- vshr.s32 $t1,$t1,#31 @ broadcast carry bit
- vand $t3,$t3,$t0
- vshl.i64 $IN,$IN,#1
- vext.8 $t3,$t3,$t3,#8
- vand $t0,$t0,$t1
- vorr $IN,$IN,$t3 @ H<<<=1
- veor $IN,$IN,$t0 @ twisted H
- vst1.64 {$IN},[x0]
-
- ret
-.size gcm_init_v8,.-gcm_init_v8
-
-.global gcm_gmult_v8
-.type gcm_gmult_v8,%function
-.align 4
-gcm_gmult_v8:
- vld1.64 {$t1},[$Xi] @ load Xi
- vmov.i8 $t3,#0xe1
- vld1.64 {$H},[$Htbl] @ load twisted H
- vshl.u64 $t3,$t3,#57
-#ifndef __ARMEB__
- vrev64.8 $t1,$t1
-#endif
- vext.8 $Hhl,$H,$H,#8
- mov $len,#0
- vext.8 $IN,$t1,$t1,#8
- mov $inc,#0
- veor $Hhl,$Hhl,$H @ Karatsuba pre-processing
- mov $inp,$Xi
- b .Lgmult_v8
-.size gcm_gmult_v8,.-gcm_gmult_v8
-
-.global gcm_ghash_v8
-.type gcm_ghash_v8,%function
-.align 4
-gcm_ghash_v8:
- vld1.64 {$Xl},[$Xi] @ load [rotated] Xi
- subs $len,$len,#16
- vmov.i8 $t3,#0xe1
- mov $inc,#16
- vld1.64 {$H},[$Htbl] @ load twisted H
- cclr $inc,eq
- vext.8 $Xl,$Xl,$Xl,#8
- vshl.u64 $t3,$t3,#57
- vld1.64 {$t1},[$inp],$inc @ load [rotated] inp
- vext.8 $Hhl,$H,$H,#8
-#ifndef __ARMEB__
- vrev64.8 $Xl,$Xl
- vrev64.8 $t1,$t1
-#endif
- veor $Hhl,$Hhl,$H @ Karatsuba pre-processing
- vext.8 $IN,$t1,$t1,#8
- b .Loop_v8
-
-.align 4
-.Loop_v8:
- vext.8 $t2,$Xl,$Xl,#8
- veor $IN,$IN,$Xl @ inp^=Xi
- veor $t1,$t1,$t2 @ $t1 is rotated inp^Xi
-
-.Lgmult_v8:
- vpmull.p64 $Xl,$H,$IN @ H.lo·Xi.lo
- veor $t1,$t1,$IN @ Karatsuba pre-processing
- vpmull2.p64 $Xh,$H,$IN @ H.hi·Xi.hi
- subs $len,$len,#16
- vpmull.p64 $Xm,$Hhl,$t1 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
- cclr $inc,eq
-
- vext.8 $t1,$Xl,$Xh,#8 @ Karatsuba post-processing
- veor $t2,$Xl,$Xh
- veor $Xm,$Xm,$t1
- vld1.64 {$t1},[$inp],$inc @ load [rotated] inp
- veor $Xm,$Xm,$t2
- vpmull.p64 $t2,$Xl,$t3 @ 1st phase
-
- vmov $Xh#lo,$Xm#hi @ Xh|Xm - 256-bit result
- vmov $Xm#hi,$Xl#lo @ Xm is rotated Xl
-#ifndef __ARMEB__
- vrev64.8 $t1,$t1
-#endif
- veor $Xl,$Xm,$t2
- vext.8 $IN,$t1,$t1,#8
-
- vext.8 $t2,$Xl,$Xl,#8 @ 2nd phase
- vpmull.p64 $Xl,$Xl,$t3
- veor $t2,$t2,$Xh
- veor $Xl,$Xl,$t2
- b.hs .Loop_v8
-
-#ifndef __ARMEB__
- vrev64.8 $Xl,$Xl
-#endif
- vext.8 $Xl,$Xl,$Xl,#8
- vst1.64 {$Xl},[$Xi] @ write out Xi
-
- ret
-.size gcm_ghash_v8,.-gcm_ghash_v8
-___
-}
-$code.=<<___;
-.asciz "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
-.align 2
-___
-
-if ($flavour =~ /64/) { ######## 64-bit code
- sub unvmov {
- my $arg=shift;
-
- $arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
- sprintf "ins v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
- }
- foreach(split("\n",$code)) {
- s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o or
- s/vmov\.i8/movi/o or # fix up legacy mnemonics
- s/vmov\s+(.*)/unvmov($1)/geo or
- s/vext\.8/ext/o or
- s/vshr\.s/sshr\.s/o or
- s/vshr/ushr/o or
- s/^(\s+)v/$1/o or # strip off v prefix
- s/\bbx\s+lr\b/ret/o;
-
- s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo; # old->new registers
- s/@\s/\/\//o; # old->new style commentary
-
- # fix up remainig legacy suffixes
- s/\.[ui]?8(\s)/$1/o;
- s/\.[uis]?32//o and s/\.16b/\.4s/go;
- m/\.p64/o and s/\.16b/\.1q/o; # 1st pmull argument
- m/l\.p64/o and s/\.16b/\.1d/go; # 2nd and 3rd pmull arguments
- s/\.[uisp]?64//o and s/\.16b/\.2d/go;
- s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
-
- print $_,"\n";
- }
-} else { ######## 32-bit code
- sub unvdup32 {
- my $arg=shift;
-
- $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
- sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
- }
- sub unvpmullp64 {
- my ($mnemonic,$arg)=@_;
-
- if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
- my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
- |(($2&7)<<17)|(($2&8)<<4)
- |(($3&7)<<1) |(($3&8)<<2);
- $word |= 0x00010001 if ($mnemonic =~ "2");
- # since ARMv7 instructions are always encoded little-endian.
- # correct solution is to use .inst directive, but older
- # assemblers don't implement it:-(
- sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
- $word&0xff,($word>>8)&0xff,
- ($word>>16)&0xff,($word>>24)&0xff,
- $mnemonic,$arg;
- }
- }
-
- foreach(split("\n",$code)) {
- s/\b[wx]([0-9]+)\b/r$1/go; # new->old registers
- s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go; # new->old registers
- s/\/\/\s?/@ /o; # new->old style commentary
-
- # fix up remainig new-style suffixes
- s/\],#[0-9]+/]!/o;
-
- s/cclr\s+([^,]+),\s*([a-z]+)/mov$2 $1,#0/o or
- s/vdup\.32\s+(.*)/unvdup32($1)/geo or
- s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo or
- s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
- s/^(\s+)b\./$1b/o or
- s/^(\s+)ret/$1bx\tlr/o;
-
- print $_,"\n";
- }
-}
-
-close STDOUT; # enforce flush
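
As a sanity check on the hand-encoding done by unvpmullp64 in the deleted ghashv8-armx.pl above: feeding pmull q0,q12,q3 through that formula reproduces the .byte sequence that appears verbatim in the deleted ghashv8-armx.S (0x86,0x0e,0xa8,0xf2). A small illustrative C program performing the same computation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned rd = 0, rn = 12, rm = 3;            /* pmull q0,q12,q3 (not pmull2) */
    uint32_t word = 0xf2a00e00
                  | ((rd & 7) << 13) | ((rd & 8) << 19)
                  | ((rn & 7) << 17) | ((rn & 8) << 4)
                  | ((rm & 7) << 1)  | ((rm & 8) << 2);
    /* ARMv7 instructions are stored little-endian, hence the byte order. */
    printf(".byte 0x%02x,0x%02x,0x%02x,0x%02x\n",
           (unsigned)(word & 0xff), (unsigned)((word >> 8) & 0xff),
           (unsigned)((word >> 16) & 0xff), (unsigned)((word >> 24) & 0xff));
    return 0;   /* prints: .byte 0x86,0x0e,0xa8,0xf2 */
}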