boringssl/crypto/chacha/asm/chacha-armv8.pl

#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# June 2015
#
# ChaCha20 for ARMv8.
#
# Performance in cycles per byte out of large buffer.
#
#              IALU/gcc-4.9   3xNEON+1xIALU   6xNEON+2xIALU
#
# Apple A7     5.50/+49%      3.33            1.70
# Cortex-A53   8.40/+80%      4.72            4.72(*)
# Cortex-A57   8.06/+43%      4.90            4.43(**)
# Denver       4.50/+82%      2.63            2.67(*)
# X-Gene       9.50/+46%      8.82            8.89(*)
#
# (*)  it's expected that doubling the interleave factor doesn't help
#      all processors, only those with higher NEON latency and a
#      higher instruction issue rate;
# (**) the expected improvement was actually higher;

$flavour=shift;
$output=shift;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";
open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
sub AUTOLOAD() # thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
my $arg = pop;
$arg = "#$arg" if ($arg*1 eq $arg);
$code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}
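
# Note: the AUTOLOAD thunk above is what lets the ROUND() generators below
# call, e.g., &add_32(...) or &ror_32(...): the undefined sub name is caught
# here, underscores become dots ("add_32" -> "add.32"), a purely numeric
# final argument gets a '#' immediate prefix, and the resulting instruction
# is appended to $code.  The ".32" pseudo-suffix is rewritten into real
# AArch64 syntax by the post-processing loop at the bottom of this file.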
my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4));
my @x=map("x$_",(5..17,19..21));
my @d=map("x$_",(22..28,30));
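
# Register allocation (assuming the usual AAPCS64 register roles): the
# sixteen 32-bit working words of the ChaCha state live in the w-halves of
# @x (x5-x17 and x19-x21; x18 is skipped, as it is reserved as the platform
# register on some targets), while @d (x22-x28 and x30) holds the 16-word
# input block (sigma, key, counter) packed two 32-bit words per 64-bit
# register, unpacked at the top of each .Loop_outer iteration.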
sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
(
"&add_32 (@x[$a0],@x[$a0],@x[$b0])",
"&add_32 (@x[$a1],@x[$a1],@x[$b1])",
"&add_32 (@x[$a2],@x[$a2],@x[$b2])",
"&add_32 (@x[$a3],@x[$a3],@x[$b3])",
"&eor_32 (@x[$d0],@x[$d0],@x[$a0])",
"&eor_32 (@x[$d1],@x[$d1],@x[$a1])",
"&eor_32 (@x[$d2],@x[$d2],@x[$a2])",
"&eor_32 (@x[$d3],@x[$d3],@x[$a3])",
"&ror_32 (@x[$d0],@x[$d0],16)",
"&ror_32 (@x[$d1],@x[$d1],16)",
"&ror_32 (@x[$d2],@x[$d2],16)",
"&ror_32 (@x[$d3],@x[$d3],16)",
"&add_32 (@x[$c0],@x[$c0],@x[$d0])",
"&add_32 (@x[$c1],@x[$c1],@x[$d1])",
"&add_32 (@x[$c2],@x[$c2],@x[$d2])",
"&add_32 (@x[$c3],@x[$c3],@x[$d3])",
"&eor_32 (@x[$b0],@x[$b0],@x[$c0])",
"&eor_32 (@x[$b1],@x[$b1],@x[$c1])",
"&eor_32 (@x[$b2],@x[$b2],@x[$c2])",
"&eor_32 (@x[$b3],@x[$b3],@x[$c3])",
"&ror_32 (@x[$b0],@x[$b0],20)",
"&ror_32 (@x[$b1],@x[$b1],20)",
"&ror_32 (@x[$b2],@x[$b2],20)",
"&ror_32 (@x[$b3],@x[$b3],20)",
"&add_32 (@x[$a0],@x[$a0],@x[$b0])",
"&add_32 (@x[$a1],@x[$a1],@x[$b1])",
"&add_32 (@x[$a2],@x[$a2],@x[$b2])",
"&add_32 (@x[$a3],@x[$a3],@x[$b3])",
"&eor_32 (@x[$d0],@x[$d0],@x[$a0])",
"&eor_32 (@x[$d1],@x[$d1],@x[$a1])",
"&eor_32 (@x[$d2],@x[$d2],@x[$a2])",
"&eor_32 (@x[$d3],@x[$d3],@x[$a3])",
"&ror_32 (@x[$d0],@x[$d0],24)",
"&ror_32 (@x[$d1],@x[$d1],24)",
"&ror_32 (@x[$d2],@x[$d2],24)",
"&ror_32 (@x[$d3],@x[$d3],24)",
"&add_32 (@x[$c0],@x[$c0],@x[$d0])",
"&add_32 (@x[$c1],@x[$c1],@x[$d1])",
"&add_32 (@x[$c2],@x[$c2],@x[$d2])",
"&add_32 (@x[$c3],@x[$c3],@x[$d3])",
"&eor_32 (@x[$b0],@x[$b0],@x[$c0])",
"&eor_32 (@x[$b1],@x[$b1],@x[$c1])",
"&eor_32 (@x[$b2],@x[$b2],@x[$c2])",
"&eor_32 (@x[$b3],@x[$b3],@x[$c3])",
"&ror_32 (@x[$b0],@x[$b0],25)",
"&ror_32 (@x[$b1],@x[$b1],25)",
"&ror_32 (@x[$b2],@x[$b2],25)",
"&ror_32 (@x[$b3],@x[$b3],25)"
);
}
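
# ROUND() emits four ChaCha quarter-rounds, interleaved four-ways across the
# scalar registers.  AArch64 has no rotate-left instruction, so the usual
# left-rotations by 16, 12, 8 and 7 appear here as "ror" by 16, 20, 24 and
# 25.  For reference, one quarter-round in pseudo-C (ROTL32 is a
# hypothetical 32-bit rotate-left helper, not part of this file):
#
#	a += b; d ^= a; d = ROTL32(d, 16);
#	c += d; b ^= c; b = ROTL32(b, 12);
#	a += b; d ^= a; d = ROTL32(d,  8);
#	c += d; b ^= c; b = ROTL32(b,  7);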
$code.=<<___;
#include <openssl/arm_arch.h>
.text
.extern OPENSSL_armcap_P
.align 5
.Lsigma:
.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
.Lone:
.long 1,0,0,0
.LOPENSSL_armcap_P:
#ifdef __ILP32__
.long OPENSSL_armcap_P-.
#else
.quad OPENSSL_armcap_P-.
#endif
.asciz "ChaCha20 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.globl ChaCha20_ctr32
.type ChaCha20_ctr32,%function
.align 5
ChaCha20_ctr32:
cbz $len,.Labort
adr @x[0],.LOPENSSL_armcap_P
cmp $len,#192
b.lo .Lshort
#ifdef __ILP32__
ldrsw @x[1],[@x[0]]
#else
ldr @x[1],[@x[0]]
#endif
ldr w17,[@x[1],@x[0]]
tst w17,#ARMV7_NEON
b.ne ChaCha20_neon
.Lshort:
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adr @x[0],.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
sub sp,sp,#64
ldp @d[0],@d[1],[@x[0]] // load sigma
ldp @d[2],@d[3],[$key] // load key
ldp @d[4],@d[5],[$key,#16]
ldp @d[6],@d[7],[$ctr] // load counter
#ifdef __ARMEB__
ror @d[2],@d[2],#32
ror @d[3],@d[3],#32
ror @d[4],@d[4],#32
ror @d[5],@d[5],#32
ror @d[6],@d[6],#32
ror @d[7],@d[7],#32
#endif
.Loop_outer:
mov.32 @x[0],@d[0] // unpack key block
lsr @x[1],@d[0],#32
mov.32 @x[2],@d[1]
lsr @x[3],@d[1],#32
mov.32 @x[4],@d[2]
lsr @x[5],@d[2],#32
mov.32 @x[6],@d[3]
lsr @x[7],@d[3],#32
mov.32 @x[8],@d[4]
lsr @x[9],@d[4],#32
mov.32 @x[10],@d[5]
lsr @x[11],@d[5],#32
mov.32 @x[12],@d[6]
lsr @x[13],@d[6],#32
mov.32 @x[14],@d[7]
lsr @x[15],@d[7],#32
mov $ctr,#10
subs $len,$len,#64
.Loop:
sub $ctr,$ctr,#1
___
foreach (&ROUND(0, 4, 8,12)) { eval; }
foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
cbnz $ctr,.Loop
add.32 @x[0],@x[0],@d[0] // accumulate key block
add @x[1],@x[1],@d[0],lsr#32
add.32 @x[2],@x[2],@d[1]
add @x[3],@x[3],@d[1],lsr#32
add.32 @x[4],@x[4],@d[2]
add @x[5],@x[5],@d[2],lsr#32
add.32 @x[6],@x[6],@d[3]
add @x[7],@x[7],@d[3],lsr#32
add.32 @x[8],@x[8],@d[4]
add @x[9],@x[9],@d[4],lsr#32
add.32 @x[10],@x[10],@d[5]
add @x[11],@x[11],@d[5],lsr#32
add.32 @x[12],@x[12],@d[6]
add @x[13],@x[13],@d[6],lsr#32
add.32 @x[14],@x[14],@d[7]
add @x[15],@x[15],@d[7],lsr#32
b.lo .Ltail
add @x[0],@x[0],@x[1],lsl#32 // pack
add @x[2],@x[2],@x[3],lsl#32
ldp @x[1],@x[3],[$inp,#0] // load input
add @x[4],@x[4],@x[5],lsl#32
add @x[6],@x[6],@x[7],lsl#32
ldp @x[5],@x[7],[$inp,#16]
add @x[8],@x[8],@x[9],lsl#32
add @x[10],@x[10],@x[11],lsl#32
ldp @x[9],@x[11],[$inp,#32]
add @x[12],@x[12],@x[13],lsl#32
add @x[14],@x[14],@x[15],lsl#32
ldp @x[13],@x[15],[$inp,#48]
add $inp,$inp,#64
#ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[2],@x[2]
rev @x[4],@x[4]
rev @x[6],@x[6]
rev @x[8],@x[8]
rev @x[10],@x[10]
rev @x[12],@x[12]
rev @x[14],@x[14]
#endif
eor @x[0],@x[0],@x[1]
eor @x[2],@x[2],@x[3]
eor @x[4],@x[4],@x[5]
eor @x[6],@x[6],@x[7]
eor @x[8],@x[8],@x[9]
eor @x[10],@x[10],@x[11]
eor @x[12],@x[12],@x[13]
eor @x[14],@x[14],@x[15]
stp @x[0],@x[2],[$out,#0] // store output
add @d[6],@d[6],#1 // increment counter
stp @x[4],@x[6],[$out,#16]
stp @x[8],@x[10],[$out,#32]
stp @x[12],@x[14],[$out,#48]
add $out,$out,#64
b.hi .Loop_outer
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
.Labort:
ret
.align 4
.Ltail:
add $len,$len,#64
.Less_than_64:
sub $out,$out,#1
add $inp,$inp,$len
add $out,$out,$len
add $ctr,sp,$len
neg $len,$len
add @x[0],@x[0],@x[1],lsl#32 // pack
add @x[2],@x[2],@x[3],lsl#32
add @x[4],@x[4],@x[5],lsl#32
add @x[6],@x[6],@x[7],lsl#32
add @x[8],@x[8],@x[9],lsl#32
add @x[10],@x[10],@x[11],lsl#32
add @x[12],@x[12],@x[13],lsl#32
add @x[14],@x[14],@x[15],lsl#32
#ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[2],@x[2]
rev @x[4],@x[4]
rev @x[6],@x[6]
rev @x[8],@x[8]
rev @x[10],@x[10]
rev @x[12],@x[12]
rev @x[14],@x[14]
#endif
stp @x[0],@x[2],[sp,#0]
stp @x[4],@x[6],[sp,#16]
stp @x[8],@x[10],[sp,#32]
stp @x[12],@x[14],[sp,#48]
.Loop_tail:
ldrb w10,[$inp,$len]
ldrb w11,[$ctr,$len]
add $len,$len,#1
eor w10,w10,w11
strb w10,[$out,$len]
cbnz $len,.Loop_tail
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
ret
.size ChaCha20_ctr32,.-ChaCha20_ctr32
___
{{{
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) =
map("v$_.4s",(0..7,16..23));
my (@K)=map("v$_.4s",(24..30));
my $ONE="v31.4s";
sub NEONROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;
(
"&add ('$a','$a','$b')",
"&eor ('$d','$d','$a')",
"&rev32_16 ('$d','$d')", # vrot ($d,16)
"&add ('$c','$c','$d')",
"&eor ('$t','$b','$c')",
"&ushr ('$b','$t',20)",
"&sli ('$b','$t',12)",
"&add ('$a','$a','$b')",
"&eor ('$t','$d','$a')",
"&ushr ('$d','$t',24)",
"&sli ('$d','$t',8)",
"&add ('$c','$c','$d')",
"&eor ('$t','$b','$c')",
"&ushr ('$b','$t',25)",
"&sli ('$b','$t',7)",
"&ext ('$c','$c','$c',8)",
"&ext ('$d','$d','$d',$odd?4:12)",
"&ext ('$b','$b','$b',$odd?12:4)"
);
}
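
# NEONROUND() is the vector counterpart of ROUND(), operating on whole rows
# of a block's state held in 128-bit registers.  The rotations use the
# idioms available on AdvSIMD: rotate-by-16 via rev32 on 16-bit lanes, and
# rotates by 12, 8 and 7 via an ushr/sli pair through the scratch register
# $t.  The trailing ext instructions rotate the b, c and d rows across lanes
# so that the same column code processes the diagonals on the following
# call; with $odd set they rotate the rows back into column order.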
$code.=<<___;
.type ChaCha20_neon,%function
.align 5
ChaCha20_neon:
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adr @x[0],.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
cmp $len,#512
b.hs .L512_or_more_neon
sub sp,sp,#64
ldp @d[0],@d[1],[@x[0]] // load sigma
ld1 {@K[0]},[@x[0]],#16
ldp @d[2],@d[3],[$key] // load key
ldp @d[4],@d[5],[$key,#16]
ld1 {@K[1],@K[2]},[$key]
ldp @d[6],@d[7],[$ctr] // load counter
ld1 {@K[3]},[$ctr]
ld1 {$ONE},[@x[0]]
#ifdef __ARMEB__
rev64 @K[0],@K[0]
ror @d[2],@d[2],#32
ror @d[3],@d[3],#32
ror @d[4],@d[4],#32
ror @d[5],@d[5],#32
ror @d[6],@d[6],#32
ror @d[7],@d[7],#32
#endif
add @K[3],@K[3],$ONE // += 1
add @K[4],@K[3],$ONE
add @K[5],@K[4],$ONE
shl $ONE,$ONE,#2 // 1 -> 4
.Loop_outer_neon:
mov.32 @x[0],@d[0] // unpack key block
lsr @x[1],@d[0],#32
mov $A0,@K[0]
mov.32 @x[2],@d[1]
lsr @x[3],@d[1],#32
mov $A1,@K[0]
mov.32 @x[4],@d[2]
lsr @x[5],@d[2],#32
mov $A2,@K[0]
mov.32 @x[6],@d[3]
mov $B0,@K[1]
lsr @x[7],@d[3],#32
mov $B1,@K[1]
mov.32 @x[8],@d[4]
mov $B2,@K[1]
lsr @x[9],@d[4],#32
mov $D0,@K[3]
mov.32 @x[10],@d[5]
mov $D1,@K[4]
lsr @x[11],@d[5],#32
mov $D2,@K[5]
mov.32 @x[12],@d[6]
mov $C0,@K[2]
lsr @x[13],@d[6],#32
mov $C1,@K[2]
mov.32 @x[14],@d[7]
mov $C2,@K[2]
lsr @x[15],@d[7],#32
mov $ctr,#10
subs $len,$len,#256
.Loop_neon:
sub $ctr,$ctr,#1
___
my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
my @thread3=&ROUND(0,4,8,12);
foreach (@thread0) {
eval; eval(shift(@thread3));
eval(shift(@thread1)); eval(shift(@thread3));
eval(shift(@thread2)); eval(shift(@thread3));
}
@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
@thread3=&ROUND(0,5,10,15);
foreach (@thread0) {
eval; eval(shift(@thread3));
eval(shift(@thread1)); eval(shift(@thread3));
eval(shift(@thread2)); eval(shift(@thread3));
}
$code.=<<___;
cbnz $ctr,.Loop_neon
add.32 @x[0],@x[0],@d[0] // accumulate key block
add $A0,$A0,@K[0]
add @x[1],@x[1],@d[0],lsr#32
add $A1,$A1,@K[0]
add.32 @x[2],@x[2],@d[1]
add $A2,$A2,@K[0]
add @x[3],@x[3],@d[1],lsr#32
add $C0,$C0,@K[2]
add.32 @x[4],@x[4],@d[2]
add $C1,$C1,@K[2]
add @x[5],@x[5],@d[2],lsr#32
add $C2,$C2,@K[2]
add.32 @x[6],@x[6],@d[3]
add $D0,$D0,@K[3]
add @x[7],@x[7],@d[3],lsr#32
add.32 @x[8],@x[8],@d[4]
add $D1,$D1,@K[4]
add @x[9],@x[9],@d[4],lsr#32
add.32 @x[10],@x[10],@d[5]
add $D2,$D2,@K[5]
add @x[11],@x[11],@d[5],lsr#32
add.32 @x[12],@x[12],@d[6]
add $B0,$B0,@K[1]
add @x[13],@x[13],@d[6],lsr#32
add.32 @x[14],@x[14],@d[7]
add $B1,$B1,@K[1]
add @x[15],@x[15],@d[7],lsr#32
add $B2,$B2,@K[1]
b.lo .Ltail_neon
add @x[0],@x[0],@x[1],lsl#32 // pack
add @x[2],@x[2],@x[3],lsl#32
ldp @x[1],@x[3],[$inp,#0] // load input
add @x[4],@x[4],@x[5],lsl#32
add @x[6],@x[6],@x[7],lsl#32
ldp @x[5],@x[7],[$inp,#16]
add @x[8],@x[8],@x[9],lsl#32
add @x[10],@x[10],@x[11],lsl#32
ldp @x[9],@x[11],[$inp,#32]
add @x[12],@x[12],@x[13],lsl#32
add @x[14],@x[14],@x[15],lsl#32
ldp @x[13],@x[15],[$inp,#48]
add $inp,$inp,#64
#ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[2],@x[2]
rev @x[4],@x[4]
rev @x[6],@x[6]
rev @x[8],@x[8]
rev @x[10],@x[10]
rev @x[12],@x[12]
rev @x[14],@x[14]
#endif
ld1.8 {$T0-$T3},[$inp],#64
eor @x[0],@x[0],@x[1]
eor @x[2],@x[2],@x[3]
eor @x[4],@x[4],@x[5]
eor @x[6],@x[6],@x[7]
eor @x[8],@x[8],@x[9]
eor $A0,$A0,$T0
eor @x[10],@x[10],@x[11]
eor $B0,$B0,$T1
eor @x[12],@x[12],@x[13]
eor $C0,$C0,$T2
eor @x[14],@x[14],@x[15]
eor $D0,$D0,$T3
ld1.8 {$T0-$T3},[$inp],#64
stp @x[0],@x[2],[$out,#0] // store output
add @d[6],@d[6],#4 // increment counter
stp @x[4],@x[6],[$out,#16]
add @K[3],@K[3],$ONE // += 4
stp @x[8],@x[10],[$out,#32]
add @K[4],@K[4],$ONE
stp @x[12],@x[14],[$out,#48]
add @K[5],@K[5],$ONE
add $out,$out,#64
st1.8 {$A0-$D0},[$out],#64
ld1.8 {$A0-$D0},[$inp],#64
eor $A1,$A1,$T0
eor $B1,$B1,$T1
eor $C1,$C1,$T2
eor $D1,$D1,$T3
st1.8 {$A1-$D1},[$out],#64
eor $A2,$A2,$A0
eor $B2,$B2,$B0
eor $C2,$C2,$C0
eor $D2,$D2,$D0
st1.8 {$A2-$D2},[$out],#64
b.hi .Loop_outer_neon
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
ret
.Ltail_neon:
add $len,$len,#256
cmp $len,#64
b.lo .Less_than_64
add @x[0],@x[0],@x[1],lsl#32 // pack
add @x[2],@x[2],@x[3],lsl#32
ldp @x[1],@x[3],[$inp,#0] // load input
add @x[4],@x[4],@x[5],lsl#32
add @x[6],@x[6],@x[7],lsl#32
ldp @x[5],@x[7],[$inp,#16]
add @x[8],@x[8],@x[9],lsl#32
add @x[10],@x[10],@x[11],lsl#32
ldp @x[9],@x[11],[$inp,#32]
add @x[12],@x[12],@x[13],lsl#32
add @x[14],@x[14],@x[15],lsl#32
ldp @x[13],@x[15],[$inp,#48]
add $inp,$inp,#64
#ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[2],@x[2]
rev @x[4],@x[4]
rev @x[6],@x[6]
rev @x[8],@x[8]
rev @x[10],@x[10]
rev @x[12],@x[12]
rev @x[14],@x[14]
#endif
eor @x[0],@x[0],@x[1]
eor @x[2],@x[2],@x[3]
eor @x[4],@x[4],@x[5]
eor @x[6],@x[6],@x[7]
eor @x[8],@x[8],@x[9]
eor @x[10],@x[10],@x[11]
eor @x[12],@x[12],@x[13]
eor @x[14],@x[14],@x[15]
stp @x[0],@x[2],[$out,#0] // store output
add @d[6],@d[6],#4 // increment counter
stp @x[4],@x[6],[$out,#16]
stp @x[8],@x[10],[$out,#32]
stp @x[12],@x[14],[$out,#48]
add $out,$out,#64
b.eq .Ldone_neon
sub $len,$len,#64
cmp $len,#64
b.lo .Less_than_128
ld1.8 {$T0-$T3},[$inp],#64
eor $A0,$A0,$T0
eor $B0,$B0,$T1
eor $C0,$C0,$T2
eor $D0,$D0,$T3
st1.8 {$A0-$D0},[$out],#64
b.eq .Ldone_neon
sub $len,$len,#64
cmp $len,#64
b.lo .Less_than_192
ld1.8 {$T0-$T3},[$inp],#64
eor $A1,$A1,$T0
eor $B1,$B1,$T1
eor $C1,$C1,$T2
eor $D1,$D1,$T3
st1.8 {$A1-$D1},[$out],#64
b.eq .Ldone_neon
sub $len,$len,#64
st1.8 {$A2-$D2},[sp]
b .Last_neon
.Less_than_128:
st1.8 {$A0-$D0},[sp]
b .Last_neon
.Less_than_192:
st1.8 {$A1-$D1},[sp]
b .Last_neon
.align 4
.Last_neon:
sub $out,$out,#1
add $inp,$inp,$len
add $out,$out,$len
add $ctr,sp,$len
neg $len,$len
.Loop_tail_neon:
ldrb w10,[$inp,$len]
ldrb w11,[$ctr,$len]
add $len,$len,#1
eor w10,w10,w11
strb w10,[$out,$len]
cbnz $len,.Loop_tail_neon
stp xzr,xzr,[sp,#0]
stp xzr,xzr,[sp,#16]
stp xzr,xzr,[sp,#32]
stp xzr,xzr,[sp,#48]
.Ldone_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
ret
.size ChaCha20_neon,.-ChaCha20_neon
___
{
my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
$A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23));
$code.=<<___;
.type ChaCha20_512_neon,%function
.align 5
ChaCha20_512_neon:
stp x29,x30,[sp,#-96]!
add x29,sp,#0
adr @x[0],.Lsigma
stp x19,x20,[sp,#16]
stp x21,x22,[sp,#32]
stp x23,x24,[sp,#48]
stp x25,x26,[sp,#64]
stp x27,x28,[sp,#80]
.L512_or_more_neon:
sub sp,sp,#128+64
ldp @d[0],@d[1],[@x[0]] // load sigma
ld1 {@K[0]},[@x[0]],#16
ldp @d[2],@d[3],[$key] // load key
ldp @d[4],@d[5],[$key,#16]
ld1 {@K[1],@K[2]},[$key]
ldp @d[6],@d[7],[$ctr] // load counter
ld1 {@K[3]},[$ctr]
ld1 {$ONE},[@x[0]]
#ifdef __ARMEB__
rev64 @K[0],@K[0]
ror @d[2],@d[2],#32
ror @d[3],@d[3],#32
ror @d[4],@d[4],#32
ror @d[5],@d[5],#32
ror @d[6],@d[6],#32
ror @d[7],@d[7],#32
#endif
add @K[3],@K[3],$ONE // += 1
stp @K[0],@K[1],[sp,#0] // off-load key block, invariant part
add @K[3],@K[3],$ONE // not typo
str @K[2],[sp,#32]
add @K[4],@K[3],$ONE
add @K[5],@K[4],$ONE
add @K[6],@K[5],$ONE
shl $ONE,$ONE,#2 // 1 -> 4
stp d8,d9,[sp,#128+0] // meet ABI requirements
stp d10,d11,[sp,#128+16]
stp d12,d13,[sp,#128+32]
stp d14,d15,[sp,#128+48]
sub $len,$len,#512 // not typo
.Loop_outer_512_neon:
mov $A0,@K[0]
mov $A1,@K[0]
mov $A2,@K[0]
mov $A3,@K[0]
mov $A4,@K[0]
mov $A5,@K[0]
mov $B0,@K[1]
mov.32 @x[0],@d[0] // unpack key block
mov $B1,@K[1]
lsr @x[1],@d[0],#32
mov $B2,@K[1]
mov.32 @x[2],@d[1]
mov $B3,@K[1]
lsr @x[3],@d[1],#32
mov $B4,@K[1]
mov.32 @x[4],@d[2]
mov $B5,@K[1]
lsr @x[5],@d[2],#32
mov $D0,@K[3]
mov.32 @x[6],@d[3]
mov $D1,@K[4]
lsr @x[7],@d[3],#32
mov $D2,@K[5]
mov.32 @x[8],@d[4]
mov $D3,@K[6]
lsr @x[9],@d[4],#32
mov $C0,@K[2]
mov.32 @x[10],@d[5]
mov $C1,@K[2]
lsr @x[11],@d[5],#32
add $D4,$D0,$ONE // +4
mov.32 @x[12],@d[6]
add $D5,$D1,$ONE // +4
lsr @x[13],@d[6],#32
mov $C2,@K[2]
mov.32 @x[14],@d[7]
mov $C3,@K[2]
lsr @x[15],@d[7],#32
mov $C4,@K[2]
stp @K[3],@K[4],[sp,#48] // off-load key block, variable part
mov $C5,@K[2]
str @K[5],[sp,#80]
mov $ctr,#5
subs $len,$len,#512
.Loop_upper_neon:
sub $ctr,$ctr,#1
___
my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
my $diff = ($#thread0+1)*6 - $#thread67 - 1;
my $i = 0;
foreach (@thread0) {
eval; eval(shift(@thread67));
eval(shift(@thread1)); eval(shift(@thread67));
eval(shift(@thread2)); eval(shift(@thread67));
eval(shift(@thread3)); eval(shift(@thread67));
eval(shift(@thread4)); eval(shift(@thread67));
eval(shift(@thread5)); eval(shift(@thread67));
}
@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
foreach (@thread0) {
eval; eval(shift(@thread67));
eval(shift(@thread1)); eval(shift(@thread67));
eval(shift(@thread2)); eval(shift(@thread67));
eval(shift(@thread3)); eval(shift(@thread67));
eval(shift(@thread4)); eval(shift(@thread67));
eval(shift(@thread5)); eval(shift(@thread67));
}
$code.=<<___;
cbnz $ctr,.Loop_upper_neon
add.32 @x[0],@x[0],@d[0] // accumulate key block
add @x[1],@x[1],@d[0],lsr#32
add.32 @x[2],@x[2],@d[1]
add @x[3],@x[3],@d[1],lsr#32
add.32 @x[4],@x[4],@d[2]
add @x[5],@x[5],@d[2],lsr#32
add.32 @x[6],@x[6],@d[3]
add @x[7],@x[7],@d[3],lsr#32
add.32 @x[8],@x[8],@d[4]
add @x[9],@x[9],@d[4],lsr#32
add.32 @x[10],@x[10],@d[5]
add @x[11],@x[11],@d[5],lsr#32
add.32 @x[12],@x[12],@d[6]
add @x[13],@x[13],@d[6],lsr#32
add.32 @x[14],@x[14],@d[7]
add @x[15],@x[15],@d[7],lsr#32
add @x[0],@x[0],@x[1],lsl#32 // pack
add @x[2],@x[2],@x[3],lsl#32
ldp @x[1],@x[3],[$inp,#0] // load input
add @x[4],@x[4],@x[5],lsl#32
add @x[6],@x[6],@x[7],lsl#32
ldp @x[5],@x[7],[$inp,#16]
add @x[8],@x[8],@x[9],lsl#32
add @x[10],@x[10],@x[11],lsl#32
ldp @x[9],@x[11],[$inp,#32]
add @x[12],@x[12],@x[13],lsl#32
add @x[14],@x[14],@x[15],lsl#32
ldp @x[13],@x[15],[$inp,#48]
add $inp,$inp,#64
#ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[2],@x[2]
rev @x[4],@x[4]
rev @x[6],@x[6]
rev @x[8],@x[8]
rev @x[10],@x[10]
rev @x[12],@x[12]
rev @x[14],@x[14]
#endif
eor @x[0],@x[0],@x[1]
eor @x[2],@x[2],@x[3]
eor @x[4],@x[4],@x[5]
eor @x[6],@x[6],@x[7]
eor @x[8],@x[8],@x[9]
eor @x[10],@x[10],@x[11]
eor @x[12],@x[12],@x[13]
eor @x[14],@x[14],@x[15]
stp @x[0],@x[2],[$out,#0] // store output
add @d[6],@d[6],#1 // increment counter
mov.32 @x[0],@d[0] // unpack key block
lsr @x[1],@d[0],#32
stp @x[4],@x[6],[$out,#16]
mov.32 @x[2],@d[1]
lsr @x[3],@d[1],#32
stp @x[8],@x[10],[$out,#32]
mov.32 @x[4],@d[2]
lsr @x[5],@d[2],#32
stp @x[12],@x[14],[$out,#48]
add $out,$out,#64
mov.32 @x[6],@d[3]
lsr @x[7],@d[3],#32
mov.32 @x[8],@d[4]
lsr @x[9],@d[4],#32
mov.32 @x[10],@d[5]
lsr @x[11],@d[5],#32
mov.32 @x[12],@d[6]
lsr @x[13],@d[6],#32
mov.32 @x[14],@d[7]
lsr @x[15],@d[7],#32
mov $ctr,#5
.Loop_lower_neon:
sub $ctr,$ctr,#1
___
@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
foreach (@thread0) {
eval; eval(shift(@thread67));
eval(shift(@thread1)); eval(shift(@thread67));
eval(shift(@thread2)); eval(shift(@thread67));
eval(shift(@thread3)); eval(shift(@thread67));
eval(shift(@thread4)); eval(shift(@thread67));
eval(shift(@thread5)); eval(shift(@thread67));
}
@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
foreach (@thread0) {
eval; eval(shift(@thread67));
eval(shift(@thread1)); eval(shift(@thread67));
eval(shift(@thread2)); eval(shift(@thread67));
eval(shift(@thread3)); eval(shift(@thread67));
eval(shift(@thread4)); eval(shift(@thread67));
eval(shift(@thread5)); eval(shift(@thread67));
}
$code.=<<___;
cbnz $ctr,.Loop_lower_neon
add.32 @x[0],@x[0],@d[0] // accumulate key block
ldp @K[0],@K[1],[sp,#0]
add @x[1],@x[1],@d[0],lsr#32
ldp @K[2],@K[3],[sp,#32]
add.32 @x[2],@x[2],@d[1]
ldp @K[4],@K[5],[sp,#64]
add @x[3],@x[3],@d[1],lsr#32
add $A0,$A0,@K[0]
add.32 @x[4],@x[4],@d[2]
add $A1,$A1,@K[0]
add @x[5],@x[5],@d[2],lsr#32
add $A2,$A2,@K[0]
add.32 @x[6],@x[6],@d[3]
add $A3,$A3,@K[0]
add @x[7],@x[7],@d[3],lsr#32
add $A4,$A4,@K[0]
add.32 @x[8],@x[8],@d[4]
add $A5,$A5,@K[0]
add @x[9],@x[9],@d[4],lsr#32
add $C0,$C0,@K[2]
add.32 @x[10],@x[10],@d[5]
add $C1,$C1,@K[2]
add @x[11],@x[11],@d[5],lsr#32
add $C2,$C2,@K[2]
add.32 @x[12],@x[12],@d[6]
add $C3,$C3,@K[2]
add @x[13],@x[13],@d[6],lsr#32
add $C4,$C4,@K[2]
add.32 @x[14],@x[14],@d[7]
add $C5,$C5,@K[2]
add @x[15],@x[15],@d[7],lsr#32
add $D4,$D4,$ONE // +4
add @x[0],@x[0],@x[1],lsl#32 // pack
add $D5,$D5,$ONE // +4
add @x[2],@x[2],@x[3],lsl#32
add $D0,$D0,@K[3]
ldp @x[1],@x[3],[$inp,#0] // load input
add $D1,$D1,@K[4]
add @x[4],@x[4],@x[5],lsl#32
add $D2,$D2,@K[5]
add @x[6],@x[6],@x[7],lsl#32
add $D3,$D3,@K[6]
ldp @x[5],@x[7],[$inp,#16]
add $D4,$D4,@K[3]
add @x[8],@x[8],@x[9],lsl#32
add $D5,$D5,@K[4]
add @x[10],@x[10],@x[11],lsl#32
add $B0,$B0,@K[1]
ldp @x[9],@x[11],[$inp,#32]
add $B1,$B1,@K[1]
add @x[12],@x[12],@x[13],lsl#32
add $B2,$B2,@K[1]
add @x[14],@x[14],@x[15],lsl#32
add $B3,$B3,@K[1]
ldp @x[13],@x[15],[$inp,#48]
add $B4,$B4,@K[1]
add $inp,$inp,#64
add $B5,$B5,@K[1]
#ifdef __ARMEB__
rev @x[0],@x[0]
rev @x[2],@x[2]
rev @x[4],@x[4]
rev @x[6],@x[6]
rev @x[8],@x[8]
rev @x[10],@x[10]
rev @x[12],@x[12]
rev @x[14],@x[14]
#endif
ld1.8 {$T0-$T3},[$inp],#64
eor @x[0],@x[0],@x[1]
eor @x[2],@x[2],@x[3]
eor @x[4],@x[4],@x[5]
eor @x[6],@x[6],@x[7]
eor @x[8],@x[8],@x[9]
eor $A0,$A0,$T0
eor @x[10],@x[10],@x[11]
eor $B0,$B0,$T1
eor @x[12],@x[12],@x[13]
eor $C0,$C0,$T2
eor @x[14],@x[14],@x[15]
eor $D0,$D0,$T3
ld1.8 {$T0-$T3},[$inp],#64
stp @x[0],@x[2],[$out,#0] // store output
add @d[6],@d[6],#7 // increment counter
stp @x[4],@x[6],[$out,#16]
stp @x[8],@x[10],[$out,#32]
stp @x[12],@x[14],[$out,#48]
add $out,$out,#64
st1.8 {$A0-$D0},[$out],#64
ld1.8 {$A0-$D0},[$inp],#64
eor $A1,$A1,$T0
eor $B1,$B1,$T1
eor $C1,$C1,$T2
eor $D1,$D1,$T3
st1.8 {$A1-$D1},[$out],#64
ld1.8 {$A1-$D1},[$inp],#64
eor $A2,$A2,$A0
ldp @K[0],@K[1],[sp,#0]
eor $B2,$B2,$B0
ldp @K[2],@K[3],[sp,#32]
eor $C2,$C2,$C0
eor $D2,$D2,$D0
st1.8 {$A2-$D2},[$out],#64
ld1.8 {$A2-$D2},[$inp],#64
eor $A3,$A3,$A1
eor $B3,$B3,$B1
eor $C3,$C3,$C1
eor $D3,$D3,$D1
st1.8 {$A3-$D3},[$out],#64
ld1.8 {$A3-$D3},[$inp],#64
eor $A4,$A4,$A2
eor $B4,$B4,$B2
eor $C4,$C4,$C2
eor $D4,$D4,$D2
st1.8 {$A4-$D4},[$out],#64
shl $A0,$ONE,#1 // 4 -> 8
eor $A5,$A5,$A3
eor $B5,$B5,$B3
eor $C5,$C5,$C3
eor $D5,$D5,$D3
st1.8 {$A5-$D5},[$out],#64
add @K[3],@K[3],$A0 // += 8
add @K[4],@K[4],$A0
add @K[5],@K[5],$A0
add @K[6],@K[6],$A0
b.hs .Loop_outer_512_neon
adds $len,$len,#512
ushr $A0,$ONE,#2 // 4 -> 1
ldp d8,d9,[sp,#128+0] // meet ABI requirements
ldp d10,d11,[sp,#128+16]
ldp d12,d13,[sp,#128+32]
ldp d14,d15,[sp,#128+48]
stp @K[0],$ONE,[sp,#0] // wipe off-load area
stp @K[0],$ONE,[sp,#32]
stp @K[0],$ONE,[sp,#64]
b.eq .Ldone_512_neon
cmp $len,#192
sub @K[3],@K[3],$A0 // -= 1
sub @K[4],@K[4],$A0
sub @K[5],@K[5],$A0
add sp,sp,#128
b.hs .Loop_outer_neon
eor @K[1],@K[1],@K[1]
eor @K[2],@K[2],@K[2]
eor @K[3],@K[3],@K[3]
eor @K[4],@K[4],@K[4]
eor @K[5],@K[5],@K[5]
eor @K[6],@K[6],@K[6]
b .Loop_outer
.Ldone_512_neon:
ldp x19,x20,[x29,#16]
add sp,sp,#128+64
ldp x21,x22,[x29,#32]
ldp x23,x24,[x29,#48]
ldp x25,x26,[x29,#64]
ldp x27,x28,[x29,#80]
ldp x29,x30,[sp],#96
ret
.size ChaCha20_512_neon,.-ChaCha20_512_neon
___
}
}}}
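
# Post-process the generated code, rewriting the pseudo-ops used above into
# real AArch64 syntax: a ".32" suffix selects the 32-bit form by renaming
# xN registers to wN; eor/ext/mov and ld1.8/st1.8 on .4s vectors become
# .16b forms; ldr/ldp/str/stp of vN.4s become qN loads and stores; and
# rev32.16 becomes rev32 on .8h lanes.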
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/geo;
(s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1)) or
(m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1)) or
(s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1)) or
(m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1)) or
(s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));
#s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
print $_,"\n";
}
close STDOUT; # flush