path: root/openssl-1.1.0h/crypto/chacha/asm
Diffstat (limited to 'openssl-1.1.0h/crypto/chacha/asm')
-rwxr-xr-x  openssl-1.1.0h/crypto/chacha/asm/chacha-armv4.pl      1158
-rwxr-xr-x  openssl-1.1.0h/crypto/chacha/asm/chacha-armv8.pl      1135
-rwxr-xr-x  openssl-1.1.0h/crypto/chacha/asm/chacha-c64xplus.pl    926
-rwxr-xr-x  openssl-1.1.0h/crypto/chacha/asm/chacha-ppc.pl         953
-rwxr-xr-x  openssl-1.1.0h/crypto/chacha/asm/chacha-s390x.pl       326
-rwxr-xr-x  openssl-1.1.0h/crypto/chacha/asm/chacha-x86.pl        1154
-rwxr-xr-x  openssl-1.1.0h/crypto/chacha/asm/chacha-x86_64.pl     2245
7 files changed, 7897 insertions, 0 deletions
diff --git a/openssl-1.1.0h/crypto/chacha/asm/chacha-armv4.pl b/openssl-1.1.0h/crypto/chacha/asm/chacha-armv4.pl
new file mode 100755
index 0000000..b5e21e4
--- /dev/null
+++ b/openssl-1.1.0h/crypto/chacha/asm/chacha-armv4.pl
@@ -0,0 +1,1158 @@
+#! /usr/bin/env perl
+# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# December 2014
+#
+# ChaCha20 for ARMv4.
+#
+# Performance in cycles per byte out of large buffer.
+#
+# IALU/gcc-4.4 1xNEON 3xNEON+1xIALU
+#
+# Cortex-A5 19.3(*)/+95% 21.8 14.1
+# Cortex-A8 10.5(*)/+160% 13.9 6.35
+# Cortex-A9 12.9(**)/+110% 14.3 6.50
+# Cortex-A15 11.0/+40% 16.0 5.00
+# Snapdragon S4 11.5/+125% 13.6 4.90
+#
+# (*) most "favourable" result for aligned data on a little-endian
+#	processor; the result for misaligned data is 10-15% lower;
+# (**) this result is a trade-off: it can be improved by 20%,
+#	but then the Snapdragon S4 and Cortex-A8 results get
+#	20-25% worse;
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+ open STDOUT,">$output";
+}
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
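
The AUTOLOAD thunk above is what lets the ROUND generators below emit assembly by calling ordinary-looking Perl subs such as &vadd_i32(...). A minimal standalone sketch of that mechanism (not part of the module; the sample operands are chosen purely for illustration):

    our $code = "";
    sub AUTOLOAD {                          # catches calls to undefined subs
        our $AUTOLOAD;
        my $opcode = $AUTOLOAD;
        $opcode =~ s/.*:://;                # strip the package name
        $opcode =~ s/_/\./;                 # vadd_i32 -> vadd.i32
        my $arg = pop;
        $arg = "#$arg" if ($arg*1 eq $arg); # bare numbers become immediates
        $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
    }
    &vadd_i32("q0","q0","q4");              # appends "\tvadd.i32\tq0,q0,q4\n"
    &vshr_u32("q12","q12",20);              # appends "\tvshr.u32\tq12,q12,#20\n"
    print $code;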
+
+my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x"));
+my @t=map("r$_",(8..11));
+
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my $odd = $d0&1;
+my ($xc,$xc_) = (@t[0..1]);
+my ($xd,$xd_) = $odd ? (@t[2],@x[$d1]) : (@x[$d0],@t[2]);
+my @ret;
+
+ # Consider the order in which variables are addressed by their
+ # index:
+ #
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+ #
+ # 'a' and 'b' are permanently allocated in registers, @x[0..7],
+ # while the 'c's and a pair of 'd's are maintained in memory. If
+ # you look at the 'c' column, you'll notice that each pair of 'c's
+ # is invariant between rounds. This means they only have to be
+ # reloaded once per round, in the middle, which is why you'll see
+ # a bunch of 'c' stores and loads in the middle, but none at the
+ # beginning or end. If you look at the 'd' column, you'll notice
+ # that 15 and 13 are reused in the next pair of rounds, which is
+ # why those two are chosen for offloading to memory, so that the
+ # loads count for more.
+ push @ret,(
+ "&add (@x[$a0],@x[$a0],@x[$b0])",
+ "&mov ($xd,$xd,'ror#16')",
+ "&add (@x[$a1],@x[$a1],@x[$b1])",
+ "&mov ($xd_,$xd_,'ror#16')",
+ "&eor ($xd,$xd,@x[$a0],'ror#16')",
+ "&eor ($xd_,$xd_,@x[$a1],'ror#16')",
+
+ "&add ($xc,$xc,$xd)",
+ "&mov (@x[$b0],@x[$b0],'ror#20')",
+ "&add ($xc_,$xc_,$xd_)",
+ "&mov (@x[$b1],@x[$b1],'ror#20')",
+ "&eor (@x[$b0],@x[$b0],$xc,'ror#20')",
+ "&eor (@x[$b1],@x[$b1],$xc_,'ror#20')",
+
+ "&add (@x[$a0],@x[$a0],@x[$b0])",
+ "&mov ($xd,$xd,'ror#24')",
+ "&add (@x[$a1],@x[$a1],@x[$b1])",
+ "&mov ($xd_,$xd_,'ror#24')",
+ "&eor ($xd,$xd,@x[$a0],'ror#24')",
+ "&eor ($xd_,$xd_,@x[$a1],'ror#24')",
+
+ "&add ($xc,$xc,$xd)",
+ "&mov (@x[$b0],@x[$b0],'ror#25')" );
+ push @ret,(
+ "&str ($xd,'[sp,#4*(16+$d0)]')",
+ "&ldr ($xd,'[sp,#4*(16+$d2)]')" ) if ($odd);
+ push @ret,(
+ "&add ($xc_,$xc_,$xd_)",
+ "&mov (@x[$b1],@x[$b1],'ror#25')" );
+ push @ret,(
+ "&str ($xd_,'[sp,#4*(16+$d1)]')",
+ "&ldr ($xd_,'[sp,#4*(16+$d3)]')" ) if (!$odd);
+ push @ret,(
+ "&eor (@x[$b0],@x[$b0],$xc,'ror#25')",
+ "&eor (@x[$b1],@x[$b1],$xc_,'ror#25')" );
+
+ $xd=@x[$d2] if (!$odd);
+ $xd_=@x[$d3] if ($odd);
+ push @ret,(
+ "&str ($xc,'[sp,#4*(16+$c0)]')",
+ "&ldr ($xc,'[sp,#4*(16+$c2)]')",
+ "&add (@x[$a2],@x[$a2],@x[$b2])",
+ "&mov ($xd,$xd,'ror#16')",
+ "&str ($xc_,'[sp,#4*(16+$c1)]')",
+ "&ldr ($xc_,'[sp,#4*(16+$c3)]')",
+ "&add (@x[$a3],@x[$a3],@x[$b3])",
+ "&mov ($xd_,$xd_,'ror#16')",
+ "&eor ($xd,$xd,@x[$a2],'ror#16')",
+ "&eor ($xd_,$xd_,@x[$a3],'ror#16')",
+
+ "&add ($xc,$xc,$xd)",
+ "&mov (@x[$b2],@x[$b2],'ror#20')",
+ "&add ($xc_,$xc_,$xd_)",
+ "&mov (@x[$b3],@x[$b3],'ror#20')",
+ "&eor (@x[$b2],@x[$b2],$xc,'ror#20')",
+ "&eor (@x[$b3],@x[$b3],$xc_,'ror#20')",
+
+ "&add (@x[$a2],@x[$a2],@x[$b2])",
+ "&mov ($xd,$xd,'ror#24')",
+ "&add (@x[$a3],@x[$a3],@x[$b3])",
+ "&mov ($xd_,$xd_,'ror#24')",
+ "&eor ($xd,$xd,@x[$a2],'ror#24')",
+ "&eor ($xd_,$xd_,@x[$a3],'ror#24')",
+
+ "&add ($xc,$xc,$xd)",
+ "&mov (@x[$b2],@x[$b2],'ror#25')",
+ "&add ($xc_,$xc_,$xd_)",
+ "&mov (@x[$b3],@x[$b3],'ror#25')",
+ "&eor (@x[$b2],@x[$b2],$xc,'ror#25')",
+ "&eor (@x[$b3],@x[$b3],$xc_,'ror#25')" );
+
+ @ret;
+}
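
The map expressions at the top of ROUND derive the other three column (or diagonal) quadruples from the first one. A standalone sketch of that index step, reproducing the even/odd-round table in the comment above (illustration only):

    # keep the row base ($_ & ~3), step the position within the row by one (mod 4)
    sub next_quad { map { ($_ & ~3) + (($_ + 1) & 3) } @_ }

    for my $first ([0,4,8,12], [0,5,10,15]) {   # even-round and odd-round starters
        my @q = @$first;
        for (1..4) {
            print join(' ', @q), "\n";
            @q = next_quad(@q);
        }
        print "\n";
    }
    # prints 0 4 8 12 / 1 5 9 13 / 2 6 10 14 / 3 7 11 15,
    # then   0 5 10 15 / 1 6 11 12 / 2 7 8 13 / 3 4 9 14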
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+#if defined(__thumb2__)
+.syntax unified
+.thumb
+#else
+.code 32
+#endif
+
+#if defined(__thumb2__) || defined(__clang__)
+#define ldrhsb ldrbhs
+#endif
+
+.align 5
+.Lsigma:
+.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 @ endian-neutral
+.Lone:
+.long 1,0,0,0
+#if __ARM_MAX_ARCH__>=7
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-.LChaCha20_ctr32
+#else
+.word -1
+#endif
+
+.globl ChaCha20_ctr32
+.type ChaCha20_ctr32,%function
+.align 5
+ChaCha20_ctr32:
+.LChaCha20_ctr32:
+ ldr r12,[sp,#0] @ pull pointer to counter and nonce
+ stmdb sp!,{r0-r2,r4-r11,lr}
+#if __ARM_ARCH__<7 && !defined(__thumb2__)
+ sub r14,pc,#16 @ ChaCha20_ctr32
+#else
+ adr r14,.LChaCha20_ctr32
+#endif
+ cmp r2,#0 @ len==0?
+#ifdef __thumb2__
+ itt eq
+#endif
+ addeq sp,sp,#4*3
+ beq .Lno_data
+#if __ARM_MAX_ARCH__>=7
+ cmp r2,#192 @ test len
+ bls .Lshort
+ ldr r4,[r14,#-32]
+ ldr r4,[r14,r4]
+# ifdef __APPLE__
+ ldr r4,[r4]
+# endif
+ tst r4,#ARMV7_NEON
+ bne .LChaCha20_neon
+.Lshort:
+#endif
+ ldmia r12,{r4-r7} @ load counter and nonce
+ sub sp,sp,#4*(16) @ off-load area
+ sub r14,r14,#64 @ .Lsigma
+ stmdb sp!,{r4-r7} @ copy counter and nonce
+ ldmia r3,{r4-r11} @ load key
+ ldmia r14,{r0-r3} @ load sigma
+ stmdb sp!,{r4-r11} @ copy key
+ stmdb sp!,{r0-r3} @ copy sigma
+ str r10,[sp,#4*(16+10)] @ off-load "@x[10]"
+ str r11,[sp,#4*(16+11)] @ off-load "@x[11]"
+ b .Loop_outer_enter
+
+.align 4
+.Loop_outer:
+ ldmia sp,{r0-r9} @ load key material
+ str @t[3],[sp,#4*(32+2)] @ save len
+ str r12, [sp,#4*(32+1)] @ save inp
+ str r14, [sp,#4*(32+0)] @ save out
+.Loop_outer_enter:
+ ldr @t[3], [sp,#4*(15)]
+ ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
+ ldr @t[2], [sp,#4*(13)]
+ ldr @x[14],[sp,#4*(14)]
+ str @t[3], [sp,#4*(16+15)]
+ mov @t[3],#10
+ b .Loop
+
+.align 4
+.Loop:
+ subs @t[3],@t[3],#1
+___
+ foreach (&ROUND(0, 4, 8,12)) { eval; }
+ foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+ bne .Loop
+
+ ldr @t[3],[sp,#4*(32+2)] @ load len
+
+ str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store
+ str @t[1], [sp,#4*(16+9)]
+ str @x[12],[sp,#4*(16+12)]
+ str @t[2], [sp,#4*(16+13)]
+ str @x[14],[sp,#4*(16+14)]
+
+ @ at this point we have first half of 512-bit result in
+ @ @x[0-7] and second half at sp+4*(16+8)
+
+ cmp @t[3],#64 @ done yet?
+#ifdef __thumb2__
+ itete lo
+#endif
+ addlo r12,sp,#4*(0) @ shortcut or ...
+ ldrhs r12,[sp,#4*(32+1)] @ ... load inp
+ addlo r14,sp,#4*(0) @ shortcut or ...
+ ldrhs r14,[sp,#4*(32+0)] @ ... load out
+
+ ldr @t[0],[sp,#4*(0)] @ load key material
+ ldr @t[1],[sp,#4*(1)]
+
+#if __ARM_ARCH__>=6 || !defined(__ARMEB__)
+# if __ARM_ARCH__<7
+ orr @t[2],r12,r14
+ tst @t[2],#3 @ are input and output aligned?
+ ldr @t[2],[sp,#4*(2)]
+ bne .Lunaligned
+ cmp @t[3],#64 @ restore flags
+# else
+ ldr @t[2],[sp,#4*(2)]
+# endif
+ ldr @t[3],[sp,#4*(3)]
+
+ add @x[0],@x[0],@t[0] @ accumulate key material
+ add @x[1],@x[1],@t[1]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[0],[r12],#16 @ load input
+ ldrhs @t[1],[r12,#-12]
+
+ add @x[2],@x[2],@t[2]
+ add @x[3],@x[3],@t[3]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[2],[r12,#-8]
+ ldrhs @t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+ rev @x[0],@x[0]
+ rev @x[1],@x[1]
+ rev @x[2],@x[2]
+ rev @x[3],@x[3]
+# endif
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[0],@x[0],@t[0] @ xor with input
+ eorhs @x[1],@x[1],@t[1]
+ add @t[0],sp,#4*(4)
+ str @x[0],[r14],#16 @ store output
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[2],@x[2],@t[2]
+ eorhs @x[3],@x[3],@t[3]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+ str @x[1],[r14,#-12]
+ str @x[2],[r14,#-8]
+ str @x[3],[r14,#-4]
+
+ add @x[4],@x[4],@t[0] @ accumulate key material
+ add @x[5],@x[5],@t[1]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[0],[r12],#16 @ load input
+ ldrhs @t[1],[r12,#-12]
+ add @x[6],@x[6],@t[2]
+ add @x[7],@x[7],@t[3]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[2],[r12,#-8]
+ ldrhs @t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+ rev @x[4],@x[4]
+ rev @x[5],@x[5]
+ rev @x[6],@x[6]
+ rev @x[7],@x[7]
+# endif
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[4],@x[4],@t[0]
+ eorhs @x[5],@x[5],@t[1]
+ add @t[0],sp,#4*(8)
+ str @x[4],[r14],#16 @ store output
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[6],@x[6],@t[2]
+ eorhs @x[7],@x[7],@t[3]
+ str @x[5],[r14,#-12]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+ str @x[6],[r14,#-8]
+ add @x[0],sp,#4*(16+8)
+ str @x[7],[r14,#-4]
+
+ ldmia @x[0],{@x[0]-@x[7]} @ load second half
+
+ add @x[0],@x[0],@t[0] @ accumulate key material
+ add @x[1],@x[1],@t[1]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[0],[r12],#16 @ load input
+ ldrhs @t[1],[r12,#-12]
+# ifdef __thumb2__
+ itt hi
+# endif
+ strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it
+ strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it
+ add @x[2],@x[2],@t[2]
+ add @x[3],@x[3],@t[3]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[2],[r12,#-8]
+ ldrhs @t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+ rev @x[0],@x[0]
+ rev @x[1],@x[1]
+ rev @x[2],@x[2]
+ rev @x[3],@x[3]
+# endif
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[0],@x[0],@t[0]
+ eorhs @x[1],@x[1],@t[1]
+ add @t[0],sp,#4*(12)
+ str @x[0],[r14],#16 @ store output
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[2],@x[2],@t[2]
+ eorhs @x[3],@x[3],@t[3]
+ str @x[1],[r14,#-12]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+ str @x[2],[r14,#-8]
+ str @x[3],[r14,#-4]
+
+ add @x[4],@x[4],@t[0] @ accumulate key material
+ add @x[5],@x[5],@t[1]
+# ifdef __thumb2__
+ itt hi
+# endif
+ addhi @t[0],@t[0],#1 @ next counter value
+ strhi @t[0],[sp,#4*(12)] @ save next counter value
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[0],[r12],#16 @ load input
+ ldrhs @t[1],[r12,#-12]
+ add @x[6],@x[6],@t[2]
+ add @x[7],@x[7],@t[3]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhs @t[2],[r12,#-8]
+ ldrhs @t[3],[r12,#-4]
+# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+ rev @x[4],@x[4]
+ rev @x[5],@x[5]
+ rev @x[6],@x[6]
+ rev @x[7],@x[7]
+# endif
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[4],@x[4],@t[0]
+ eorhs @x[5],@x[5],@t[1]
+# ifdef __thumb2__
+ it ne
+# endif
+ ldrne @t[0],[sp,#4*(32+2)] @ re-load len
+# ifdef __thumb2__
+ itt hs
+# endif
+ eorhs @x[6],@x[6],@t[2]
+ eorhs @x[7],@x[7],@t[3]
+ str @x[4],[r14],#16 @ store output
+ str @x[5],[r14,#-12]
+# ifdef __thumb2__
+ it hs
+# endif
+ subhs @t[3],@t[0],#64 @ len-=64
+ str @x[6],[r14,#-8]
+ str @x[7],[r14,#-4]
+ bhi .Loop_outer
+
+ beq .Ldone
+# if __ARM_ARCH__<7
+ b .Ltail
+
+.align 4
+.Lunaligned: @ unaligned endian-neutral path
+ cmp @t[3],#64 @ restore flags
+# endif
+#endif
+#if __ARM_ARCH__<7
+ ldr @t[3],[sp,#4*(3)]
+___
+for ($i=0;$i<16;$i+=4) {
+my $j=$i&0x7;
+
+$code.=<<___ if ($i==4);
+ add @x[0],sp,#4*(16+8)
+___
+$code.=<<___ if ($i==8);
+ ldmia @x[0],{@x[0]-@x[7]} @ load second half
+# ifdef __thumb2__
+ itt hi
+# endif
+ strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]"
+ strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]"
+___
+$code.=<<___;
+ add @x[$j+0],@x[$j+0],@t[0] @ accumulate key material
+___
+$code.=<<___ if ($i==12);
+# ifdef __thumb2__
+ itt hi
+# endif
+ addhi @t[0],@t[0],#1 @ next counter value
+ strhi @t[0],[sp,#4*(12)] @ save next counter value
+___
+$code.=<<___;
+ add @x[$j+1],@x[$j+1],@t[1]
+ add @x[$j+2],@x[$j+2],@t[2]
+# ifdef __thumb2__
+ itete lo
+# endif
+ eorlo @t[0],@t[0],@t[0] @ zero or ...
+ ldrhsb @t[0],[r12],#16 @ ... load input
+ eorlo @t[1],@t[1],@t[1]
+ ldrhsb @t[1],[r12,#-12]
+
+ add @x[$j+3],@x[$j+3],@t[3]
+# ifdef __thumb2__
+ itete lo
+# endif
+ eorlo @t[2],@t[2],@t[2]
+ ldrhsb @t[2],[r12,#-8]
+ eorlo @t[3],@t[3],@t[3]
+ ldrhsb @t[3],[r12,#-4]
+
+ eor @x[$j+0],@t[0],@x[$j+0] @ xor with input (or zero)
+ eor @x[$j+1],@t[1],@x[$j+1]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhsb @t[0],[r12,#-15] @ load more input
+ ldrhsb @t[1],[r12,#-11]
+ eor @x[$j+2],@t[2],@x[$j+2]
+ strb @x[$j+0],[r14],#16 @ store output
+ eor @x[$j+3],@t[3],@x[$j+3]
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhsb @t[2],[r12,#-7]
+ ldrhsb @t[3],[r12,#-3]
+ strb @x[$j+1],[r14,#-12]
+ eor @x[$j+0],@t[0],@x[$j+0],lsr#8
+ strb @x[$j+2],[r14,#-8]
+ eor @x[$j+1],@t[1],@x[$j+1],lsr#8
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhsb @t[0],[r12,#-14] @ load more input
+ ldrhsb @t[1],[r12,#-10]
+ strb @x[$j+3],[r14,#-4]
+ eor @x[$j+2],@t[2],@x[$j+2],lsr#8
+ strb @x[$j+0],[r14,#-15]
+ eor @x[$j+3],@t[3],@x[$j+3],lsr#8
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhsb @t[2],[r12,#-6]
+ ldrhsb @t[3],[r12,#-2]
+ strb @x[$j+1],[r14,#-11]
+ eor @x[$j+0],@t[0],@x[$j+0],lsr#8
+ strb @x[$j+2],[r14,#-7]
+ eor @x[$j+1],@t[1],@x[$j+1],lsr#8
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhsb @t[0],[r12,#-13] @ load more input
+ ldrhsb @t[1],[r12,#-9]
+ strb @x[$j+3],[r14,#-3]
+ eor @x[$j+2],@t[2],@x[$j+2],lsr#8
+ strb @x[$j+0],[r14,#-14]
+ eor @x[$j+3],@t[3],@x[$j+3],lsr#8
+# ifdef __thumb2__
+ itt hs
+# endif
+ ldrhsb @t[2],[r12,#-5]
+ ldrhsb @t[3],[r12,#-1]
+ strb @x[$j+1],[r14,#-10]
+ strb @x[$j+2],[r14,#-6]
+ eor @x[$j+0],@t[0],@x[$j+0],lsr#8
+ strb @x[$j+3],[r14,#-2]
+ eor @x[$j+1],@t[1],@x[$j+1],lsr#8
+ strb @x[$j+0],[r14,#-13]
+ eor @x[$j+2],@t[2],@x[$j+2],lsr#8
+ strb @x[$j+1],[r14,#-9]
+ eor @x[$j+3],@t[3],@x[$j+3],lsr#8
+ strb @x[$j+2],[r14,#-5]
+ strb @x[$j+3],[r14,#-1]
+___
+$code.=<<___ if ($i<12);
+ add @t[0],sp,#4*(4+$i)
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+___
+}
+$code.=<<___;
+# ifdef __thumb2__
+ it ne
+# endif
+ ldrne @t[0],[sp,#4*(32+2)] @ re-load len
+# ifdef __thumb2__
+ it hs
+# endif
+ subhs @t[3],@t[0],#64 @ len-=64
+ bhi .Loop_outer
+
+ beq .Ldone
+#endif
+
+.Ltail:
+ ldr r12,[sp,#4*(32+1)] @ load inp
+ add @t[1],sp,#4*(0)
+ ldr r14,[sp,#4*(32+0)] @ load out
+
+.Loop_tail:
+ ldrb @t[2],[@t[1]],#1 @ read buffer on stack
+ ldrb @t[3],[r12],#1 @ read input
+ subs @t[0],@t[0],#1
+ eor @t[3],@t[3],@t[2]
+ strb @t[3],[r14],#1 @ store output
+ bne .Loop_tail
+
+.Ldone:
+ add sp,sp,#4*(32+3)
+.Lno_data:
+ ldmia sp!,{r4-r11,pc}
+.size ChaCha20_ctr32,.-ChaCha20_ctr32
+___
+
+{{{
+my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) =
+ map("q$_",(0..15));
+
+sub NEONROUND {
+my $odd = pop;
+my ($a,$b,$c,$d,$t)=@_;
+
+ (
+ "&vadd_i32 ($a,$a,$b)",
+ "&veor ($d,$d,$a)",
+ "&vrev32_16 ($d,$d)", # vrot ($d,16)
+
+ "&vadd_i32 ($c,$c,$d)",
+ "&veor ($t,$b,$c)",
+ "&vshr_u32 ($b,$t,20)",
+ "&vsli_32 ($b,$t,12)",
+
+ "&vadd_i32 ($a,$a,$b)",
+ "&veor ($t,$d,$a)",
+ "&vshr_u32 ($d,$t,24)",
+ "&vsli_32 ($d,$t,8)",
+
+ "&vadd_i32 ($c,$c,$d)",
+ "&veor ($t,$b,$c)",
+ "&vshr_u32 ($b,$t,25)",
+ "&vsli_32 ($b,$t,7)",
+
+ "&vext_8 ($c,$c,$c,8)",
+ "&vext_8 ($b,$b,$b,$odd?12:4)",
+ "&vext_8 ($d,$d,$d,$odd?4:12)"
+ );
+}
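
NEON has no 32-bit rotate instruction, so NEONROUND builds each rotate-left from a vshr.u32 by 32-n followed by a vsli.32 (shift-left-and-insert) by n; only the rotate by 16 gets the cheaper vrev32.16 halfword swap. A standalone check of that decomposition on plain integers (the test value is arbitrary, for illustration only):

    sub rotl32  { my ($x,$n) = @_; (($x << $n) & 0xffffffff) | ($x >> (32-$n)) }
    sub shr_sli { my ($x,$s,$n) = @_; ($x >> $s) | (($x << $n) & 0xffffffff) }

    my $x = 0x3320646e;                         # arbitrary 32-bit test value
    for ([20,12], [24,8], [25,7]) {             # the vshr/vsli pairs used above
        my ($s,$n) = @$_;
        printf "shr %d + sli %2d: %08x   rotl %2d: %08x\n",
               $s, $n, shr_sli($x,$s,$n), $n, rotl32($x,$n);
    }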
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.type ChaCha20_neon,%function
+.align 5
+ChaCha20_neon:
+ ldr r12,[sp,#0] @ pull pointer to counter and nonce
+ stmdb sp!,{r0-r2,r4-r11,lr}
+.LChaCha20_neon:
+ adr r14,.Lsigma
+ vstmdb sp!,{d8-d15} @ ABI spec says so
+ stmdb sp!,{r0-r3}
+
+ vld1.32 {$b0-$c0},[r3] @ load key
+ ldmia r3,{r4-r11} @ load key
+
+ sub sp,sp,#4*(16+16)
+ vld1.32 {$d0},[r12] @ load counter and nonce
+ add r12,sp,#4*8
+ ldmia r14,{r0-r3} @ load sigma
+ vld1.32 {$a0},[r14]! @ load sigma
+ vld1.32 {$t0},[r14] @ one
+ vst1.32 {$c0-$d0},[r12] @ copy 1/2key|counter|nonce
+ vst1.32 {$a0-$b0},[sp] @ copy sigma|1/2key
+
+ str r10,[sp,#4*(16+10)] @ off-load "@x[10]"
+ str r11,[sp,#4*(16+11)] @ off-load "@x[11]"
+ vshl.i32 $t1#lo,$t0#lo,#1 @ two
+ vstr $t0#lo,[sp,#4*(16+0)]
+ vshl.i32 $t2#lo,$t0#lo,#2 @ four
+ vstr $t1#lo,[sp,#4*(16+2)]
+ vmov $a1,$a0
+ vstr $t2#lo,[sp,#4*(16+4)]
+ vmov $a2,$a0
+ vmov $b1,$b0
+ vmov $b2,$b0
+ b .Loop_neon_enter
+
+.align 4
+.Loop_neon_outer:
+ ldmia sp,{r0-r9} @ load key material
+ cmp @t[3],#64*2 @ if len<=64*2
+ bls .Lbreak_neon @ switch to integer-only
+ vmov $a1,$a0
+ str @t[3],[sp,#4*(32+2)] @ save len
+ vmov $a2,$a0
+ str r12, [sp,#4*(32+1)] @ save inp
+ vmov $b1,$b0
+ str r14, [sp,#4*(32+0)] @ save out
+ vmov $b2,$b0
+.Loop_neon_enter:
+ ldr @t[3], [sp,#4*(15)]
+ vadd.i32 $d1,$d0,$t0 @ counter+1
+ ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
+ vmov $c1,$c0
+ ldr @t[2], [sp,#4*(13)]
+ vmov $c2,$c0
+ ldr @x[14],[sp,#4*(14)]
+ vadd.i32 $d2,$d1,$t0 @ counter+2
+ str @t[3], [sp,#4*(16+15)]
+ mov @t[3],#10
+ add @x[12],@x[12],#3 @ counter+3
+ b .Loop_neon
+
+.align 4
+.Loop_neon:
+ subs @t[3],@t[3],#1
+___
+ my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0);
+ my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0);
+ my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0);
+ my @thread3=&ROUND(0,4,8,12);
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread3));
+ eval(shift(@thread1)); eval(shift(@thread3));
+ eval(shift(@thread2)); eval(shift(@thread3));
+ }
+
+ @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1);
+ @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1);
+ @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1);
+ @thread3=&ROUND(0,5,10,15);
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread3));
+ eval(shift(@thread1)); eval(shift(@thread3));
+ eval(shift(@thread2)); eval(shift(@thread3));
+ }
+$code.=<<___;
+ bne .Loop_neon
+
+ add @t[3],sp,#32
+ vld1.32 {$t0-$t1},[sp] @ load key material
+ vld1.32 {$t2-$t3},[@t[3]]
+
+ ldr @t[3],[sp,#4*(32+2)] @ load len
+
+ str @t[0], [sp,#4*(16+8)] @ modulo-scheduled store
+ str @t[1], [sp,#4*(16+9)]
+ str @x[12],[sp,#4*(16+12)]
+ str @t[2], [sp,#4*(16+13)]
+ str @x[14],[sp,#4*(16+14)]
+
+ @ at this point we have first half of 512-bit result in
+ @ @x[0-7] and second half at sp+4*(16+8)
+
+ ldr r12,[sp,#4*(32+1)] @ load inp
+ ldr r14,[sp,#4*(32+0)] @ load out
+
+ vadd.i32 $a0,$a0,$t0 @ accumulate key material
+ vadd.i32 $a1,$a1,$t0
+ vadd.i32 $a2,$a2,$t0
+ vldr $t0#lo,[sp,#4*(16+0)] @ one
+
+ vadd.i32 $b0,$b0,$t1
+ vadd.i32 $b1,$b1,$t1
+ vadd.i32 $b2,$b2,$t1
+ vldr $t1#lo,[sp,#4*(16+2)] @ two
+
+ vadd.i32 $c0,$c0,$t2
+ vadd.i32 $c1,$c1,$t2
+ vadd.i32 $c2,$c2,$t2
+ vadd.i32 $d1#lo,$d1#lo,$t0#lo @ counter+1
+ vadd.i32 $d2#lo,$d2#lo,$t1#lo @ counter+2
+
+ vadd.i32 $d0,$d0,$t3
+ vadd.i32 $d1,$d1,$t3
+ vadd.i32 $d2,$d2,$t3
+
+ cmp @t[3],#64*4
+ blo .Ltail_neon
+
+ vld1.8 {$t0-$t1},[r12]! @ load input
+ mov @t[3],sp
+ vld1.8 {$t2-$t3},[r12]!
+ veor $a0,$a0,$t0 @ xor with input
+ veor $b0,$b0,$t1
+ vld1.8 {$t0-$t1},[r12]!
+ veor $c0,$c0,$t2
+ veor $d0,$d0,$t3
+ vld1.8 {$t2-$t3},[r12]!
+
+ veor $a1,$a1,$t0
+ vst1.8 {$a0-$b0},[r14]! @ store output
+ veor $b1,$b1,$t1
+ vld1.8 {$t0-$t1},[r12]!
+ veor $c1,$c1,$t2
+ vst1.8 {$c0-$d0},[r14]!
+ veor $d1,$d1,$t3
+ vld1.8 {$t2-$t3},[r12]!
+
+ veor $a2,$a2,$t0
+ vld1.32 {$a0-$b0},[@t[3]]! @ load for next iteration
+ veor $t0#hi,$t0#hi,$t0#hi
+ vldr $t0#lo,[sp,#4*(16+4)] @ four
+ veor $b2,$b2,$t1
+ vld1.32 {$c0-$d0},[@t[3]]
+ veor $c2,$c2,$t2
+ vst1.8 {$a1-$b1},[r14]!
+ veor $d2,$d2,$t3
+ vst1.8 {$c1-$d1},[r14]!
+
+ vadd.i32 $d0#lo,$d0#lo,$t0#lo @ next counter value
+ vldr $t0#lo,[sp,#4*(16+0)] @ one
+
+ ldmia sp,{@t[0]-@t[3]} @ load key material
+ add @x[0],@x[0],@t[0] @ accumulate key material
+ ldr @t[0],[r12],#16 @ load input
+ vst1.8 {$a2-$b2},[r14]!
+ add @x[1],@x[1],@t[1]
+ ldr @t[1],[r12,#-12]
+ vst1.8 {$c2-$d2},[r14]!
+ add @x[2],@x[2],@t[2]
+ ldr @t[2],[r12,#-8]
+ add @x[3],@x[3],@t[3]
+ ldr @t[3],[r12,#-4]
+# ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[1],@x[1]
+ rev @x[2],@x[2]
+ rev @x[3],@x[3]
+# endif
+ eor @x[0],@x[0],@t[0] @ xor with input
+ add @t[0],sp,#4*(4)
+ eor @x[1],@x[1],@t[1]
+ str @x[0],[r14],#16 @ store output
+ eor @x[2],@x[2],@t[2]
+ str @x[1],[r14,#-12]
+ eor @x[3],@x[3],@t[3]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+ str @x[2],[r14,#-8]
+ str @x[3],[r14,#-4]
+
+ add @x[4],@x[4],@t[0] @ accumulate key material
+ ldr @t[0],[r12],#16 @ load input
+ add @x[5],@x[5],@t[1]
+ ldr @t[1],[r12,#-12]
+ add @x[6],@x[6],@t[2]
+ ldr @t[2],[r12,#-8]
+ add @x[7],@x[7],@t[3]
+ ldr @t[3],[r12,#-4]
+# ifdef __ARMEB__
+ rev @x[4],@x[4]
+ rev @x[5],@x[5]
+ rev @x[6],@x[6]
+ rev @x[7],@x[7]
+# endif
+ eor @x[4],@x[4],@t[0]
+ add @t[0],sp,#4*(8)
+ eor @x[5],@x[5],@t[1]
+ str @x[4],[r14],#16 @ store output
+ eor @x[6],@x[6],@t[2]
+ str @x[5],[r14,#-12]
+ eor @x[7],@x[7],@t[3]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+ str @x[6],[r14,#-8]
+ add @x[0],sp,#4*(16+8)
+ str @x[7],[r14,#-4]
+
+ ldmia @x[0],{@x[0]-@x[7]} @ load second half
+
+ add @x[0],@x[0],@t[0] @ accumulate key material
+ ldr @t[0],[r12],#16 @ load input
+ add @x[1],@x[1],@t[1]
+ ldr @t[1],[r12,#-12]
+# ifdef __thumb2__
+ it hi
+# endif
+ strhi @t[2],[sp,#4*(16+10)] @ copy "@x[10]" while at it
+ add @x[2],@x[2],@t[2]
+ ldr @t[2],[r12,#-8]
+# ifdef __thumb2__
+ it hi
+# endif
+ strhi @t[3],[sp,#4*(16+11)] @ copy "@x[11]" while at it
+ add @x[3],@x[3],@t[3]
+ ldr @t[3],[r12,#-4]
+# ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[1],@x[1]
+ rev @x[2],@x[2]
+ rev @x[3],@x[3]
+# endif
+ eor @x[0],@x[0],@t[0]
+ add @t[0],sp,#4*(12)
+ eor @x[1],@x[1],@t[1]
+ str @x[0],[r14],#16 @ store output
+ eor @x[2],@x[2],@t[2]
+ str @x[1],[r14,#-12]
+ eor @x[3],@x[3],@t[3]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+ str @x[2],[r14,#-8]
+ str @x[3],[r14,#-4]
+
+ add @x[4],@x[4],@t[0] @ accumulate key material
+ add @t[0],@t[0],#4 @ next counter value
+ add @x[5],@x[5],@t[1]
+ str @t[0],[sp,#4*(12)] @ save next counter value
+ ldr @t[0],[r12],#16 @ load input
+ add @x[6],@x[6],@t[2]
+ add @x[4],@x[4],#3 @ counter+3
+ ldr @t[1],[r12,#-12]
+ add @x[7],@x[7],@t[3]
+ ldr @t[2],[r12,#-8]
+ ldr @t[3],[r12,#-4]
+# ifdef __ARMEB__
+ rev @x[4],@x[4]
+ rev @x[5],@x[5]
+ rev @x[6],@x[6]
+ rev @x[7],@x[7]
+# endif
+ eor @x[4],@x[4],@t[0]
+# ifdef __thumb2__
+ it hi
+# endif
+ ldrhi @t[0],[sp,#4*(32+2)] @ re-load len
+ eor @x[5],@x[5],@t[1]
+ eor @x[6],@x[6],@t[2]
+ str @x[4],[r14],#16 @ store output
+ eor @x[7],@x[7],@t[3]
+ str @x[5],[r14,#-12]
+ sub @t[3],@t[0],#64*4 @ len-=64*4
+ str @x[6],[r14,#-8]
+ str @x[7],[r14,#-4]
+ bhi .Loop_neon_outer
+
+ b .Ldone_neon
+
+.align 4
+.Lbreak_neon:
+ @ harmonize NEON and integer-only stack frames: load data
+ @ from NEON frame, but save to integer-only one; distance
+ @ between the two is 4*(32+4+16-32)=4*(20).
+
+ str @t[3], [sp,#4*(20+32+2)] @ save len
+ add @t[3],sp,#4*(32+4)
+ str r12, [sp,#4*(20+32+1)] @ save inp
+ str r14, [sp,#4*(20+32+0)] @ save out
+
+ ldr @x[12],[sp,#4*(16+10)]
+ ldr @x[14],[sp,#4*(16+11)]
+ vldmia @t[3],{d8-d15} @ fulfill ABI requirement
+ str @x[12],[sp,#4*(20+16+10)] @ copy "@x[10]"
+ str @x[14],[sp,#4*(20+16+11)] @ copy "@x[11]"
+
+ ldr @t[3], [sp,#4*(15)]
+ ldr @x[12],[sp,#4*(12)] @ modulo-scheduled load
+ ldr @t[2], [sp,#4*(13)]
+ ldr @x[14],[sp,#4*(14)]
+ str @t[3], [sp,#4*(20+16+15)]
+ add @t[3],sp,#4*(20)
+ vst1.32 {$a0-$b0},[@t[3]]! @ copy key
+ add sp,sp,#4*(20) @ switch frame
+ vst1.32 {$c0-$d0},[@t[3]]
+ mov @t[3],#10
+ b .Loop @ go integer-only
+
+.align 4
+.Ltail_neon:
+ cmp @t[3],#64*3
+ bhs .L192_or_more_neon
+ cmp @t[3],#64*2
+ bhs .L128_or_more_neon
+ cmp @t[3],#64*1
+ bhs .L64_or_more_neon
+
+ add @t[0],sp,#4*(8)
+ vst1.8 {$a0-$b0},[sp]
+ add @t[2],sp,#4*(0)
+ vst1.8 {$c0-$d0},[@t[0]]
+ b .Loop_tail_neon
+
+.align 4
+.L64_or_more_neon:
+ vld1.8 {$t0-$t1},[r12]!
+ vld1.8 {$t2-$t3},[r12]!
+ veor $a0,$a0,$t0
+ veor $b0,$b0,$t1
+ veor $c0,$c0,$t2
+ veor $d0,$d0,$t3
+ vst1.8 {$a0-$b0},[r14]!
+ vst1.8 {$c0-$d0},[r14]!
+
+ beq .Ldone_neon
+
+ add @t[0],sp,#4*(8)
+ vst1.8 {$a1-$b1},[sp]
+ add @t[2],sp,#4*(0)
+ vst1.8 {$c1-$d1},[@t[0]]
+ sub @t[3],@t[3],#64*1 @ len-=64*1
+ b .Loop_tail_neon
+
+.align 4
+.L128_or_more_neon:
+ vld1.8 {$t0-$t1},[r12]!
+ vld1.8 {$t2-$t3},[r12]!
+ veor $a0,$a0,$t0
+ veor $b0,$b0,$t1
+ vld1.8 {$t0-$t1},[r12]!
+ veor $c0,$c0,$t2
+ veor $d0,$d0,$t3
+ vld1.8 {$t2-$t3},[r12]!
+
+ veor $a1,$a1,$t0
+ veor $b1,$b1,$t1
+ vst1.8 {$a0-$b0},[r14]!
+ veor $c1,$c1,$t2
+ vst1.8 {$c0-$d0},[r14]!
+ veor $d1,$d1,$t3
+ vst1.8 {$a1-$b1},[r14]!
+ vst1.8 {$c1-$d1},[r14]!
+
+ beq .Ldone_neon
+
+ add @t[0],sp,#4*(8)
+ vst1.8 {$a2-$b2},[sp]
+ add @t[2],sp,#4*(0)
+ vst1.8 {$c2-$d2},[@t[0]]
+ sub @t[3],@t[3],#64*2 @ len-=64*2
+ b .Loop_tail_neon
+
+.align 4
+.L192_or_more_neon:
+ vld1.8 {$t0-$t1},[r12]!
+ vld1.8 {$t2-$t3},[r12]!
+ veor $a0,$a0,$t0
+ veor $b0,$b0,$t1
+ vld1.8 {$t0-$t1},[r12]!
+ veor $c0,$c0,$t2
+ veor $d0,$d0,$t3
+ vld1.8 {$t2-$t3},[r12]!
+
+ veor $a1,$a1,$t0
+ veor $b1,$b1,$t1
+ vld1.8 {$t0-$t1},[r12]!
+ veor $c1,$c1,$t2
+ vst1.8 {$a0-$b0},[r14]!
+ veor $d1,$d1,$t3
+ vld1.8 {$t2-$t3},[r12]!
+
+ veor $a2,$a2,$t0
+ vst1.8 {$c0-$d0},[r14]!
+ veor $b2,$b2,$t1
+ vst1.8 {$a1-$b1},[r14]!
+ veor $c2,$c2,$t2
+ vst1.8 {$c1-$d1},[r14]!
+ veor $d2,$d2,$t3
+ vst1.8 {$a2-$b2},[r14]!
+ vst1.8 {$c2-$d2},[r14]!
+
+ beq .Ldone_neon
+
+ ldmia sp,{@t[0]-@t[3]} @ load key material
+ add @x[0],@x[0],@t[0] @ accumulate key material
+ add @t[0],sp,#4*(4)
+ add @x[1],@x[1],@t[1]
+ add @x[2],@x[2],@t[2]
+ add @x[3],@x[3],@t[3]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+
+ add @x[4],@x[4],@t[0] @ accumulate key material
+ add @t[0],sp,#4*(8)
+ add @x[5],@x[5],@t[1]
+ add @x[6],@x[6],@t[2]
+ add @x[7],@x[7],@t[3]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+# ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[1],@x[1]
+ rev @x[2],@x[2]
+ rev @x[3],@x[3]
+ rev @x[4],@x[4]
+ rev @x[5],@x[5]
+ rev @x[6],@x[6]
+ rev @x[7],@x[7]
+# endif
+ stmia sp,{@x[0]-@x[7]}
+ add @x[0],sp,#4*(16+8)
+
+ ldmia @x[0],{@x[0]-@x[7]} @ load second half
+
+ add @x[0],@x[0],@t[0] @ accumulate key material
+ add @t[0],sp,#4*(12)
+ add @x[1],@x[1],@t[1]
+ add @x[2],@x[2],@t[2]
+ add @x[3],@x[3],@t[3]
+ ldmia @t[0],{@t[0]-@t[3]} @ load key material
+
+ add @x[4],@x[4],@t[0] @ accumulate key material
+ add @t[0],sp,#4*(8)
+ add @x[5],@x[5],@t[1]
+ add @x[4],@x[4],#3 @ counter+3
+ add @x[6],@x[6],@t[2]
+ add @x[7],@x[7],@t[3]
+ ldr @t[3],[sp,#4*(32+2)] @ re-load len
+# ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[1],@x[1]
+ rev @x[2],@x[2]
+ rev @x[3],@x[3]
+ rev @x[4],@x[4]
+ rev @x[5],@x[5]
+ rev @x[6],@x[6]
+ rev @x[7],@x[7]
+# endif
+ stmia @t[0],{@x[0]-@x[7]}
+ add @t[2],sp,#4*(0)
+ sub @t[3],@t[3],#64*3 @ len-=64*3
+
+.Loop_tail_neon:
+ ldrb @t[0],[@t[2]],#1 @ read buffer on stack
+ ldrb @t[1],[r12],#1 @ read input
+ subs @t[3],@t[3],#1
+ eor @t[0],@t[0],@t[1]
+ strb @t[0],[r14],#1 @ store output
+ bne .Loop_tail_neon
+
+.Ldone_neon:
+ add sp,sp,#4*(32+4)
+ vldmia sp,{d8-d15}
+ add sp,sp,#4*(16+3)
+ ldmia sp!,{r4-r11,pc}
+.size ChaCha20_neon,.-ChaCha20_neon
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+}}}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
+
+ print $_,"\n";
+}
+close STDOUT;
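
The last substitution in the loop above resolves the q-register half references used throughout the NEON path (e.g. $t0#lo) to the d registers that alias them. A quick standalone illustration of the same mapping (sample names only):

    # each 128-bit q<N> register overlaps d<2N> (low half) and d<2N+1> (high half)
    for my $ref ("q8#lo", "q8#hi", "q12#lo") {
        (my $d = $ref) =~ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/e;
        print "$ref -> $d\n";        # q8#lo -> d16, q8#hi -> d17, q12#lo -> d24
    }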
diff --git a/openssl-1.1.0h/crypto/chacha/asm/chacha-armv8.pl b/openssl-1.1.0h/crypto/chacha/asm/chacha-armv8.pl
new file mode 100755
index 0000000..f7e1074
--- /dev/null
+++ b/openssl-1.1.0h/crypto/chacha/asm/chacha-armv8.pl
@@ -0,0 +1,1135 @@
+#! /usr/bin/env perl
+# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# June 2015
+#
+# ChaCha20 for ARMv8.
+#
+# Performance in cycles per byte out of large buffer.
+#
+# IALU/gcc-4.9 3xNEON+1xIALU 6xNEON+2xIALU
+#
+# Apple A7 5.50/+49% 3.33 1.70
+# Cortex-A53 8.40/+80% 4.72 4.72(*)
+# Cortex-A57 8.06/+43% 4.90 4.43(**)
+# Denver 4.50/+82% 2.63 2.67(*)
+# X-Gene 9.50/+46% 8.82 8.89(*)
+# Mongoose 8.00/+44% 3.64 3.25
+#
+# (*) doubling the interleave factor is not expected to help all
+#	processors, only those with higher NEON latency and higher
+#	instruction issue rate;
+# (**) the expected improvement was actually higher;
+
+$flavour=shift;
+$output=shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ my $arg = pop;
+ $arg = "#$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
+}
+
+my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4));
+
+my @x=map("x$_",(5..17,19..21));
+my @d=map("x$_",(22..28,30));
+
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+
+ (
+ "&add_32 (@x[$a0],@x[$a0],@x[$b0])",
+ "&add_32 (@x[$a1],@x[$a1],@x[$b1])",
+ "&add_32 (@x[$a2],@x[$a2],@x[$b2])",
+ "&add_32 (@x[$a3],@x[$a3],@x[$b3])",
+ "&eor_32 (@x[$d0],@x[$d0],@x[$a0])",
+ "&eor_32 (@x[$d1],@x[$d1],@x[$a1])",
+ "&eor_32 (@x[$d2],@x[$d2],@x[$a2])",
+ "&eor_32 (@x[$d3],@x[$d3],@x[$a3])",
+ "&ror_32 (@x[$d0],@x[$d0],16)",
+ "&ror_32 (@x[$d1],@x[$d1],16)",
+ "&ror_32 (@x[$d2],@x[$d2],16)",
+ "&ror_32 (@x[$d3],@x[$d3],16)",
+
+ "&add_32 (@x[$c0],@x[$c0],@x[$d0])",
+ "&add_32 (@x[$c1],@x[$c1],@x[$d1])",
+ "&add_32 (@x[$c2],@x[$c2],@x[$d2])",
+ "&add_32 (@x[$c3],@x[$c3],@x[$d3])",
+ "&eor_32 (@x[$b0],@x[$b0],@x[$c0])",
+ "&eor_32 (@x[$b1],@x[$b1],@x[$c1])",
+ "&eor_32 (@x[$b2],@x[$b2],@x[$c2])",
+ "&eor_32 (@x[$b3],@x[$b3],@x[$c3])",
+ "&ror_32 (@x[$b0],@x[$b0],20)",
+ "&ror_32 (@x[$b1],@x[$b1],20)",
+ "&ror_32 (@x[$b2],@x[$b2],20)",
+ "&ror_32 (@x[$b3],@x[$b3],20)",
+
+ "&add_32 (@x[$a0],@x[$a0],@x[$b0])",
+ "&add_32 (@x[$a1],@x[$a1],@x[$b1])",
+ "&add_32 (@x[$a2],@x[$a2],@x[$b2])",
+ "&add_32 (@x[$a3],@x[$a3],@x[$b3])",
+ "&eor_32 (@x[$d0],@x[$d0],@x[$a0])",
+ "&eor_32 (@x[$d1],@x[$d1],@x[$a1])",
+ "&eor_32 (@x[$d2],@x[$d2],@x[$a2])",
+ "&eor_32 (@x[$d3],@x[$d3],@x[$a3])",
+ "&ror_32 (@x[$d0],@x[$d0],24)",
+ "&ror_32 (@x[$d1],@x[$d1],24)",
+ "&ror_32 (@x[$d2],@x[$d2],24)",
+ "&ror_32 (@x[$d3],@x[$d3],24)",
+
+ "&add_32 (@x[$c0],@x[$c0],@x[$d0])",
+ "&add_32 (@x[$c1],@x[$c1],@x[$d1])",
+ "&add_32 (@x[$c2],@x[$c2],@x[$d2])",
+ "&add_32 (@x[$c3],@x[$c3],@x[$d3])",
+ "&eor_32 (@x[$b0],@x[$b0],@x[$c0])",
+ "&eor_32 (@x[$b1],@x[$b1],@x[$c1])",
+ "&eor_32 (@x[$b2],@x[$b2],@x[$c2])",
+ "&eor_32 (@x[$b3],@x[$b3],@x[$c3])",
+ "&ror_32 (@x[$b0],@x[$b0],25)",
+ "&ror_32 (@x[$b1],@x[$b1],25)",
+ "&ror_32 (@x[$b2],@x[$b2],25)",
+ "&ror_32 (@x[$b3],@x[$b3],25)"
+ );
+}
+
+$code.=<<___;
+#include "arm_arch.h"
+
+.text
+
+.extern OPENSSL_armcap_P
+
+.align 5
+.Lsigma:
+.quad 0x3320646e61707865,0x6b20657479622d32 // endian-neutral
+.Lone:
+.long 1,0,0,0
+.LOPENSSL_armcap_P:
+#ifdef __ILP32__
+.long OPENSSL_armcap_P-.
+#else
+.quad OPENSSL_armcap_P-.
+#endif
+.asciz "ChaCha20 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+
+.globl ChaCha20_ctr32
+.type ChaCha20_ctr32,%function
+.align 5
+ChaCha20_ctr32:
+ cbz $len,.Labort
+ adr @x[0],.LOPENSSL_armcap_P
+ cmp $len,#192
+ b.lo .Lshort
+#ifdef __ILP32__
+ ldrsw @x[1],[@x[0]]
+#else
+ ldr @x[1],[@x[0]]
+#endif
+ ldr w17,[@x[1],@x[0]]
+ tst w17,#ARMV7_NEON
+ b.ne ChaCha20_neon
+
+.Lshort:
+ stp x29,x30,[sp,#-96]!
+ add x29,sp,#0
+
+ adr @x[0],.Lsigma
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ sub sp,sp,#64
+
+ ldp @d[0],@d[1],[@x[0]] // load sigma
+ ldp @d[2],@d[3],[$key] // load key
+ ldp @d[4],@d[5],[$key,#16]
+ ldp @d[6],@d[7],[$ctr] // load counter
+#ifdef __ARMEB__
+ ror @d[2],@d[2],#32
+ ror @d[3],@d[3],#32
+ ror @d[4],@d[4],#32
+ ror @d[5],@d[5],#32
+ ror @d[6],@d[6],#32
+ ror @d[7],@d[7],#32
+#endif
+
+.Loop_outer:
+ mov.32 @x[0],@d[0] // unpack key block
+ lsr @x[1],@d[0],#32
+ mov.32 @x[2],@d[1]
+ lsr @x[3],@d[1],#32
+ mov.32 @x[4],@d[2]
+ lsr @x[5],@d[2],#32
+ mov.32 @x[6],@d[3]
+ lsr @x[7],@d[3],#32
+ mov.32 @x[8],@d[4]
+ lsr @x[9],@d[4],#32
+ mov.32 @x[10],@d[5]
+ lsr @x[11],@d[5],#32
+ mov.32 @x[12],@d[6]
+ lsr @x[13],@d[6],#32
+ mov.32 @x[14],@d[7]
+ lsr @x[15],@d[7],#32
+
+ mov $ctr,#10
+ subs $len,$len,#64
+.Loop:
+ sub $ctr,$ctr,#1
+___
+ foreach (&ROUND(0, 4, 8,12)) { eval; }
+ foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+ cbnz $ctr,.Loop
+
+ add.32 @x[0],@x[0],@d[0] // accumulate key block
+ add @x[1],@x[1],@d[0],lsr#32
+ add.32 @x[2],@x[2],@d[1]
+ add @x[3],@x[3],@d[1],lsr#32
+ add.32 @x[4],@x[4],@d[2]
+ add @x[5],@x[5],@d[2],lsr#32
+ add.32 @x[6],@x[6],@d[3]
+ add @x[7],@x[7],@d[3],lsr#32
+ add.32 @x[8],@x[8],@d[4]
+ add @x[9],@x[9],@d[4],lsr#32
+ add.32 @x[10],@x[10],@d[5]
+ add @x[11],@x[11],@d[5],lsr#32
+ add.32 @x[12],@x[12],@d[6]
+ add @x[13],@x[13],@d[6],lsr#32
+ add.32 @x[14],@x[14],@d[7]
+ add @x[15],@x[15],@d[7],lsr#32
+
+ b.lo .Ltail
+
+ add @x[0],@x[0],@x[1],lsl#32 // pack
+ add @x[2],@x[2],@x[3],lsl#32
+ ldp @x[1],@x[3],[$inp,#0] // load input
+ add @x[4],@x[4],@x[5],lsl#32
+ add @x[6],@x[6],@x[7],lsl#32
+ ldp @x[5],@x[7],[$inp,#16]
+ add @x[8],@x[8],@x[9],lsl#32
+ add @x[10],@x[10],@x[11],lsl#32
+ ldp @x[9],@x[11],[$inp,#32]
+ add @x[12],@x[12],@x[13],lsl#32
+ add @x[14],@x[14],@x[15],lsl#32
+ ldp @x[13],@x[15],[$inp,#48]
+ add $inp,$inp,#64
+#ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[2],@x[2]
+ rev @x[4],@x[4]
+ rev @x[6],@x[6]
+ rev @x[8],@x[8]
+ rev @x[10],@x[10]
+ rev @x[12],@x[12]
+ rev @x[14],@x[14]
+#endif
+ eor @x[0],@x[0],@x[1]
+ eor @x[2],@x[2],@x[3]
+ eor @x[4],@x[4],@x[5]
+ eor @x[6],@x[6],@x[7]
+ eor @x[8],@x[8],@x[9]
+ eor @x[10],@x[10],@x[11]
+ eor @x[12],@x[12],@x[13]
+ eor @x[14],@x[14],@x[15]
+
+ stp @x[0],@x[2],[$out,#0] // store output
+ add @d[6],@d[6],#1 // increment counter
+ stp @x[4],@x[6],[$out,#16]
+ stp @x[8],@x[10],[$out,#32]
+ stp @x[12],@x[14],[$out,#48]
+ add $out,$out,#64
+
+ b.hi .Loop_outer
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#64
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#96
+.Labort:
+ ret
+
+.align 4
+.Ltail:
+ add $len,$len,#64
+.Less_than_64:
+ sub $out,$out,#1
+ add $inp,$inp,$len
+ add $out,$out,$len
+ add $ctr,sp,$len
+ neg $len,$len
+
+ add @x[0],@x[0],@x[1],lsl#32 // pack
+ add @x[2],@x[2],@x[3],lsl#32
+ add @x[4],@x[4],@x[5],lsl#32
+ add @x[6],@x[6],@x[7],lsl#32
+ add @x[8],@x[8],@x[9],lsl#32
+ add @x[10],@x[10],@x[11],lsl#32
+ add @x[12],@x[12],@x[13],lsl#32
+ add @x[14],@x[14],@x[15],lsl#32
+#ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[2],@x[2]
+ rev @x[4],@x[4]
+ rev @x[6],@x[6]
+ rev @x[8],@x[8]
+ rev @x[10],@x[10]
+ rev @x[12],@x[12]
+ rev @x[14],@x[14]
+#endif
+ stp @x[0],@x[2],[sp,#0]
+ stp @x[4],@x[6],[sp,#16]
+ stp @x[8],@x[10],[sp,#32]
+ stp @x[12],@x[14],[sp,#48]
+
+.Loop_tail:
+ ldrb w10,[$inp,$len]
+ ldrb w11,[$ctr,$len]
+ add $len,$len,#1
+ eor w10,w10,w11
+ strb w10,[$out,$len]
+ cbnz $len,.Loop_tail
+
+ stp xzr,xzr,[sp,#0]
+ stp xzr,xzr,[sp,#16]
+ stp xzr,xzr,[sp,#32]
+ stp xzr,xzr,[sp,#48]
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#64
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#96
+ ret
+.size ChaCha20_ctr32,.-ChaCha20_ctr32
+___
+
+{{{
+my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2,$T3) =
+ map("v$_.4s",(0..7,16..23));
+my (@K)=map("v$_.4s",(24..30));
+my $ONE="v31.4s";
+
+sub NEONROUND {
+my $odd = pop;
+my ($a,$b,$c,$d,$t)=@_;
+
+ (
+ "&add ('$a','$a','$b')",
+ "&eor ('$d','$d','$a')",
+ "&rev32_16 ('$d','$d')", # vrot ($d,16)
+
+ "&add ('$c','$c','$d')",
+ "&eor ('$t','$b','$c')",
+ "&ushr ('$b','$t',20)",
+ "&sli ('$b','$t',12)",
+
+ "&add ('$a','$a','$b')",
+ "&eor ('$t','$d','$a')",
+ "&ushr ('$d','$t',24)",
+ "&sli ('$d','$t',8)",
+
+ "&add ('$c','$c','$d')",
+ "&eor ('$t','$b','$c')",
+ "&ushr ('$b','$t',25)",
+ "&sli ('$b','$t',7)",
+
+ "&ext ('$c','$c','$c',8)",
+ "&ext ('$d','$d','$d',$odd?4:12)",
+ "&ext ('$b','$b','$b',$odd?12:4)"
+ );
+}
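
The ext instructions at the tail of the AArch64 NEONROUND (and their vext counterparts in the ARMv4 module) are what switch the state between column and diagonal arrangement: b is rotated by one lane, c by two, d by three, and the odd-round variant rotates the other way to undo it. A standalone sketch of the effect on the 4x4 state indices (illustration only):

    sub rot_lanes { my ($row,$n) = @_; [ @{$row}[$n..3, 0..$n-1] ] }

    my @state = ([0,1,2,3], [4,5,6,7], [8,9,10,11], [12,13,14,15]);  # rows a,b,c,d
    my @diag  = ($state[0], rot_lanes($state[1],1),
                 rot_lanes($state[2],2), rot_lanes($state[3],3));
    for my $lane (0..3) {
        print join(' ', map { $_->[$lane] } @diag), "\n";
    }
    # each SIMD lane now holds one diagonal:
    # 0 5 10 15 / 1 6 11 12 / 2 7 8 13 / 3 4 9 14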
+
+$code.=<<___;
+
+.type ChaCha20_neon,%function
+.align 5
+ChaCha20_neon:
+ stp x29,x30,[sp,#-96]!
+ add x29,sp,#0
+
+ adr @x[0],.Lsigma
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ cmp $len,#512
+ b.hs .L512_or_more_neon
+
+ sub sp,sp,#64
+
+ ldp @d[0],@d[1],[@x[0]] // load sigma
+ ld1 {@K[0]},[@x[0]],#16
+ ldp @d[2],@d[3],[$key] // load key
+ ldp @d[4],@d[5],[$key,#16]
+ ld1 {@K[1],@K[2]},[$key]
+ ldp @d[6],@d[7],[$ctr] // load counter
+ ld1 {@K[3]},[$ctr]
+ ld1 {$ONE},[@x[0]]
+#ifdef __ARMEB__
+ rev64 @K[0],@K[0]
+ ror @d[2],@d[2],#32
+ ror @d[3],@d[3],#32
+ ror @d[4],@d[4],#32
+ ror @d[5],@d[5],#32
+ ror @d[6],@d[6],#32
+ ror @d[7],@d[7],#32
+#endif
+ add @K[3],@K[3],$ONE // += 1
+ add @K[4],@K[3],$ONE
+ add @K[5],@K[4],$ONE
+ shl $ONE,$ONE,#2 // 1 -> 4
+
+.Loop_outer_neon:
+ mov.32 @x[0],@d[0] // unpack key block
+ lsr @x[1],@d[0],#32
+ mov $A0,@K[0]
+ mov.32 @x[2],@d[1]
+ lsr @x[3],@d[1],#32
+ mov $A1,@K[0]
+ mov.32 @x[4],@d[2]
+ lsr @x[5],@d[2],#32
+ mov $A2,@K[0]
+ mov.32 @x[6],@d[3]
+ mov $B0,@K[1]
+ lsr @x[7],@d[3],#32
+ mov $B1,@K[1]
+ mov.32 @x[8],@d[4]
+ mov $B2,@K[1]
+ lsr @x[9],@d[4],#32
+ mov $D0,@K[3]
+ mov.32 @x[10],@d[5]
+ mov $D1,@K[4]
+ lsr @x[11],@d[5],#32
+ mov $D2,@K[5]
+ mov.32 @x[12],@d[6]
+ mov $C0,@K[2]
+ lsr @x[13],@d[6],#32
+ mov $C1,@K[2]
+ mov.32 @x[14],@d[7]
+ mov $C2,@K[2]
+ lsr @x[15],@d[7],#32
+
+ mov $ctr,#10
+ subs $len,$len,#256
+.Loop_neon:
+ sub $ctr,$ctr,#1
+___
+ my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
+ my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
+ my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
+ my @thread3=&ROUND(0,4,8,12);
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread3));
+ eval(shift(@thread1)); eval(shift(@thread3));
+ eval(shift(@thread2)); eval(shift(@thread3));
+ }
+
+ @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
+ @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
+ @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
+ @thread3=&ROUND(0,5,10,15);
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread3));
+ eval(shift(@thread1)); eval(shift(@thread3));
+ eval(shift(@thread2)); eval(shift(@thread3));
+ }
+$code.=<<___;
+ cbnz $ctr,.Loop_neon
+
+ add.32 @x[0],@x[0],@d[0] // accumulate key block
+ add $A0,$A0,@K[0]
+ add @x[1],@x[1],@d[0],lsr#32
+ add $A1,$A1,@K[0]
+ add.32 @x[2],@x[2],@d[1]
+ add $A2,$A2,@K[0]
+ add @x[3],@x[3],@d[1],lsr#32
+ add $C0,$C0,@K[2]
+ add.32 @x[4],@x[4],@d[2]
+ add $C1,$C1,@K[2]
+ add @x[5],@x[5],@d[2],lsr#32
+ add $C2,$C2,@K[2]
+ add.32 @x[6],@x[6],@d[3]
+ add $D0,$D0,@K[3]
+ add @x[7],@x[7],@d[3],lsr#32
+ add.32 @x[8],@x[8],@d[4]
+ add $D1,$D1,@K[4]
+ add @x[9],@x[9],@d[4],lsr#32
+ add.32 @x[10],@x[10],@d[5]
+ add $D2,$D2,@K[5]
+ add @x[11],@x[11],@d[5],lsr#32
+ add.32 @x[12],@x[12],@d[6]
+ add $B0,$B0,@K[1]
+ add @x[13],@x[13],@d[6],lsr#32
+ add.32 @x[14],@x[14],@d[7]
+ add $B1,$B1,@K[1]
+ add @x[15],@x[15],@d[7],lsr#32
+ add $B2,$B2,@K[1]
+
+ b.lo .Ltail_neon
+
+ add @x[0],@x[0],@x[1],lsl#32 // pack
+ add @x[2],@x[2],@x[3],lsl#32
+ ldp @x[1],@x[3],[$inp,#0] // load input
+ add @x[4],@x[4],@x[5],lsl#32
+ add @x[6],@x[6],@x[7],lsl#32
+ ldp @x[5],@x[7],[$inp,#16]
+ add @x[8],@x[8],@x[9],lsl#32
+ add @x[10],@x[10],@x[11],lsl#32
+ ldp @x[9],@x[11],[$inp,#32]
+ add @x[12],@x[12],@x[13],lsl#32
+ add @x[14],@x[14],@x[15],lsl#32
+ ldp @x[13],@x[15],[$inp,#48]
+ add $inp,$inp,#64
+#ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[2],@x[2]
+ rev @x[4],@x[4]
+ rev @x[6],@x[6]
+ rev @x[8],@x[8]
+ rev @x[10],@x[10]
+ rev @x[12],@x[12]
+ rev @x[14],@x[14]
+#endif
+ ld1.8 {$T0-$T3},[$inp],#64
+ eor @x[0],@x[0],@x[1]
+ eor @x[2],@x[2],@x[3]
+ eor @x[4],@x[4],@x[5]
+ eor @x[6],@x[6],@x[7]
+ eor @x[8],@x[8],@x[9]
+ eor $A0,$A0,$T0
+ eor @x[10],@x[10],@x[11]
+ eor $B0,$B0,$T1
+ eor @x[12],@x[12],@x[13]
+ eor $C0,$C0,$T2
+ eor @x[14],@x[14],@x[15]
+ eor $D0,$D0,$T3
+ ld1.8 {$T0-$T3},[$inp],#64
+
+ stp @x[0],@x[2],[$out,#0] // store output
+ add @d[6],@d[6],#4 // increment counter
+ stp @x[4],@x[6],[$out,#16]
+ add @K[3],@K[3],$ONE // += 4
+ stp @x[8],@x[10],[$out,#32]
+ add @K[4],@K[4],$ONE
+ stp @x[12],@x[14],[$out,#48]
+ add @K[5],@K[5],$ONE
+ add $out,$out,#64
+
+ st1.8 {$A0-$D0},[$out],#64
+ ld1.8 {$A0-$D0},[$inp],#64
+
+ eor $A1,$A1,$T0
+ eor $B1,$B1,$T1
+ eor $C1,$C1,$T2
+ eor $D1,$D1,$T3
+ st1.8 {$A1-$D1},[$out],#64
+
+ eor $A2,$A2,$A0
+ eor $B2,$B2,$B0
+ eor $C2,$C2,$C0
+ eor $D2,$D2,$D0
+ st1.8 {$A2-$D2},[$out],#64
+
+ b.hi .Loop_outer_neon
+
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#64
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#96
+ ret
+
+.Ltail_neon:
+ add $len,$len,#256
+ cmp $len,#64
+ b.lo .Less_than_64
+
+ add @x[0],@x[0],@x[1],lsl#32 // pack
+ add @x[2],@x[2],@x[3],lsl#32
+ ldp @x[1],@x[3],[$inp,#0] // load input
+ add @x[4],@x[4],@x[5],lsl#32
+ add @x[6],@x[6],@x[7],lsl#32
+ ldp @x[5],@x[7],[$inp,#16]
+ add @x[8],@x[8],@x[9],lsl#32
+ add @x[10],@x[10],@x[11],lsl#32
+ ldp @x[9],@x[11],[$inp,#32]
+ add @x[12],@x[12],@x[13],lsl#32
+ add @x[14],@x[14],@x[15],lsl#32
+ ldp @x[13],@x[15],[$inp,#48]
+ add $inp,$inp,#64
+#ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[2],@x[2]
+ rev @x[4],@x[4]
+ rev @x[6],@x[6]
+ rev @x[8],@x[8]
+ rev @x[10],@x[10]
+ rev @x[12],@x[12]
+ rev @x[14],@x[14]
+#endif
+ eor @x[0],@x[0],@x[1]
+ eor @x[2],@x[2],@x[3]
+ eor @x[4],@x[4],@x[5]
+ eor @x[6],@x[6],@x[7]
+ eor @x[8],@x[8],@x[9]
+ eor @x[10],@x[10],@x[11]
+ eor @x[12],@x[12],@x[13]
+ eor @x[14],@x[14],@x[15]
+
+ stp @x[0],@x[2],[$out,#0] // store output
+ add @d[6],@d[6],#4 // increment counter
+ stp @x[4],@x[6],[$out,#16]
+ stp @x[8],@x[10],[$out,#32]
+ stp @x[12],@x[14],[$out,#48]
+ add $out,$out,#64
+ b.eq .Ldone_neon
+ sub $len,$len,#64
+ cmp $len,#64
+ b.lo .Less_than_128
+
+ ld1.8 {$T0-$T3},[$inp],#64
+ eor $A0,$A0,$T0
+ eor $B0,$B0,$T1
+ eor $C0,$C0,$T2
+ eor $D0,$D0,$T3
+ st1.8 {$A0-$D0},[$out],#64
+ b.eq .Ldone_neon
+ sub $len,$len,#64
+ cmp $len,#64
+ b.lo .Less_than_192
+
+ ld1.8 {$T0-$T3},[$inp],#64
+ eor $A1,$A1,$T0
+ eor $B1,$B1,$T1
+ eor $C1,$C1,$T2
+ eor $D1,$D1,$T3
+ st1.8 {$A1-$D1},[$out],#64
+ b.eq .Ldone_neon
+ sub $len,$len,#64
+
+ st1.8 {$A2-$D2},[sp]
+ b .Last_neon
+
+.Less_than_128:
+ st1.8 {$A0-$D0},[sp]
+ b .Last_neon
+.Less_than_192:
+ st1.8 {$A1-$D1},[sp]
+ b .Last_neon
+
+.align 4
+.Last_neon:
+ sub $out,$out,#1
+ add $inp,$inp,$len
+ add $out,$out,$len
+ add $ctr,sp,$len
+ neg $len,$len
+
+.Loop_tail_neon:
+ ldrb w10,[$inp,$len]
+ ldrb w11,[$ctr,$len]
+ add $len,$len,#1
+ eor w10,w10,w11
+ strb w10,[$out,$len]
+ cbnz $len,.Loop_tail_neon
+
+ stp xzr,xzr,[sp,#0]
+ stp xzr,xzr,[sp,#16]
+ stp xzr,xzr,[sp,#32]
+ stp xzr,xzr,[sp,#48]
+
+.Ldone_neon:
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#64
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#96
+ ret
+.size ChaCha20_neon,.-ChaCha20_neon
+___
+{
+my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
+my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
+ $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(0..23));
+
+$code.=<<___;
+.type ChaCha20_512_neon,%function
+.align 5
+ChaCha20_512_neon:
+ stp x29,x30,[sp,#-96]!
+ add x29,sp,#0
+
+ adr @x[0],.Lsigma
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+
+.L512_or_more_neon:
+ sub sp,sp,#128+64
+
+ ldp @d[0],@d[1],[@x[0]] // load sigma
+ ld1 {@K[0]},[@x[0]],#16
+ ldp @d[2],@d[3],[$key] // load key
+ ldp @d[4],@d[5],[$key,#16]
+ ld1 {@K[1],@K[2]},[$key]
+ ldp @d[6],@d[7],[$ctr] // load counter
+ ld1 {@K[3]},[$ctr]
+ ld1 {$ONE},[@x[0]]
+#ifdef __ARMEB__
+ rev64 @K[0],@K[0]
+ ror @d[2],@d[2],#32
+ ror @d[3],@d[3],#32
+ ror @d[4],@d[4],#32
+ ror @d[5],@d[5],#32
+ ror @d[6],@d[6],#32
+ ror @d[7],@d[7],#32
+#endif
+ add @K[3],@K[3],$ONE // += 1
+ stp @K[0],@K[1],[sp,#0] // off-load key block, invariant part
+ add @K[3],@K[3],$ONE // not typo
+ str @K[2],[sp,#32]
+ add @K[4],@K[3],$ONE
+ add @K[5],@K[4],$ONE
+ add @K[6],@K[5],$ONE
+ shl $ONE,$ONE,#2 // 1 -> 4
+
+ stp d8,d9,[sp,#128+0] // meet ABI requirements
+ stp d10,d11,[sp,#128+16]
+ stp d12,d13,[sp,#128+32]
+ stp d14,d15,[sp,#128+48]
+
+ sub $len,$len,#512 // not typo
+
+.Loop_outer_512_neon:
+ mov $A0,@K[0]
+ mov $A1,@K[0]
+ mov $A2,@K[0]
+ mov $A3,@K[0]
+ mov $A4,@K[0]
+ mov $A5,@K[0]
+ mov $B0,@K[1]
+ mov.32 @x[0],@d[0] // unpack key block
+ mov $B1,@K[1]
+ lsr @x[1],@d[0],#32
+ mov $B2,@K[1]
+ mov.32 @x[2],@d[1]
+ mov $B3,@K[1]
+ lsr @x[3],@d[1],#32
+ mov $B4,@K[1]
+ mov.32 @x[4],@d[2]
+ mov $B5,@K[1]
+ lsr @x[5],@d[2],#32
+ mov $D0,@K[3]
+ mov.32 @x[6],@d[3]
+ mov $D1,@K[4]
+ lsr @x[7],@d[3],#32
+ mov $D2,@K[5]
+ mov.32 @x[8],@d[4]
+ mov $D3,@K[6]
+ lsr @x[9],@d[4],#32
+ mov $C0,@K[2]
+ mov.32 @x[10],@d[5]
+ mov $C1,@K[2]
+ lsr @x[11],@d[5],#32
+ add $D4,$D0,$ONE // +4
+ mov.32 @x[12],@d[6]
+ add $D5,$D1,$ONE // +4
+ lsr @x[13],@d[6],#32
+ mov $C2,@K[2]
+ mov.32 @x[14],@d[7]
+ mov $C3,@K[2]
+ lsr @x[15],@d[7],#32
+ mov $C4,@K[2]
+ stp @K[3],@K[4],[sp,#48] // off-load key block, variable part
+ mov $C5,@K[2]
+ str @K[5],[sp,#80]
+
+ mov $ctr,#5
+ subs $len,$len,#512
+.Loop_upper_neon:
+ sub $ctr,$ctr,#1
+___
+ my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
+ my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
+ my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
+ my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
+ my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
+ my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
+ my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+ my $diff = ($#thread0+1)*6 - $#thread67 - 1;
+ my $i = 0;
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread67));
+ eval(shift(@thread1)); eval(shift(@thread67));
+ eval(shift(@thread2)); eval(shift(@thread67));
+ eval(shift(@thread3)); eval(shift(@thread67));
+ eval(shift(@thread4)); eval(shift(@thread67));
+ eval(shift(@thread5)); eval(shift(@thread67));
+ }
+
+ @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
+ @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
+ @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
+ @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
+ @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
+ @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
+ @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread67));
+ eval(shift(@thread1)); eval(shift(@thread67));
+ eval(shift(@thread2)); eval(shift(@thread67));
+ eval(shift(@thread3)); eval(shift(@thread67));
+ eval(shift(@thread4)); eval(shift(@thread67));
+ eval(shift(@thread5)); eval(shift(@thread67));
+ }
+$code.=<<___;
+ cbnz $ctr,.Loop_upper_neon
+
+ add.32 @x[0],@x[0],@d[0] // accumulate key block
+ add @x[1],@x[1],@d[0],lsr#32
+ add.32 @x[2],@x[2],@d[1]
+ add @x[3],@x[3],@d[1],lsr#32
+ add.32 @x[4],@x[4],@d[2]
+ add @x[5],@x[5],@d[2],lsr#32
+ add.32 @x[6],@x[6],@d[3]
+ add @x[7],@x[7],@d[3],lsr#32
+ add.32 @x[8],@x[8],@d[4]
+ add @x[9],@x[9],@d[4],lsr#32
+ add.32 @x[10],@x[10],@d[5]
+ add @x[11],@x[11],@d[5],lsr#32
+ add.32 @x[12],@x[12],@d[6]
+ add @x[13],@x[13],@d[6],lsr#32
+ add.32 @x[14],@x[14],@d[7]
+ add @x[15],@x[15],@d[7],lsr#32
+
+ add @x[0],@x[0],@x[1],lsl#32 // pack
+ add @x[2],@x[2],@x[3],lsl#32
+ ldp @x[1],@x[3],[$inp,#0] // load input
+ add @x[4],@x[4],@x[5],lsl#32
+ add @x[6],@x[6],@x[7],lsl#32
+ ldp @x[5],@x[7],[$inp,#16]
+ add @x[8],@x[8],@x[9],lsl#32
+ add @x[10],@x[10],@x[11],lsl#32
+ ldp @x[9],@x[11],[$inp,#32]
+ add @x[12],@x[12],@x[13],lsl#32
+ add @x[14],@x[14],@x[15],lsl#32
+ ldp @x[13],@x[15],[$inp,#48]
+ add $inp,$inp,#64
+#ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[2],@x[2]
+ rev @x[4],@x[4]
+ rev @x[6],@x[6]
+ rev @x[8],@x[8]
+ rev @x[10],@x[10]
+ rev @x[12],@x[12]
+ rev @x[14],@x[14]
+#endif
+ eor @x[0],@x[0],@x[1]
+ eor @x[2],@x[2],@x[3]
+ eor @x[4],@x[4],@x[5]
+ eor @x[6],@x[6],@x[7]
+ eor @x[8],@x[8],@x[9]
+ eor @x[10],@x[10],@x[11]
+ eor @x[12],@x[12],@x[13]
+ eor @x[14],@x[14],@x[15]
+
+ stp @x[0],@x[2],[$out,#0] // store output
+ add @d[6],@d[6],#1 // increment counter
+ mov.32 @x[0],@d[0] // unpack key block
+ lsr @x[1],@d[0],#32
+ stp @x[4],@x[6],[$out,#16]
+ mov.32 @x[2],@d[1]
+ lsr @x[3],@d[1],#32
+ stp @x[8],@x[10],[$out,#32]
+ mov.32 @x[4],@d[2]
+ lsr @x[5],@d[2],#32
+ stp @x[12],@x[14],[$out,#48]
+ add $out,$out,#64
+ mov.32 @x[6],@d[3]
+ lsr @x[7],@d[3],#32
+ mov.32 @x[8],@d[4]
+ lsr @x[9],@d[4],#32
+ mov.32 @x[10],@d[5]
+ lsr @x[11],@d[5],#32
+ mov.32 @x[12],@d[6]
+ lsr @x[13],@d[6],#32
+ mov.32 @x[14],@d[7]
+ lsr @x[15],@d[7],#32
+
+ mov $ctr,#5
+.Loop_lower_neon:
+ sub $ctr,$ctr,#1
+___
+ @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
+ @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
+ @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
+ @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
+ @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
+ @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
+ @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread67));
+ eval(shift(@thread1)); eval(shift(@thread67));
+ eval(shift(@thread2)); eval(shift(@thread67));
+ eval(shift(@thread3)); eval(shift(@thread67));
+ eval(shift(@thread4)); eval(shift(@thread67));
+ eval(shift(@thread5)); eval(shift(@thread67));
+ }
+
+ @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
+ @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
+ @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
+ @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
+ @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
+ @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
+ @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread67));
+ eval(shift(@thread1)); eval(shift(@thread67));
+ eval(shift(@thread2)); eval(shift(@thread67));
+ eval(shift(@thread3)); eval(shift(@thread67));
+ eval(shift(@thread4)); eval(shift(@thread67));
+ eval(shift(@thread5)); eval(shift(@thread67));
+ }
+$code.=<<___;
+ cbnz $ctr,.Loop_lower_neon
+
+ add.32 @x[0],@x[0],@d[0] // accumulate key block
+ ldp @K[0],@K[1],[sp,#0]
+ add @x[1],@x[1],@d[0],lsr#32
+ ldp @K[2],@K[3],[sp,#32]
+ add.32 @x[2],@x[2],@d[1]
+ ldp @K[4],@K[5],[sp,#64]
+ add @x[3],@x[3],@d[1],lsr#32
+ add $A0,$A0,@K[0]
+ add.32 @x[4],@x[4],@d[2]
+ add $A1,$A1,@K[0]
+ add @x[5],@x[5],@d[2],lsr#32
+ add $A2,$A2,@K[0]
+ add.32 @x[6],@x[6],@d[3]
+ add $A3,$A3,@K[0]
+ add @x[7],@x[7],@d[3],lsr#32
+ add $A4,$A4,@K[0]
+ add.32 @x[8],@x[8],@d[4]
+ add $A5,$A5,@K[0]
+ add @x[9],@x[9],@d[4],lsr#32
+ add $C0,$C0,@K[2]
+ add.32 @x[10],@x[10],@d[5]
+ add $C1,$C1,@K[2]
+ add @x[11],@x[11],@d[5],lsr#32
+ add $C2,$C2,@K[2]
+ add.32 @x[12],@x[12],@d[6]
+ add $C3,$C3,@K[2]
+ add @x[13],@x[13],@d[6],lsr#32
+ add $C4,$C4,@K[2]
+ add.32 @x[14],@x[14],@d[7]
+ add $C5,$C5,@K[2]
+ add @x[15],@x[15],@d[7],lsr#32
+ add $D4,$D4,$ONE // +4
+ add @x[0],@x[0],@x[1],lsl#32 // pack
+ add $D5,$D5,$ONE // +4
+ add @x[2],@x[2],@x[3],lsl#32
+ add $D0,$D0,@K[3]
+ ldp @x[1],@x[3],[$inp,#0] // load input
+ add $D1,$D1,@K[4]
+ add @x[4],@x[4],@x[5],lsl#32
+ add $D2,$D2,@K[5]
+ add @x[6],@x[6],@x[7],lsl#32
+ add $D3,$D3,@K[6]
+ ldp @x[5],@x[7],[$inp,#16]
+ add $D4,$D4,@K[3]
+ add @x[8],@x[8],@x[9],lsl#32
+ add $D5,$D5,@K[4]
+ add @x[10],@x[10],@x[11],lsl#32
+ add $B0,$B0,@K[1]
+ ldp @x[9],@x[11],[$inp,#32]
+ add $B1,$B1,@K[1]
+ add @x[12],@x[12],@x[13],lsl#32
+ add $B2,$B2,@K[1]
+ add @x[14],@x[14],@x[15],lsl#32
+ add $B3,$B3,@K[1]
+ ldp @x[13],@x[15],[$inp,#48]
+ add $B4,$B4,@K[1]
+ add $inp,$inp,#64
+ add $B5,$B5,@K[1]
+
+#ifdef __ARMEB__
+ rev @x[0],@x[0]
+ rev @x[2],@x[2]
+ rev @x[4],@x[4]
+ rev @x[6],@x[6]
+ rev @x[8],@x[8]
+ rev @x[10],@x[10]
+ rev @x[12],@x[12]
+ rev @x[14],@x[14]
+#endif
+ ld1.8 {$T0-$T3},[$inp],#64
+ eor @x[0],@x[0],@x[1]
+ eor @x[2],@x[2],@x[3]
+ eor @x[4],@x[4],@x[5]
+ eor @x[6],@x[6],@x[7]
+ eor @x[8],@x[8],@x[9]
+ eor $A0,$A0,$T0
+ eor @x[10],@x[10],@x[11]
+ eor $B0,$B0,$T1
+ eor @x[12],@x[12],@x[13]
+ eor $C0,$C0,$T2
+ eor @x[14],@x[14],@x[15]
+ eor $D0,$D0,$T3
+ ld1.8 {$T0-$T3},[$inp],#64
+
+ stp @x[0],@x[2],[$out,#0] // store output
+ add @d[6],@d[6],#7 // increment counter
+ stp @x[4],@x[6],[$out,#16]
+ stp @x[8],@x[10],[$out,#32]
+ stp @x[12],@x[14],[$out,#48]
+ add $out,$out,#64
+ st1.8 {$A0-$D0},[$out],#64
+
+ ld1.8 {$A0-$D0},[$inp],#64
+ eor $A1,$A1,$T0
+ eor $B1,$B1,$T1
+ eor $C1,$C1,$T2
+ eor $D1,$D1,$T3
+ st1.8 {$A1-$D1},[$out],#64
+
+ ld1.8 {$A1-$D1},[$inp],#64
+ eor $A2,$A2,$A0
+ ldp @K[0],@K[1],[sp,#0]
+ eor $B2,$B2,$B0
+ ldp @K[2],@K[3],[sp,#32]
+ eor $C2,$C2,$C0
+ eor $D2,$D2,$D0
+ st1.8 {$A2-$D2},[$out],#64
+
+ ld1.8 {$A2-$D2},[$inp],#64
+ eor $A3,$A3,$A1
+ eor $B3,$B3,$B1
+ eor $C3,$C3,$C1
+ eor $D3,$D3,$D1
+ st1.8 {$A3-$D3},[$out],#64
+
+ ld1.8 {$A3-$D3},[$inp],#64
+ eor $A4,$A4,$A2
+ eor $B4,$B4,$B2
+ eor $C4,$C4,$C2
+ eor $D4,$D4,$D2
+ st1.8 {$A4-$D4},[$out],#64
+
+ shl $A0,$ONE,#1 // 4 -> 8
+ eor $A5,$A5,$A3
+ eor $B5,$B5,$B3
+ eor $C5,$C5,$C3
+ eor $D5,$D5,$D3
+ st1.8 {$A5-$D5},[$out],#64
+
+ add @K[3],@K[3],$A0 // += 8
+ add @K[4],@K[4],$A0
+ add @K[5],@K[5],$A0
+ add @K[6],@K[6],$A0
+
+ b.hs .Loop_outer_512_neon
+
+ adds $len,$len,#512
+ ushr $A0,$ONE,#2 // 4 -> 1
+
+ ldp d8,d9,[sp,#128+0] // meet ABI requirements
+ ldp d10,d11,[sp,#128+16]
+ ldp d12,d13,[sp,#128+32]
+ ldp d14,d15,[sp,#128+48]
+
+ stp @K[0],$ONE,[sp,#0] // wipe off-load area
+ stp @K[0],$ONE,[sp,#32]
+ stp @K[0],$ONE,[sp,#64]
+
+ b.eq .Ldone_512_neon
+
+ cmp $len,#192
+ sub @K[3],@K[3],$A0 // -= 1
+ sub @K[4],@K[4],$A0
+ sub @K[5],@K[5],$A0
+ add sp,sp,#128
+ b.hs .Loop_outer_neon
+
+ eor @K[1],@K[1],@K[1]
+ eor @K[2],@K[2],@K[2]
+ eor @K[3],@K[3],@K[3]
+ eor @K[4],@K[4],@K[4]
+ eor @K[5],@K[5],@K[5]
+ eor @K[6],@K[6],@K[6]
+ b .Loop_outer
+
+.Ldone_512_neon:
+ ldp x19,x20,[x29,#16]
+ add sp,sp,#128+64
+ ldp x21,x22,[x29,#32]
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldp x29,x30,[sp],#96
+ ret
+.size ChaCha20_512_neon,.-ChaCha20_512_neon
+___
+}
+}}}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ (s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1)) or
+ (m/\b(eor|ext|mov)\b/ and (s/\.4s/\.16b/g or 1)) or
+ (s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1)) or
+ (m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1)) or
+ (s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));
+
+ #s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;
+
+ print $_,"\n";
+}
+close STDOUT; # flush
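
The post-processing loop above turns the flavour-neutral source into concrete AArch64 syntax; for instance, '.32' integer ops collapse to w-register forms and whole-register eor/ext/mov switch to the .16b arrangement. A standalone illustration on two sample lines (chosen for illustration only):

    for my $line ("add.32 x5,x5,x22", "eor v0.4s,v0.4s,v16.4s") {
        my $out = $line;
        (($out =~ s/\b([a-z]+)\.32\b/$1/) and ($out =~ s/x([0-9]+)/w$1/g or 1)) or
        (($out =~ m/\b(eor|ext|mov)\b/)   and ($out =~ s/\.4s/\.16b/g    or 1));
        print "$line  ->  $out\n";
    }
    # add.32 x5,x5,x22       ->  add w5,w5,w22
    # eor v0.4s,v0.4s,v16.4s ->  eor v0.16b,v0.16b,v16.16b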
diff --git a/openssl-1.1.0h/crypto/chacha/asm/chacha-c64xplus.pl b/openssl-1.1.0h/crypto/chacha/asm/chacha-c64xplus.pl
new file mode 100755
index 0000000..bdb3804
--- /dev/null
+++ b/openssl-1.1.0h/crypto/chacha/asm/chacha-c64xplus.pl
@@ -0,0 +1,926 @@
+#! /usr/bin/env perl
+# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# ChaCha20 for C64x+.
+#
+# October 2015
+#
+# Performance is 3.54 cycles per processed byte, which is ~4.3 times
+# faster than code generated by the TI compiler. The compiler also
+# disables interrupts for some reason, thus making interrupt response
+# time dependent on input length. This module, on the other hand, is
+# free from such limitation.
+
+$output=pop;
+open STDOUT,">$output";
+
+($OUT,$INP,$LEN,$KEYB,$COUNTERA)=("A4","B4","A6","B6","A8");
+($KEYA,$COUNTERB,$STEP)=("A7","B7","A3");
+
+@X= ("A16","B16","A17","B17","A18","B18","A19","B19",
+ "A20","B20","A21","B21","A22","B22","A23","B23");
+@Y= ("A24","B24","A25","B25","A26","B26","A27","B27",
+ "A28","B28","A29","B29","A30","B30","A31","B31");
+@DAT=("A6", "A7", "B6", "B7", "A8", "A9", "B8", "B9",
+ "A10","A11","B10","B11","A12","A13","B12","B13");
+
+# yes, overlaps with @DAT, used only in 2x interleave code path...
+@K2x=("A6", "B6", "A7", "B7", "A8", "B8", "A9", "B9",
+ "A10","B10","A11","B11","A2", "B2", "A13","B13");
+
+$code.=<<___;
+ .text
+
+ .if .ASSEMBLER_VERSION<7000000
+ .asg 0,__TI_EABI__
+ .endif
+ .if __TI_EABI__
+ .asg ChaCha20_ctr32,_ChaCha20_ctr32
+ .endif
+
+ .asg B3,RA
+ .asg A15,FP
+ .asg B15,SP
+
+ .global _ChaCha20_ctr32
+ .align 32
+_ChaCha20_ctr32:
+ .asmfunc stack_usage(40+64)
+ MV $LEN,A0 ; reassign
+ [!A0] BNOP RA ; no data
+|| [A0] STW FP,*SP--(40+64) ; save frame pointer and alloca(40+64)
+|| [A0] MV SP,FP
+ [A0] STDW B13:B12,*SP[4+8] ; ABI says so
+|| [A0] MV $KEYB,$KEYA
+|| [A0] MV $COUNTERA,$COUNTERB
+ [A0] STDW B11:B10,*SP[3+8]
+|| [A0] STDW A13:A12,*FP[-3]
+ [A0] STDW A11:A10,*FP[-4]
+|| [A0] MVK 128,$STEP ; 2 * input block size
+
+ [A0] LDW *${KEYA}[0],@Y[4] ; load key
+|| [A0] LDW *${KEYB}[1],@Y[5]
+|| [A0] MVK 0x00007865,@Y[0] ; synthesize sigma
+|| [A0] MVK 0x0000646e,@Y[1]
+ [A0] LDW *${KEYA}[2],@Y[6]
+|| [A0] LDW *${KEYB}[3],@Y[7]
+|| [A0] MVKH 0x61700000,@Y[0]
+|| [A0] MVKH 0x33200000,@Y[1]
+ LDW *${KEYA}[4],@Y[8]
+|| LDW *${KEYB}[5],@Y[9]
+|| MVK 0x00002d32,@Y[2]
+|| MVK 0x00006574,@Y[3]
+ LDW *${KEYA}[6],@Y[10]
+|| LDW *${KEYB}[7],@Y[11]
+|| MVKH 0x79620000,@Y[2]
+|| MVKH 0x6b200000,@Y[3]
+ LDW *${COUNTERA}[0],@Y[12] ; load counter||nonce
+|| LDW *${COUNTERB}[1],@Y[13]
+|| CMPLTU A0,$STEP,A1 ; is length < 2*blocks?
+ LDW *${COUNTERA}[2],@Y[14]
+|| LDW *${COUNTERB}[3],@Y[15]
+|| [A1] BNOP top1x?
+ [A1] MVK 64,$STEP ; input block size
+|| MVK 10,B0 ; inner loop counter
+
+ DMV @Y[2],@Y[0],@X[2]:@X[0] ; copy block
+|| DMV @Y[3],@Y[1],@X[3]:@X[1]
+||[!A1] STDW @Y[2]:@Y[0],*FP[-12] ; offload key material to stack
+||[!A1] STDW @Y[3]:@Y[1],*SP[2]
+ DMV @Y[6],@Y[4],@X[6]:@X[4]
+|| DMV @Y[7],@Y[5],@X[7]:@X[5]
+||[!A1] STDW @Y[6]:@Y[4],*FP[-10]
+||[!A1] STDW @Y[7]:@Y[5],*SP[4]
+ DMV @Y[10],@Y[8],@X[10]:@X[8]
+|| DMV @Y[11],@Y[9],@X[11]:@X[9]
+||[!A1] STDW @Y[10]:@Y[8],*FP[-8]
+||[!A1] STDW @Y[11]:@Y[9],*SP[6]
+ DMV @Y[14],@Y[12],@X[14]:@X[12]
+|| DMV @Y[15],@Y[13],@X[15]:@X[13]
+||[!A1] MV @Y[12],@K2x[12] ; counter
+||[!A1] MV @Y[13],@K2x[13]
+||[!A1] STW @Y[14],*FP[-6*2]
+||[!A1] STW @Y[15],*SP[8*2]
+___
+{ ################################################################
+ # 2x interleave gives 50% performance improvement
+ #
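
Illustrative aside (not part of the patch): the "2x interleave" means two keystream blocks, with counters n and n+1 (cf. "adjust counter for 2nd block" below), are pushed through the same round schedule in lockstep, so their mutually independent operations can be packed into the C64x+'s parallel execution slots. What follows is a minimal, hedged Perl sketch of that idea only; the helper names are hypothetical and none of the actual instruction scheduling or predication is modelled.

    # Editor's sketch only: two independent ChaCha states advanced in
    # lockstep, mirroring the @X/@Y pairing above.
    use strict; use warnings;

    sub rotl { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n))) & 0xffffffff }

    sub quarter {                       # one ChaCha quarter-round on one state
        my ($s,$a,$b,$c,$d)=@_;
        $s->[$a]=($s->[$a]+$s->[$b])&0xffffffff; $s->[$d]=rotl($s->[$d]^$s->[$a],16);
        $s->[$c]=($s->[$c]+$s->[$d])&0xffffffff; $s->[$b]=rotl($s->[$b]^$s->[$c],12);
        $s->[$a]=($s->[$a]+$s->[$b])&0xffffffff; $s->[$d]=rotl($s->[$d]^$s->[$a],8);
        $s->[$c]=($s->[$c]+$s->[$d])&0xffffffff; $s->[$b]=rotl($s->[$b]^$s->[$c],7);
    }

    my @X=(0)x16;           # dummy 16-word state (real code loads sigma||key||counter||nonce)
    my @Y=@X; $Y[12]++;     # 2nd block, counter n+1 ("adjust counter for 2nd block")
    for (1..10) {                       # 10 double-rounds = 20 rounds
        for my $q ([0,4,8,12],[1,5,9,13],[2,6,10,14],[3,7,11,15],   # column rounds
                   [0,5,10,15],[1,6,11,12],[2,7,8,13],[3,4,9,14]) { # diagonal rounds
            quarter(\@X,@$q);           # @X and @Y never depend on each other,
            quarter(\@Y,@$q);           # so their instructions can be interleaved freely
        }
    }

The scheduled assembly that follows is where the ~50% figure comes from; the sketch merely shows why the two states never depend on each other.
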
+my ($a0,$a1,$a2,$a3) = (0..3);
+my ($b0,$b1,$b2,$b3) = (4..7);
+my ($c0,$c1,$c2,$c3) = (8..11);
+my ($d0,$d1,$d2,$d3) = (12..15);
+
+$code.=<<___;
+outer2x?:
+ ADD @X[$b1],@X[$a1],@X[$a1]
+|| ADD @X[$b2],@X[$a2],@X[$a2]
+|| ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+|| DMV @Y[2],@Y[0],@K2x[2]:@K2x[0]
+|| DMV @Y[3],@Y[1],@K2x[3]:@K2x[1]
+ XOR @X[$a1],@X[$d1],@X[$d1]
+|| XOR @X[$a2],@X[$d2],@X[$d2]
+|| XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| DMV @Y[6],@Y[4],@K2x[6]:@K2x[4]
+|| DMV @Y[7],@Y[5],@K2x[7]:@K2x[5]
+ SWAP2 @X[$d1],@X[$d1] ; rotate by 16
+|| SWAP2 @X[$d2],@X[$d2]
+|| SWAP2 @X[$d0],@X[$d0]
+|| SWAP2 @X[$d3],@X[$d3]
+
+ ADD @X[$d1],@X[$c1],@X[$c1]
+|| ADD @X[$d2],@X[$c2],@X[$c2]
+|| ADD @X[$d0],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c3],@X[$c3]
+|| DMV @Y[10],@Y[8],@K2x[10]:@K2x[8]
+|| DMV @Y[11],@Y[9],@K2x[11]:@K2x[9]
+ XOR @X[$c1],@X[$b1],@X[$b1]
+|| XOR @X[$c2],@X[$b2],@X[$b2]
+|| XOR @X[$c0],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b3],@X[$b3]
+|| ADD 1,@Y[12],@Y[12] ; adjust counter for 2nd block
+ ROTL @X[$b1],12,@X[$b1]
+|| ROTL @X[$b2],12,@X[$b2]
+|| MV @Y[14],@K2x[14]
+|| MV @Y[15],@K2x[15]
+top2x?:
+ ROTL @X[$b0],12,@X[$b0]
+|| ROTL @X[$b3],12,@X[$b3]
+|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
+|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
+ ADD @Y[$b0],@Y[$a0],@Y[$a0]
+|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
+
+|| ADD @X[$b1],@X[$a1],@X[$a1]
+|| ADD @X[$b2],@X[$a2],@X[$a2]
+|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
+|| XOR @Y[$a2],@Y[$d2],@Y[$d2]
+ XOR @Y[$a0],@Y[$d0],@Y[$d0]
+|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
+|| ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+|| XOR @X[$a1],@X[$d1],@X[$d1]
+|| XOR @X[$a2],@X[$d2],@X[$d2]
+ XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| ROTL @X[$d1],8,@X[$d1]
+|| ROTL @X[$d2],8,@X[$d2]
+|| SWAP2 @Y[$d1],@Y[$d1] ; rotate by 16
+|| SWAP2 @Y[$d2],@Y[$d2]
+|| SWAP2 @Y[$d0],@Y[$d0]
+|| SWAP2 @Y[$d3],@Y[$d3]
+ ROTL @X[$d0],8,@X[$d0]
+|| ROTL @X[$d3],8,@X[$d3]
+|| ADD @Y[$d1],@Y[$c1],@Y[$c1]
+|| ADD @Y[$d2],@Y[$c2],@Y[$c2]
+|| ADD @Y[$d0],@Y[$c0],@Y[$c0]
+|| ADD @Y[$d3],@Y[$c3],@Y[$c3]
+|| BNOP middle2x1? ; protect from interrupt
+
+ ADD @X[$d1],@X[$c1],@X[$c1]
+|| ADD @X[$d2],@X[$c2],@X[$c2]
+|| XOR @Y[$c1],@Y[$b1],@Y[$b1]
+|| XOR @Y[$c2],@Y[$b2],@Y[$b2]
+|| XOR @Y[$c0],@Y[$b0],@Y[$b0]
+|| XOR @Y[$c3],@Y[$b3],@Y[$b3]
+ ADD @X[$d0],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c3],@X[$c3]
+|| XOR @X[$c1],@X[$b1],@X[$b1]
+|| XOR @X[$c2],@X[$b2],@X[$b2]
+|| ROTL @X[$d1],0,@X[$d2] ; moved to avoid cross-path stall
+|| ROTL @X[$d2],0,@X[$d3]
+ XOR @X[$c0],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b3],@X[$b3]
+|| MV @X[$d0],@X[$d1]
+|| MV @X[$d3],@X[$d0]
+|| ROTL @Y[$b1],12,@Y[$b1]
+|| ROTL @Y[$b2],12,@Y[$b2]
+ ROTL @X[$b1],7,@X[$b0] ; avoided cross-path stall
+|| ROTL @X[$b2],7,@X[$b1]
+ ROTL @X[$b0],7,@X[$b3]
+|| ROTL @X[$b3],7,@X[$b2]
+middle2x1?:
+
+ ROTL @Y[$b0],12,@Y[$b0]
+|| ROTL @Y[$b3],12,@Y[$b3]
+|| ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b1],@X[$a1],@X[$a1]
+ ADD @X[$b2],@X[$a2],@X[$a2]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+
+|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
+|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
+|| XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a1],@X[$d1],@X[$d1]
+ XOR @X[$a2],@X[$d2],@X[$d2]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
+|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
+|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
+|| XOR @Y[$a2],@Y[$d2],@Y[$d2]
+ XOR @Y[$a0],@Y[$d0],@Y[$d0]
+|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
+|| ROTL @Y[$d1],8,@Y[$d1]
+|| ROTL @Y[$d2],8,@Y[$d2]
+|| SWAP2 @X[$d0],@X[$d0] ; rotate by 16
+|| SWAP2 @X[$d1],@X[$d1]
+|| SWAP2 @X[$d2],@X[$d2]
+|| SWAP2 @X[$d3],@X[$d3]
+ ROTL @Y[$d0],8,@Y[$d0]
+|| ROTL @Y[$d3],8,@Y[$d3]
+|| ADD @X[$d0],@X[$c2],@X[$c2]
+|| ADD @X[$d1],@X[$c3],@X[$c3]
+|| ADD @X[$d2],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c1],@X[$c1]
+|| BNOP middle2x2? ; protect from interrupt
+
+ ADD @Y[$d1],@Y[$c1],@Y[$c1]
+|| ADD @Y[$d2],@Y[$c2],@Y[$c2]
+|| XOR @X[$c2],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b1],@X[$b1]
+|| XOR @X[$c0],@X[$b2],@X[$b2]
+|| XOR @X[$c1],@X[$b3],@X[$b3]
+ ADD @Y[$d0],@Y[$c0],@Y[$c0]
+|| ADD @Y[$d3],@Y[$c3],@Y[$c3]
+|| XOR @Y[$c1],@Y[$b1],@Y[$b1]
+|| XOR @Y[$c2],@Y[$b2],@Y[$b2]
+|| ROTL @Y[$d1],0,@Y[$d2] ; moved to avoid cross-path stall
+|| ROTL @Y[$d2],0,@Y[$d3]
+ XOR @Y[$c0],@Y[$b0],@Y[$b0]
+|| XOR @Y[$c3],@Y[$b3],@Y[$b3]
+|| MV @Y[$d0],@Y[$d1]
+|| MV @Y[$d3],@Y[$d0]
+|| ROTL @X[$b0],12,@X[$b0]
+|| ROTL @X[$b1],12,@X[$b1]
+ ROTL @Y[$b1],7,@Y[$b0] ; avoided cross-path stall
+|| ROTL @Y[$b2],7,@Y[$b1]
+ ROTL @Y[$b0],7,@Y[$b3]
+|| ROTL @Y[$b3],7,@Y[$b2]
+middle2x2?:
+
+ ROTL @X[$b2],12,@X[$b2]
+|| ROTL @X[$b3],12,@X[$b3]
+|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
+|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
+ ADD @Y[$b2],@Y[$a2],@Y[$a2]
+|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
+
+|| ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b1],@X[$a1],@X[$a1]
+|| XOR @Y[$a0],@Y[$d0],@Y[$d0]
+|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
+ XOR @Y[$a2],@Y[$d2],@Y[$d2]
+|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
+|| ADD @X[$b2],@X[$a2],@X[$a2]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+|| XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a1],@X[$d1],@X[$d1]
+ XOR @X[$a2],@X[$d2],@X[$d2]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| ROTL @X[$d0],8,@X[$d0]
+|| ROTL @X[$d1],8,@X[$d1]
+|| SWAP2 @Y[$d0],@Y[$d0] ; rotate by 16
+|| SWAP2 @Y[$d1],@Y[$d1]
+|| SWAP2 @Y[$d2],@Y[$d2]
+|| SWAP2 @Y[$d3],@Y[$d3]
+ ROTL @X[$d2],8,@X[$d2]
+|| ROTL @X[$d3],8,@X[$d3]
+|| ADD @Y[$d0],@Y[$c2],@Y[$c2]
+|| ADD @Y[$d1],@Y[$c3],@Y[$c3]
+|| ADD @Y[$d2],@Y[$c0],@Y[$c0]
+|| ADD @Y[$d3],@Y[$c1],@Y[$c1]
+|| BNOP bottom2x1? ; protect from interrupt
+
+ ADD @X[$d0],@X[$c2],@X[$c2]
+|| ADD @X[$d1],@X[$c3],@X[$c3]
+|| XOR @Y[$c2],@Y[$b0],@Y[$b0]
+|| XOR @Y[$c3],@Y[$b1],@Y[$b1]
+|| XOR @Y[$c0],@Y[$b2],@Y[$b2]
+|| XOR @Y[$c1],@Y[$b3],@Y[$b3]
+ ADD @X[$d2],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c1],@X[$c1]
+|| XOR @X[$c2],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b1],@X[$b1]
+|| ROTL @X[$d0],0,@X[$d3] ; moved to avoid cross-path stall
+|| ROTL @X[$d1],0,@X[$d0]
+ XOR @X[$c0],@X[$b2],@X[$b2]
+|| XOR @X[$c1],@X[$b3],@X[$b3]
+|| MV @X[$d2],@X[$d1]
+|| MV @X[$d3],@X[$d2]
+|| ROTL @Y[$b0],12,@Y[$b0]
+|| ROTL @Y[$b1],12,@Y[$b1]
+ ROTL @X[$b0],7,@X[$b1] ; avoided cross-path stall
+|| ROTL @X[$b1],7,@X[$b2]
+ ROTL @X[$b2],7,@X[$b3]
+|| ROTL @X[$b3],7,@X[$b0]
+|| [B0] SUB B0,1,B0 ; decrement inner loop counter
+bottom2x1?:
+
+ ROTL @Y[$b2],12,@Y[$b2]
+|| ROTL @Y[$b3],12,@Y[$b3]
+|| [B0] ADD @X[$b1],@X[$a1],@X[$a1] ; modulo-scheduled
+|| [B0] ADD @X[$b2],@X[$a2],@X[$a2]
+ [B0] ADD @X[$b0],@X[$a0],@X[$a0]
+|| [B0] ADD @X[$b3],@X[$a3],@X[$a3]
+
+|| ADD @Y[$b0],@Y[$a0],@Y[$a0]
+|| ADD @Y[$b1],@Y[$a1],@Y[$a1]
+|| [B0] XOR @X[$a1],@X[$d1],@X[$d1]
+|| [B0] XOR @X[$a2],@X[$d2],@X[$d2]
+ [B0] XOR @X[$a0],@X[$d0],@X[$d0]
+|| [B0] XOR @X[$a3],@X[$d3],@X[$d3]
+|| ADD @Y[$b2],@Y[$a2],@Y[$a2]
+|| ADD @Y[$b3],@Y[$a3],@Y[$a3]
+|| XOR @Y[$a0],@Y[$d0],@Y[$d0]
+|| XOR @Y[$a1],@Y[$d1],@Y[$d1]
+ XOR @Y[$a2],@Y[$d2],@Y[$d2]
+|| XOR @Y[$a3],@Y[$d3],@Y[$d3]
+|| ROTL @Y[$d0],8,@Y[$d0]
+|| ROTL @Y[$d1],8,@Y[$d1]
+|| [B0] SWAP2 @X[$d1],@X[$d1] ; rotate by 16
+|| [B0] SWAP2 @X[$d2],@X[$d2]
+|| [B0] SWAP2 @X[$d0],@X[$d0]
+|| [B0] SWAP2 @X[$d3],@X[$d3]
+ ROTL @Y[$d2],8,@Y[$d2]
+|| ROTL @Y[$d3],8,@Y[$d3]
+|| [B0] ADD @X[$d1],@X[$c1],@X[$c1]
+|| [B0] ADD @X[$d2],@X[$c2],@X[$c2]
+|| [B0] ADD @X[$d0],@X[$c0],@X[$c0]
+|| [B0] ADD @X[$d3],@X[$c3],@X[$c3]
+|| [B0] BNOP top2x? ; even protects from interrupt
+
+ ADD @Y[$d0],@Y[$c2],@Y[$c2]
+|| ADD @Y[$d1],@Y[$c3],@Y[$c3]
+|| [B0] XOR @X[$c1],@X[$b1],@X[$b1]
+|| [B0] XOR @X[$c2],@X[$b2],@X[$b2]
+|| [B0] XOR @X[$c0],@X[$b0],@X[$b0]
+|| [B0] XOR @X[$c3],@X[$b3],@X[$b3]
+ ADD @Y[$d2],@Y[$c0],@Y[$c0]
+|| ADD @Y[$d3],@Y[$c1],@Y[$c1]
+|| XOR @Y[$c2],@Y[$b0],@Y[$b0]
+|| XOR @Y[$c3],@Y[$b1],@Y[$b1]
+|| ROTL @Y[$d0],0,@Y[$d3] ; moved to avoid cross-path stall
+|| ROTL @Y[$d1],0,@Y[$d0]
+ XOR @Y[$c0],@Y[$b2],@Y[$b2]
+|| XOR @Y[$c1],@Y[$b3],@Y[$b3]
+|| MV @Y[$d2],@Y[$d1]
+|| MV @Y[$d3],@Y[$d2]
+|| [B0] ROTL @X[$b1],12,@X[$b1]
+|| [B0] ROTL @X[$b2],12,@X[$b2]
+ ROTL @Y[$b0],7,@Y[$b1] ; avoided cross-path stall
+|| ROTL @Y[$b1],7,@Y[$b2]
+ ROTL @Y[$b2],7,@Y[$b3]
+|| ROTL @Y[$b3],7,@Y[$b0]
+bottom2x2?:
+___
+}
+
+$code.=<<___;
+ ADD @K2x[0],@X[0],@X[0] ; accumulate key material
+|| ADD @K2x[1],@X[1],@X[1]
+|| ADD @K2x[2],@X[2],@X[2]
+|| ADD @K2x[3],@X[3],@X[3]
+ ADD @K2x[0],@Y[0],@Y[0]
+|| ADD @K2x[1],@Y[1],@Y[1]
+|| ADD @K2x[2],@Y[2],@Y[2]
+|| ADD @K2x[3],@Y[3],@Y[3]
+|| LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
+ ADD @K2x[4],@X[4],@X[4]
+|| ADD @K2x[5],@X[5],@X[5]
+|| ADD @K2x[6],@X[6],@X[6]
+|| ADD @K2x[7],@X[7],@X[7]
+|| LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
+ ADD @K2x[4],@Y[4],@Y[4]
+|| ADD @K2x[5],@Y[5],@Y[5]
+|| ADD @K2x[6],@Y[6],@Y[6]
+|| ADD @K2x[7],@Y[7],@Y[7]
+|| LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
+ ADD @K2x[8],@X[8],@X[8]
+|| ADD @K2x[9],@X[9],@X[9]
+|| ADD @K2x[10],@X[10],@X[10]
+|| ADD @K2x[11],@X[11],@X[11]
+|| LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
+ ADD @K2x[8],@Y[8],@Y[8]
+|| ADD @K2x[9],@Y[9],@Y[9]
+|| ADD @K2x[10],@Y[10],@Y[10]
+|| ADD @K2x[11],@Y[11],@Y[11]
+|| LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
+ ADD @K2x[12],@X[12],@X[12]
+|| ADD @K2x[13],@X[13],@X[13]
+|| ADD @K2x[14],@X[14],@X[14]
+|| ADD @K2x[15],@X[15],@X[15]
+|| LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
+ ADD @K2x[12],@Y[12],@Y[12]
+|| ADD @K2x[13],@Y[13],@Y[13]
+|| ADD @K2x[14],@Y[14],@Y[14]
+|| ADD @K2x[15],@Y[15],@Y[15]
+|| LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
+ ADD 1,@Y[12],@Y[12] ; adjust counter for 2nd block
+|| ADD 2,@K2x[12],@K2x[12] ; increment counter
+|| LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
+
+ .if .BIG_ENDIAN
+ SWAP2 @X[0],@X[0]
+|| SWAP2 @X[1],@X[1]
+|| SWAP2 @X[2],@X[2]
+|| SWAP2 @X[3],@X[3]
+ SWAP2 @X[4],@X[4]
+|| SWAP2 @X[5],@X[5]
+|| SWAP2 @X[6],@X[6]
+|| SWAP2 @X[7],@X[7]
+ SWAP2 @X[8],@X[8]
+|| SWAP2 @X[9],@X[9]
+|| SWAP4 @X[0],@X[1]
+|| SWAP4 @X[1],@X[0]
+ SWAP2 @X[10],@X[10]
+|| SWAP2 @X[11],@X[11]
+|| SWAP4 @X[2],@X[3]
+|| SWAP4 @X[3],@X[2]
+ SWAP2 @X[12],@X[12]
+|| SWAP2 @X[13],@X[13]
+|| SWAP4 @X[4],@X[5]
+|| SWAP4 @X[5],@X[4]
+ SWAP2 @X[14],@X[14]
+|| SWAP2 @X[15],@X[15]
+|| SWAP4 @X[6],@X[7]
+|| SWAP4 @X[7],@X[6]
+ SWAP4 @X[8],@X[9]
+|| SWAP4 @X[9],@X[8]
+|| SWAP2 @Y[0],@Y[0]
+|| SWAP2 @Y[1],@Y[1]
+ SWAP4 @X[10],@X[11]
+|| SWAP4 @X[11],@X[10]
+|| SWAP2 @Y[2],@Y[2]
+|| SWAP2 @Y[3],@Y[3]
+ SWAP4 @X[12],@X[13]
+|| SWAP4 @X[13],@X[12]
+|| SWAP2 @Y[4],@Y[4]
+|| SWAP2 @Y[5],@Y[5]
+ SWAP4 @X[14],@X[15]
+|| SWAP4 @X[15],@X[14]
+|| SWAP2 @Y[6],@Y[6]
+|| SWAP2 @Y[7],@Y[7]
+ SWAP2 @Y[8],@Y[8]
+|| SWAP2 @Y[9],@Y[9]
+|| SWAP4 @Y[0],@Y[1]
+|| SWAP4 @Y[1],@Y[0]
+ SWAP2 @Y[10],@Y[10]
+|| SWAP2 @Y[11],@Y[11]
+|| SWAP4 @Y[2],@Y[3]
+|| SWAP4 @Y[3],@Y[2]
+ SWAP2 @Y[12],@Y[12]
+|| SWAP2 @Y[13],@Y[13]
+|| SWAP4 @Y[4],@Y[5]
+|| SWAP4 @Y[5],@Y[4]
+ SWAP2 @Y[14],@Y[14]
+|| SWAP2 @Y[15],@Y[15]
+|| SWAP4 @Y[6],@Y[7]
+|| SWAP4 @Y[7],@Y[6]
+ SWAP4 @Y[8],@Y[9]
+|| SWAP4 @Y[9],@Y[8]
+ SWAP4 @Y[10],@Y[11]
+|| SWAP4 @Y[11],@Y[10]
+ SWAP4 @Y[12],@Y[13]
+|| SWAP4 @Y[13],@Y[12]
+ SWAP4 @Y[14],@Y[15]
+|| SWAP4 @Y[15],@Y[14]
+ .endif
+
+ XOR @DAT[0],@X[0],@X[0] ; xor 1st block
+|| XOR @DAT[3],@X[3],@X[3]
+|| XOR @DAT[2],@X[2],@X[1]
+|| XOR @DAT[1],@X[1],@X[2]
+|| LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
+ XOR @DAT[4],@X[4],@X[4]
+|| XOR @DAT[7],@X[7],@X[7]
+|| LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
+ XOR @DAT[6],@X[6],@X[5]
+|| XOR @DAT[5],@X[5],@X[6]
+|| LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
+ XOR @DAT[8],@X[8],@X[8]
+|| XOR @DAT[11],@X[11],@X[11]
+|| LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
+ XOR @DAT[10],@X[10],@X[9]
+|| XOR @DAT[9],@X[9],@X[10]
+|| LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
+ XOR @DAT[12],@X[12],@X[12]
+|| XOR @DAT[15],@X[15],@X[15]
+|| LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
+ XOR @DAT[14],@X[14],@X[13]
+|| XOR @DAT[13],@X[13],@X[14]
+|| LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
+ [A0] SUB A0,$STEP,A0 ; SUB A0,128,A0
+|| LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
+
+ XOR @Y[0],@DAT[0],@DAT[0] ; xor 2nd block
+|| XOR @Y[1],@DAT[1],@DAT[1]
+|| STNDW @X[2]:@X[0],*${OUT}++[8]
+ XOR @Y[2],@DAT[2],@DAT[2]
+|| XOR @Y[3],@DAT[3],@DAT[3]
+|| STNDW @X[3]:@X[1],*${OUT}[-7]
+ XOR @Y[4],@DAT[4],@DAT[4]
+|| [A0] LDDW *FP[-12],@X[2]:@X[0] ; re-load key material from stack
+|| [A0] LDDW *SP[2], @X[3]:@X[1]
+ XOR @Y[5],@DAT[5],@DAT[5]
+|| STNDW @X[6]:@X[4],*${OUT}[-6]
+ XOR @Y[6],@DAT[6],@DAT[6]
+|| XOR @Y[7],@DAT[7],@DAT[7]
+|| STNDW @X[7]:@X[5],*${OUT}[-5]
+ XOR @Y[8],@DAT[8],@DAT[8]
+|| [A0] LDDW *FP[-10],@X[6]:@X[4]
+|| [A0] LDDW *SP[4], @X[7]:@X[5]
+ XOR @Y[9],@DAT[9],@DAT[9]
+|| STNDW @X[10]:@X[8],*${OUT}[-4]
+ XOR @Y[10],@DAT[10],@DAT[10]
+|| XOR @Y[11],@DAT[11],@DAT[11]
+|| STNDW @X[11]:@X[9],*${OUT}[-3]
+ XOR @Y[12],@DAT[12],@DAT[12]
+|| [A0] LDDW *FP[-8], @X[10]:@X[8]
+|| [A0] LDDW *SP[6], @X[11]:@X[9]
+ XOR @Y[13],@DAT[13],@DAT[13]
+|| STNDW @X[14]:@X[12],*${OUT}[-2]
+ XOR @Y[14],@DAT[14],@DAT[14]
+|| XOR @Y[15],@DAT[15],@DAT[15]
+|| STNDW @X[15]:@X[13],*${OUT}[-1]
+
+ [A0] MV @K2x[12],@X[12]
+|| [A0] MV @K2x[13],@X[13]
+|| [A0] LDW *FP[-6*2], @X[14]
+|| [A0] LDW *SP[8*2], @X[15]
+
+ [A0] DMV @X[2],@X[0],@Y[2]:@Y[0] ; duplicate key material
+|| STNDW @DAT[1]:@DAT[0],*${OUT}++[8]
+ [A0] DMV @X[3],@X[1],@Y[3]:@Y[1]
+|| STNDW @DAT[3]:@DAT[2],*${OUT}[-7]
+ [A0] DMV @X[6],@X[4],@Y[6]:@Y[4]
+|| STNDW @DAT[5]:@DAT[4],*${OUT}[-6]
+|| CMPLTU A0,$STEP,A1 ; is remaining length < 2*blocks?
+||[!A0] BNOP epilogue?
+ [A0] DMV @X[7],@X[5],@Y[7]:@Y[5]
+|| STNDW @DAT[7]:@DAT[6],*${OUT}[-5]
+||[!A1] BNOP outer2x?
+ [A0] DMV @X[10],@X[8],@Y[10]:@Y[8]
+|| STNDW @DAT[9]:@DAT[8],*${OUT}[-4]
+ [A0] DMV @X[11],@X[9],@Y[11]:@Y[9]
+|| STNDW @DAT[11]:@DAT[10],*${OUT}[-3]
+ [A0] DMV @X[14],@X[12],@Y[14]:@Y[12]
+|| STNDW @DAT[13]:@DAT[12],*${OUT}[-2]
+ [A0] DMV @X[15],@X[13],@Y[15]:@Y[13]
+|| STNDW @DAT[15]:@DAT[14],*${OUT}[-1]
+;;===== branch to epilogue? is taken here
+ [A1] MVK 64,$STEP
+|| [A0] MVK 10,B0 ; inner loop counter
+;;===== branch to outer2x? is taken here
+___
+{
+my ($a0,$a1,$a2,$a3) = (0..3);
+my ($b0,$b1,$b2,$b3) = (4..7);
+my ($c0,$c1,$c2,$c3) = (8..11);
+my ($d0,$d1,$d2,$d3) = (12..15);
+
+$code.=<<___;
+top1x?:
+ ADD @X[$b1],@X[$a1],@X[$a1]
+|| ADD @X[$b2],@X[$a2],@X[$a2]
+ ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+|| XOR @X[$a1],@X[$d1],@X[$d1]
+|| XOR @X[$a2],@X[$d2],@X[$d2]
+ XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| SWAP2 @X[$d1],@X[$d1] ; rotate by 16
+|| SWAP2 @X[$d2],@X[$d2]
+ SWAP2 @X[$d0],@X[$d0]
+|| SWAP2 @X[$d3],@X[$d3]
+
+|| ADD @X[$d1],@X[$c1],@X[$c1]
+|| ADD @X[$d2],@X[$c2],@X[$c2]
+ ADD @X[$d0],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c3],@X[$c3]
+|| XOR @X[$c1],@X[$b1],@X[$b1]
+|| XOR @X[$c2],@X[$b2],@X[$b2]
+ XOR @X[$c0],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b3],@X[$b3]
+|| ROTL @X[$b1],12,@X[$b1]
+|| ROTL @X[$b2],12,@X[$b2]
+ ROTL @X[$b0],12,@X[$b0]
+|| ROTL @X[$b3],12,@X[$b3]
+
+ ADD @X[$b1],@X[$a1],@X[$a1]
+|| ADD @X[$b2],@X[$a2],@X[$a2]
+ ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+|| XOR @X[$a1],@X[$d1],@X[$d1]
+|| XOR @X[$a2],@X[$d2],@X[$d2]
+ XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| ROTL @X[$d1],8,@X[$d1]
+|| ROTL @X[$d2],8,@X[$d2]
+ ROTL @X[$d0],8,@X[$d0]
+|| ROTL @X[$d3],8,@X[$d3]
+|| BNOP middle1x? ; protect from interrupt
+
+ ADD @X[$d1],@X[$c1],@X[$c1]
+|| ADD @X[$d2],@X[$c2],@X[$c2]
+ ADD @X[$d0],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c3],@X[$c3]
+|| XOR @X[$c1],@X[$b1],@X[$b1]
+|| XOR @X[$c2],@X[$b2],@X[$b2]
+|| ROTL @X[$d1],0,@X[$d2] ; moved to avoid cross-path stall
+|| ROTL @X[$d2],0,@X[$d3]
+ XOR @X[$c0],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b3],@X[$b3]
+|| ROTL @X[$d0],0,@X[$d1]
+|| ROTL @X[$d3],0,@X[$d0]
+ ROTL @X[$b1],7,@X[$b0] ; avoided cross-path stall
+|| ROTL @X[$b2],7,@X[$b1]
+ ROTL @X[$b0],7,@X[$b3]
+|| ROTL @X[$b3],7,@X[$b2]
+middle1x?:
+
+ ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b1],@X[$a1],@X[$a1]
+ ADD @X[$b2],@X[$a2],@X[$a2]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+|| XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a1],@X[$d1],@X[$d1]
+ XOR @X[$a2],@X[$d2],@X[$d2]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| SWAP2 @X[$d0],@X[$d0] ; rotate by 16
+|| SWAP2 @X[$d1],@X[$d1]
+ SWAP2 @X[$d2],@X[$d2]
+|| SWAP2 @X[$d3],@X[$d3]
+
+|| ADD @X[$d0],@X[$c2],@X[$c2]
+|| ADD @X[$d1],@X[$c3],@X[$c3]
+ ADD @X[$d2],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c1],@X[$c1]
+|| XOR @X[$c2],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b1],@X[$b1]
+ XOR @X[$c0],@X[$b2],@X[$b2]
+|| XOR @X[$c1],@X[$b3],@X[$b3]
+|| ROTL @X[$b0],12,@X[$b0]
+|| ROTL @X[$b1],12,@X[$b1]
+ ROTL @X[$b2],12,@X[$b2]
+|| ROTL @X[$b3],12,@X[$b3]
+
+ ADD @X[$b0],@X[$a0],@X[$a0]
+|| ADD @X[$b1],@X[$a1],@X[$a1]
+|| [B0] SUB B0,1,B0 ; decrement inner loop counter
+ ADD @X[$b2],@X[$a2],@X[$a2]
+|| ADD @X[$b3],@X[$a3],@X[$a3]
+|| XOR @X[$a0],@X[$d0],@X[$d0]
+|| XOR @X[$a1],@X[$d1],@X[$d1]
+ XOR @X[$a2],@X[$d2],@X[$d2]
+|| XOR @X[$a3],@X[$d3],@X[$d3]
+|| ROTL @X[$d0],8,@X[$d0]
+|| ROTL @X[$d1],8,@X[$d1]
+ ROTL @X[$d2],8,@X[$d2]
+|| ROTL @X[$d3],8,@X[$d3]
+|| [B0] BNOP top1x? ; even protects from interrupt
+
+ ADD @X[$d0],@X[$c2],@X[$c2]
+|| ADD @X[$d1],@X[$c3],@X[$c3]
+ ADD @X[$d2],@X[$c0],@X[$c0]
+|| ADD @X[$d3],@X[$c1],@X[$c1]
+|| XOR @X[$c2],@X[$b0],@X[$b0]
+|| XOR @X[$c3],@X[$b1],@X[$b1]
+|| ROTL @X[$d0],0,@X[$d3] ; moved to avoid cross-path stall
+|| ROTL @X[$d1],0,@X[$d0]
+ XOR @X[$c0],@X[$b2],@X[$b2]
+|| XOR @X[$c1],@X[$b3],@X[$b3]
+|| ROTL @X[$d2],0,@X[$d1]
+|| ROTL @X[$d3],0,@X[$d2]
+ ROTL @X[$b0],7,@X[$b1] ; avoided cross-path stall
+|| ROTL @X[$b1],7,@X[$b2]
+ ROTL @X[$b2],7,@X[$b3]
+|| ROTL @X[$b3],7,@X[$b0]
+||[!B0] CMPLTU A0,$STEP,A1 ; less than 64 bytes left?
+bottom1x?:
+___
+}
+
+$code.=<<___;
+ ADD @Y[0],@X[0],@X[0] ; accumulate key material
+|| ADD @Y[1],@X[1],@X[1]
+|| ADD @Y[2],@X[2],@X[2]
+|| ADD @Y[3],@X[3],@X[3]
+||[!A1] LDNDW *${INP}++[8],@DAT[1]:@DAT[0]
+|| [A1] BNOP tail?
+ ADD @Y[4],@X[4],@X[4]
+|| ADD @Y[5],@X[5],@X[5]
+|| ADD @Y[6],@X[6],@X[6]
+|| ADD @Y[7],@X[7],@X[7]
+||[!A1] LDNDW *${INP}[-7],@DAT[3]:@DAT[2]
+ ADD @Y[8],@X[8],@X[8]
+|| ADD @Y[9],@X[9],@X[9]
+|| ADD @Y[10],@X[10],@X[10]
+|| ADD @Y[11],@X[11],@X[11]
+||[!A1] LDNDW *${INP}[-6],@DAT[5]:@DAT[4]
+ ADD @Y[12],@X[12],@X[12]
+|| ADD @Y[13],@X[13],@X[13]
+|| ADD @Y[14],@X[14],@X[14]
+|| ADD @Y[15],@X[15],@X[15]
+||[!A1] LDNDW *${INP}[-5],@DAT[7]:@DAT[6]
+ [!A1] LDNDW *${INP}[-4],@DAT[9]:@DAT[8]
+ [!A1] LDNDW *${INP}[-3],@DAT[11]:@DAT[10]
+ LDNDW *${INP}[-2],@DAT[13]:@DAT[12]
+ LDNDW *${INP}[-1],@DAT[15]:@DAT[14]
+
+ .if .BIG_ENDIAN
+ SWAP2 @X[0],@X[0]
+|| SWAP2 @X[1],@X[1]
+|| SWAP2 @X[2],@X[2]
+|| SWAP2 @X[3],@X[3]
+ SWAP2 @X[4],@X[4]
+|| SWAP2 @X[5],@X[5]
+|| SWAP2 @X[6],@X[6]
+|| SWAP2 @X[7],@X[7]
+ SWAP2 @X[8],@X[8]
+|| SWAP2 @X[9],@X[9]
+|| SWAP4 @X[0],@X[1]
+|| SWAP4 @X[1],@X[0]
+ SWAP2 @X[10],@X[10]
+|| SWAP2 @X[11],@X[11]
+|| SWAP4 @X[2],@X[3]
+|| SWAP4 @X[3],@X[2]
+ SWAP2 @X[12],@X[12]
+|| SWAP2 @X[13],@X[13]
+|| SWAP4 @X[4],@X[5]
+|| SWAP4 @X[5],@X[4]
+ SWAP2 @X[14],@X[14]
+|| SWAP2 @X[15],@X[15]
+|| SWAP4 @X[6],@X[7]
+|| SWAP4 @X[7],@X[6]
+ SWAP4 @X[8],@X[9]
+|| SWAP4 @X[9],@X[8]
+ SWAP4 @X[10],@X[11]
+|| SWAP4 @X[11],@X[10]
+ SWAP4 @X[12],@X[13]
+|| SWAP4 @X[13],@X[12]
+ SWAP4 @X[14],@X[15]
+|| SWAP4 @X[15],@X[14]
+ .else
+ NOP 1
+ .endif
+
+ XOR @X[0],@DAT[0],@DAT[0] ; xor with input
+|| XOR @X[1],@DAT[1],@DAT[1]
+|| XOR @X[2],@DAT[2],@DAT[2]
+|| XOR @X[3],@DAT[3],@DAT[3]
+|| [A0] SUB A0,$STEP,A0 ; SUB A0,64,A0
+ XOR @X[4],@DAT[4],@DAT[4]
+|| XOR @X[5],@DAT[5],@DAT[5]
+|| XOR @X[6],@DAT[6],@DAT[6]
+|| XOR @X[7],@DAT[7],@DAT[7]
+|| STNDW @DAT[1]:@DAT[0],*${OUT}++[8]
+ XOR @X[8],@DAT[8],@DAT[8]
+|| XOR @X[9],@DAT[9],@DAT[9]
+|| XOR @X[10],@DAT[10],@DAT[10]
+|| XOR @X[11],@DAT[11],@DAT[11]
+|| STNDW @DAT[3]:@DAT[2],*${OUT}[-7]
+ XOR @X[12],@DAT[12],@DAT[12]
+|| XOR @X[13],@DAT[13],@DAT[13]
+|| XOR @X[14],@DAT[14],@DAT[14]
+|| XOR @X[15],@DAT[15],@DAT[15]
+|| STNDW @DAT[5]:@DAT[4],*${OUT}[-6]
+|| [A0] BNOP top1x?
+ [A0] DMV @Y[2],@Y[0],@X[2]:@X[0] ; duplicate key material
+|| [A0] DMV @Y[3],@Y[1],@X[3]:@X[1]
+|| STNDW @DAT[7]:@DAT[6],*${OUT}[-5]
+ [A0] DMV @Y[6],@Y[4],@X[6]:@X[4]
+|| [A0] DMV @Y[7],@Y[5],@X[7]:@X[5]
+|| STNDW @DAT[9]:@DAT[8],*${OUT}[-4]
+ [A0] DMV @Y[10],@Y[8],@X[10]:@X[8]
+|| [A0] DMV @Y[11],@Y[9],@X[11]:@X[9]
+|| [A0] ADD 1,@Y[12],@Y[12] ; increment counter
+|| STNDW @DAT[11]:@DAT[10],*${OUT}[-3]
+ [A0] DMV @Y[14],@Y[12],@X[14]:@X[12]
+|| [A0] DMV @Y[15],@Y[13],@X[15]:@X[13]
+|| STNDW @DAT[13]:@DAT[12],*${OUT}[-2]
+ [A0] MVK 10,B0 ; inner loop counter
+|| STNDW @DAT[15]:@DAT[14],*${OUT}[-1]
+;;===== branch to top1x? is taken here
+
+epilogue?:
+ LDDW *FP[-4],A11:A10 ; ABI says so
+ LDDW *FP[-3],A13:A12
+|| LDDW *SP[3+8],B11:B10
+ LDDW *SP[4+8],B13:B12
+|| BNOP RA
+ LDW *++SP(40+64),FP ; restore frame pointer
+ NOP 4
+
+tail?:
+ LDBU *${INP}++[1],B24 ; load byte by byte
+|| SUB A0,1,A0
+|| SUB A0,1,B1
+   [!B1]	BNOP	epilogue?		; interrupts are disabled for the whole time
+|| [A0] LDBU *${INP}++[1],B24
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+ [!B1] BNOP epilogue?
+|| [A0] LDBU *${INP}++[1],B24
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+ [!B1] BNOP epilogue?
+|| ROTL @X[0],0,A24
+|| [A0] LDBU *${INP}++[1],B24
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+ [!B1] BNOP epilogue?
+|| ROTL @X[0],24,A24
+|| [A0] LDBU *${INP}++[1],A24
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+ [!B1] BNOP epilogue?
+|| ROTL @X[0],16,A24
+|| [A0] LDBU *${INP}++[1],A24
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+|| XOR A24,B24,B25
+ STB B25,*${OUT}++[1] ; store byte by byte
+||[!B1] BNOP epilogue?
+|| ROTL @X[0],8,A24
+|| [A0] LDBU *${INP}++[1],A24
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+|| XOR A24,B24,B25
+ STB B25,*${OUT}++[1]
+___
+sub TAIL_STEP {
+my $Xi= shift;
+my $T = ($Xi=~/^B/?"B24":"A24"); # match @X[i] to avoid cross path
+my $D = $T; $D=~tr/AB/BA/;
+my $O = $D; $O=~s/24/25/;
+
+$code.=<<___;
+||[!B1] BNOP epilogue?
+|| ROTL $Xi,0,$T
+|| [A0] LDBU *${INP}++[1],$D
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+|| XOR A24,B24,$O
+ STB $O,*${OUT}++[1]
+||[!B1] BNOP epilogue?
+|| ROTL $Xi,24,$T
+|| [A0] LDBU *${INP}++[1],$T
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+|| XOR A24,B24,$O
+ STB $O,*${OUT}++[1]
+||[!B1] BNOP epilogue?
+|| ROTL $Xi,16,$T
+|| [A0] LDBU *${INP}++[1],$T
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+|| XOR A24,B24,$O
+ STB $O,*${OUT}++[1]
+||[!B1] BNOP epilogue?
+|| ROTL $Xi,8,$T
+|| [A0] LDBU *${INP}++[1],$T
+|| [A0] SUB A0,1,A0
+|| SUB B1,1,B1
+|| XOR A24,B24,$O
+ STB $O,*${OUT}++[1]
+___
+}
+ foreach (1..14) { TAIL_STEP(@X[$_]); }
+$code.=<<___;
+||[!B1] BNOP epilogue?
+|| ROTL @X[15],0,B24
+|| XOR A24,B24,A25
+ STB A25,*${OUT}++[1]
+|| ROTL @X[15],24,B24
+|| XOR A24,B24,A25
+ STB A25,*${OUT}++[1]
+|| ROTL @X[15],16,B24
+|| XOR A24,B24,A25
+ STB A25,*${OUT}++[1]
+|| XOR A24,B24,A25
+ STB A25,*${OUT}++[1]
+|| XOR A24,B24,B25
+ STB B25,*${OUT}++[1]
+ .endasmfunc
+
+ .sect .const
+ .cstring "ChaCha20 for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
+ .align 4
+___
+
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/chacha/asm/chacha-ppc.pl b/openssl-1.1.0h/crypto/chacha/asm/chacha-ppc.pl
new file mode 100755
index 0000000..181decd
--- /dev/null
+++ b/openssl-1.1.0h/crypto/chacha/asm/chacha-ppc.pl
@@ -0,0 +1,953 @@
+#! /usr/bin/env perl
+# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# October 2015
+#
+# ChaCha20 for PowerPC/AltiVec.
+#
+# Performance in cycles per byte out of large buffer.
+#
+# IALU/gcc-4.x 3xAltiVec+1xIALU
+#
+# Freescale e300 13.6/+115% -
+# PPC74x0/G4e 6.81/+310% 4.66
+# PPC970/G5 9.29/+160% 4.60
+# POWER7 8.62/+61% 4.27
+# POWER8 8.70/+51% 3.96
+
+$flavour = shift;
+
+if ($flavour =~ /64/) {
+ $SIZE_T =8;
+ $LRSAVE =2*$SIZE_T;
+ $STU ="stdu";
+ $POP ="ld";
+ $PUSH ="std";
+ $UCMP ="cmpld";
+} elsif ($flavour =~ /32/) {
+ $SIZE_T =4;
+ $LRSAVE =$SIZE_T;
+ $STU ="stwu";
+ $POP ="lwz";
+ $PUSH ="stw";
+ $UCMP ="cmplw";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$LOCALS=6*$SIZE_T;
+$FRAME=$LOCALS+64+18*$SIZE_T; # 64 is for local variables
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
+ $code .= "\t$opcode\t".join(',',@_)."\n";
+}
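
Illustrative aside (not part of the patch): the AUTOLOAD thunk above is what lets the ROUND generator below emit assembly by "calling" instructions as if they were Perl subs; any call to an undefined sub is trapped and appended to $code as one instruction line, which is also how the eval'ed "&add(...)"-style strings are consumed. A minimal standalone sketch of the same mechanism:

    # Editor's sketch only: standalone demonstration of the AUTOLOAD thunk.
    use strict; use warnings;

    our $code = "";
    our $AUTOLOAD;

    sub AUTOLOAD {                          # same idea as the thunk above
        my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
        $code .= "\t$opcode\t".join(',',@_)."\n";
    }

    add   ("r16","r16","r20");              # no such Perl sub -> AUTOLOAD
    rotlwi("r28","r28",16);
    eval '&xor ("r28","r28","r16")';        # how the ROUND string lists are consumed
    print $code;                            # add r16,r16,r20 / rotlwi r28,r28,16 / xor r28,r28,r16
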
+
+my $sp = "r1";
+
+my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));
+
+my @x=map("r$_",(16..31));
+my @d=map("r$_",(11,12,14,15));
+my @t=map("r$_",(7..10));
+
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+
+ (
+ "&add (@x[$a0],@x[$a0],@x[$b0])",
+ "&add (@x[$a1],@x[$a1],@x[$b1])",
+ "&add (@x[$a2],@x[$a2],@x[$b2])",
+ "&add (@x[$a3],@x[$a3],@x[$b3])",
+ "&xor (@x[$d0],@x[$d0],@x[$a0])",
+ "&xor (@x[$d1],@x[$d1],@x[$a1])",
+ "&xor (@x[$d2],@x[$d2],@x[$a2])",
+ "&xor (@x[$d3],@x[$d3],@x[$a3])",
+ "&rotlwi (@x[$d0],@x[$d0],16)",
+ "&rotlwi (@x[$d1],@x[$d1],16)",
+ "&rotlwi (@x[$d2],@x[$d2],16)",
+ "&rotlwi (@x[$d3],@x[$d3],16)",
+
+ "&add (@x[$c0],@x[$c0],@x[$d0])",
+ "&add (@x[$c1],@x[$c1],@x[$d1])",
+ "&add (@x[$c2],@x[$c2],@x[$d2])",
+ "&add (@x[$c3],@x[$c3],@x[$d3])",
+ "&xor (@x[$b0],@x[$b0],@x[$c0])",
+ "&xor (@x[$b1],@x[$b1],@x[$c1])",
+ "&xor (@x[$b2],@x[$b2],@x[$c2])",
+ "&xor (@x[$b3],@x[$b3],@x[$c3])",
+ "&rotlwi (@x[$b0],@x[$b0],12)",
+ "&rotlwi (@x[$b1],@x[$b1],12)",
+ "&rotlwi (@x[$b2],@x[$b2],12)",
+ "&rotlwi (@x[$b3],@x[$b3],12)",
+
+ "&add (@x[$a0],@x[$a0],@x[$b0])",
+ "&add (@x[$a1],@x[$a1],@x[$b1])",
+ "&add (@x[$a2],@x[$a2],@x[$b2])",
+ "&add (@x[$a3],@x[$a3],@x[$b3])",
+ "&xor (@x[$d0],@x[$d0],@x[$a0])",
+ "&xor (@x[$d1],@x[$d1],@x[$a1])",
+ "&xor (@x[$d2],@x[$d2],@x[$a2])",
+ "&xor (@x[$d3],@x[$d3],@x[$a3])",
+ "&rotlwi (@x[$d0],@x[$d0],8)",
+ "&rotlwi (@x[$d1],@x[$d1],8)",
+ "&rotlwi (@x[$d2],@x[$d2],8)",
+ "&rotlwi (@x[$d3],@x[$d3],8)",
+
+ "&add (@x[$c0],@x[$c0],@x[$d0])",
+ "&add (@x[$c1],@x[$c1],@x[$d1])",
+ "&add (@x[$c2],@x[$c2],@x[$d2])",
+ "&add (@x[$c3],@x[$c3],@x[$d3])",
+ "&xor (@x[$b0],@x[$b0],@x[$c0])",
+ "&xor (@x[$b1],@x[$b1],@x[$c1])",
+ "&xor (@x[$b2],@x[$b2],@x[$c2])",
+ "&xor (@x[$b3],@x[$b3],@x[$c3])",
+ "&rotlwi (@x[$b0],@x[$b0],7)",
+ "&rotlwi (@x[$b1],@x[$b1],7)",
+ "&rotlwi (@x[$b2],@x[$b2],7)",
+ "&rotlwi (@x[$b3],@x[$b3],7)"
+ );
+}
+
+$code.=<<___;
+.machine "any"
+.text
+
+.globl .ChaCha20_ctr32_int
+.align 5
+.ChaCha20_ctr32_int:
+__ChaCha20_ctr32_int:
+ ${UCMP}i $len,0
+ beqlr-
+
+ $STU $sp,-$FRAME($sp)
+ mflr r0
+
+ $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
+ $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
+ $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
+ $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
+ $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
+ $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
+ $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
+ $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
+ $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
+ $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
+ $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
+ $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
+ $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
+ $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
+ $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
+ $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
+ $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
+ $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
+ $PUSH r0,`$FRAME+$LRSAVE`($sp)
+
+ lwz @d[0],0($ctr) # load counter
+ lwz @d[1],4($ctr)
+ lwz @d[2],8($ctr)
+ lwz @d[3],12($ctr)
+
+ bl __ChaCha20_1x
+
+ $POP r0,`$FRAME+$LRSAVE`($sp)
+ $POP r14,`$FRAME-$SIZE_T*18`($sp)
+ $POP r15,`$FRAME-$SIZE_T*17`($sp)
+ $POP r16,`$FRAME-$SIZE_T*16`($sp)
+ $POP r17,`$FRAME-$SIZE_T*15`($sp)
+ $POP r18,`$FRAME-$SIZE_T*14`($sp)
+ $POP r19,`$FRAME-$SIZE_T*13`($sp)
+ $POP r20,`$FRAME-$SIZE_T*12`($sp)
+ $POP r21,`$FRAME-$SIZE_T*11`($sp)
+ $POP r22,`$FRAME-$SIZE_T*10`($sp)
+ $POP r23,`$FRAME-$SIZE_T*9`($sp)
+ $POP r24,`$FRAME-$SIZE_T*8`($sp)
+ $POP r25,`$FRAME-$SIZE_T*7`($sp)
+ $POP r26,`$FRAME-$SIZE_T*6`($sp)
+ $POP r27,`$FRAME-$SIZE_T*5`($sp)
+ $POP r28,`$FRAME-$SIZE_T*4`($sp)
+ $POP r29,`$FRAME-$SIZE_T*3`($sp)
+ $POP r30,`$FRAME-$SIZE_T*2`($sp)
+ $POP r31,`$FRAME-$SIZE_T*1`($sp)
+ mtlr r0
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,1,0x80,18,5,0
+ .long 0
+.size .ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int
+
+.align 5
+__ChaCha20_1x:
+Loop_outer:
+ lis @x[0],0x6170 # synthesize sigma
+ lis @x[1],0x3320
+ lis @x[2],0x7962
+ lis @x[3],0x6b20
+ ori @x[0],@x[0],0x7865
+ ori @x[1],@x[1],0x646e
+ ori @x[2],@x[2],0x2d32
+ ori @x[3],@x[3],0x6574
+
+ li r0,10 # inner loop counter
+ lwz @x[4],0($key) # load key
+ lwz @x[5],4($key)
+ lwz @x[6],8($key)
+ lwz @x[7],12($key)
+ lwz @x[8],16($key)
+ mr @x[12],@d[0] # copy counter
+ lwz @x[9],20($key)
+ mr @x[13],@d[1]
+ lwz @x[10],24($key)
+ mr @x[14],@d[2]
+ lwz @x[11],28($key)
+ mr @x[15],@d[3]
+
+ mr @t[0],@x[4]
+ mr @t[1],@x[5]
+ mr @t[2],@x[6]
+ mr @t[3],@x[7]
+
+ mtctr r0
+Loop:
+___
+ foreach (&ROUND(0, 4, 8,12)) { eval; }
+ foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+ bdnz Loop
+
+ subic $len,$len,64 # $len-=64
+ addi @x[0],@x[0],0x7865 # accumulate key block
+ addi @x[1],@x[1],0x646e
+ addi @x[2],@x[2],0x2d32
+ addi @x[3],@x[3],0x6574
+ addis @x[0],@x[0],0x6170
+ addis @x[1],@x[1],0x3320
+ addis @x[2],@x[2],0x7962
+ addis @x[3],@x[3],0x6b20
+
+ subfe. r0,r0,r0 # borrow?-1:0
+ add @x[4],@x[4],@t[0]
+ lwz @t[0],16($key)
+ add @x[5],@x[5],@t[1]
+ lwz @t[1],20($key)
+ add @x[6],@x[6],@t[2]
+ lwz @t[2],24($key)
+ add @x[7],@x[7],@t[3]
+ lwz @t[3],28($key)
+ add @x[8],@x[8],@t[0]
+ add @x[9],@x[9],@t[1]
+ add @x[10],@x[10],@t[2]
+ add @x[11],@x[11],@t[3]
+
+ add @x[12],@x[12],@d[0]
+ add @x[13],@x[13],@d[1]
+ add @x[14],@x[14],@d[2]
+ add @x[15],@x[15],@d[3]
+ addi @d[0],@d[0],1 # increment counter
+___
+if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) { # flip byte order
+$code.=<<___;
+ mr @t[$i&3],@x[$i]
+ rotlwi @x[$i],@x[$i],8
+ rlwimi @x[$i],@t[$i&3],24,0,7
+ rlwimi @x[$i],@t[$i&3],24,16,23
+___
+} }
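
Illustrative aside (not part of the patch): the mr/rotlwi/rlwimi triple emitted by the loop above is a three-instruction 32-bit byte swap (rotate left 8, then patch bytes 0 and 2 from the word rotated left 24). A hedged Perl model of what those instructions compute, checked against a plain byte swap; the masks follow PPC bit numbering (bit 0 = MSB).

    # Editor's sketch only: Perl model of the generated byte-flip sequence.
    use strict; use warnings;

    sub rotl32 { my ($v,$n)=@_; (($v<<$n)|($v>>(32-$n))) & 0xffffffff }

    sub flip {
        my $x = shift;
        my $t = $x;                                   # mr     t,x
        $x = rotl32($x,8);                            # rotlwi x,x,8
        my $r = rotl32($t,24);
        $x = ($x & 0x00ffffff) | ($r & 0xff000000);   # rlwimi x,t,24,0,7
        $x = ($x & 0xffff00ff) | ($r & 0x0000ff00);   # rlwimi x,t,24,16,23
        return $x;
    }

    my $v = 0x61707865;                               # "expa"
    printf "%08x -> %08x\n", $v, flip($v);            # 61707865 -> 65787061
    die "mismatch" unless flip($v) == unpack("V",pack("N",$v));
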
+$code.=<<___;
+ bne Ltail # $len-=64 borrowed
+
+ lwz @t[0],0($inp) # load input, aligned or not
+ lwz @t[1],4($inp)
+ ${UCMP}i $len,0 # done already?
+ lwz @t[2],8($inp)
+ lwz @t[3],12($inp)
+ xor @x[0],@x[0],@t[0] # xor with input
+ lwz @t[0],16($inp)
+ xor @x[1],@x[1],@t[1]
+ lwz @t[1],20($inp)
+ xor @x[2],@x[2],@t[2]
+ lwz @t[2],24($inp)
+ xor @x[3],@x[3],@t[3]
+ lwz @t[3],28($inp)
+ xor @x[4],@x[4],@t[0]
+ lwz @t[0],32($inp)
+ xor @x[5],@x[5],@t[1]
+ lwz @t[1],36($inp)
+ xor @x[6],@x[6],@t[2]
+ lwz @t[2],40($inp)
+ xor @x[7],@x[7],@t[3]
+ lwz @t[3],44($inp)
+ xor @x[8],@x[8],@t[0]
+ lwz @t[0],48($inp)
+ xor @x[9],@x[9],@t[1]
+ lwz @t[1],52($inp)
+ xor @x[10],@x[10],@t[2]
+ lwz @t[2],56($inp)
+ xor @x[11],@x[11],@t[3]
+ lwz @t[3],60($inp)
+ xor @x[12],@x[12],@t[0]
+ stw @x[0],0($out) # store output, aligned or not
+ xor @x[13],@x[13],@t[1]
+ stw @x[1],4($out)
+ xor @x[14],@x[14],@t[2]
+ stw @x[2],8($out)
+ xor @x[15],@x[15],@t[3]
+ stw @x[3],12($out)
+ stw @x[4],16($out)
+ stw @x[5],20($out)
+ stw @x[6],24($out)
+ stw @x[7],28($out)
+ stw @x[8],32($out)
+ stw @x[9],36($out)
+ stw @x[10],40($out)
+ stw @x[11],44($out)
+ stw @x[12],48($out)
+ stw @x[13],52($out)
+ stw @x[14],56($out)
+ addi $inp,$inp,64
+ stw @x[15],60($out)
+ addi $out,$out,64
+
+ bne Loop_outer
+
+ blr
+
+.align 4
+Ltail:
+ addi $len,$len,64 # restore tail length
+ subi $inp,$inp,1 # prepare for *++ptr
+ subi $out,$out,1
+ addi @t[0],$sp,$LOCALS-1
+ mtctr $len
+
+ stw @x[0],`$LOCALS+0`($sp) # save whole block to stack
+ stw @x[1],`$LOCALS+4`($sp)
+ stw @x[2],`$LOCALS+8`($sp)
+ stw @x[3],`$LOCALS+12`($sp)
+ stw @x[4],`$LOCALS+16`($sp)
+ stw @x[5],`$LOCALS+20`($sp)
+ stw @x[6],`$LOCALS+24`($sp)
+ stw @x[7],`$LOCALS+28`($sp)
+ stw @x[8],`$LOCALS+32`($sp)
+ stw @x[9],`$LOCALS+36`($sp)
+ stw @x[10],`$LOCALS+40`($sp)
+ stw @x[11],`$LOCALS+44`($sp)
+ stw @x[12],`$LOCALS+48`($sp)
+ stw @x[13],`$LOCALS+52`($sp)
+ stw @x[14],`$LOCALS+56`($sp)
+ stw @x[15],`$LOCALS+60`($sp)
+
+Loop_tail: # byte-by-byte loop
+ lbzu @d[0],1($inp)
+ lbzu @x[0],1(@t[0])
+ xor @d[1],@d[0],@x[0]
+ stbu @d[1],1($out)
+ bdnz Loop_tail
+
+ stw $sp,`$LOCALS+0`($sp) # wipe block on stack
+ stw $sp,`$LOCALS+4`($sp)
+ stw $sp,`$LOCALS+8`($sp)
+ stw $sp,`$LOCALS+12`($sp)
+ stw $sp,`$LOCALS+16`($sp)
+ stw $sp,`$LOCALS+20`($sp)
+ stw $sp,`$LOCALS+24`($sp)
+ stw $sp,`$LOCALS+28`($sp)
+ stw $sp,`$LOCALS+32`($sp)
+ stw $sp,`$LOCALS+36`($sp)
+ stw $sp,`$LOCALS+40`($sp)
+ stw $sp,`$LOCALS+44`($sp)
+ stw $sp,`$LOCALS+48`($sp)
+ stw $sp,`$LOCALS+52`($sp)
+ stw $sp,`$LOCALS+56`($sp)
+ stw $sp,`$LOCALS+60`($sp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+___
+
+{{{
+my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2) =
+ map("v$_",(0..14));
+my (@K)=map("v$_",(15..20));
+my ($FOUR,$sixteen,$twenty4,$twenty,$twelve,$twenty5,$seven) =
+ map("v$_",(21..27));
+my ($inpperm,$outperm,$outmask) = map("v$_",(28..30));
+my @D=("v31",$seven,$T0,$T1,$T2);
+
+my $FRAME=$LOCALS+64+13*16+18*$SIZE_T; # 13*16 is for v20-v31 offload
+
+sub VMXROUND {
+my $odd = pop;
+my ($a,$b,$c,$d,$t)=@_;
+
+ (
+ "&vadduwm ('$a','$a','$b')",
+ "&vxor ('$d','$d','$a')",
+ "&vperm ('$d','$d','$d','$sixteen')",
+
+ "&vadduwm ('$c','$c','$d')",
+ "&vxor ('$t','$b','$c')",
+ "&vsrw ('$b','$t','$twenty')",
+ "&vslw ('$t','$t','$twelve')",
+ "&vor ('$b','$b','$t')",
+
+ "&vadduwm ('$a','$a','$b')",
+ "&vxor ('$d','$d','$a')",
+ "&vperm ('$d','$d','$d','$twenty4')",
+
+ "&vadduwm ('$c','$c','$d')",
+ "&vxor ('$t','$b','$c')",
+ "&vsrw ('$b','$t','$twenty5')",
+ "&vslw ('$t','$t','$seven')",
+ "&vor ('$b','$b','$t')",
+
+ "&vsldoi ('$c','$c','$c',8)",
+ "&vsldoi ('$b','$b','$b',$odd?4:12)",
+ "&vsldoi ('$d','$d','$d',$odd?12:4)"
+ );
+}
+
+$code.=<<___;
+
+.globl .ChaCha20_ctr32_vmx
+.align 5
+.ChaCha20_ctr32_vmx:
+ ${UCMP}i $len,256
+ blt __ChaCha20_ctr32_int
+
+ $STU $sp,-$FRAME($sp)
+ mflr r0
+ li r10,`15+$LOCALS+64`
+ li r11,`31+$LOCALS+64`
+ mfspr r12,256
+ stvx v20,r10,$sp
+ addi r10,r10,32
+ stvx v21,r11,$sp
+ addi r11,r11,32
+ stvx v22,r10,$sp
+ addi r10,r10,32
+ stvx v23,r11,$sp
+ addi r11,r11,32
+ stvx v24,r10,$sp
+ addi r10,r10,32
+ stvx v25,r11,$sp
+ addi r11,r11,32
+ stvx v26,r10,$sp
+ addi r10,r10,32
+ stvx v27,r11,$sp
+ addi r11,r11,32
+ stvx v28,r10,$sp
+ addi r10,r10,32
+ stvx v29,r11,$sp
+ addi r11,r11,32
+ stvx v30,r10,$sp
+ stvx v31,r11,$sp
+ stw r12,`$FRAME-$SIZE_T*18-4`($sp) # save vrsave
+ $PUSH r14,`$FRAME-$SIZE_T*18`($sp)
+ $PUSH r15,`$FRAME-$SIZE_T*17`($sp)
+ $PUSH r16,`$FRAME-$SIZE_T*16`($sp)
+ $PUSH r17,`$FRAME-$SIZE_T*15`($sp)
+ $PUSH r18,`$FRAME-$SIZE_T*14`($sp)
+ $PUSH r19,`$FRAME-$SIZE_T*13`($sp)
+ $PUSH r20,`$FRAME-$SIZE_T*12`($sp)
+ $PUSH r21,`$FRAME-$SIZE_T*11`($sp)
+ $PUSH r22,`$FRAME-$SIZE_T*10`($sp)
+ $PUSH r23,`$FRAME-$SIZE_T*9`($sp)
+ $PUSH r24,`$FRAME-$SIZE_T*8`($sp)
+ $PUSH r25,`$FRAME-$SIZE_T*7`($sp)
+ $PUSH r26,`$FRAME-$SIZE_T*6`($sp)
+ $PUSH r27,`$FRAME-$SIZE_T*5`($sp)
+ $PUSH r28,`$FRAME-$SIZE_T*4`($sp)
+ $PUSH r29,`$FRAME-$SIZE_T*3`($sp)
+ $PUSH r30,`$FRAME-$SIZE_T*2`($sp)
+ $PUSH r31,`$FRAME-$SIZE_T*1`($sp)
+ li r12,-1
+ $PUSH r0, `$FRAME+$LRSAVE`($sp)
+ mtspr 256,r12 # preserve all AltiVec registers
+
+	bl	Lconsts			# returns pointer to Lsigma in r12
+ li @x[0],16
+ li @x[1],32
+ li @x[2],48
+ li @x[3],64
+ li @x[4],31 # 31 is not a typo
+ li @x[5],15 # nor is 15
+
+ lvx @K[1],0,$key # load key
+ ?lvsr $T0,0,$key # prepare unaligned load
+ lvx @K[2],@x[0],$key
+ lvx @D[0],@x[4],$key
+
+ lvx @K[3],0,$ctr # load counter
+ ?lvsr $T1,0,$ctr # prepare unaligned load
+ lvx @D[1],@x[5],$ctr
+
+ lvx @K[0],0,r12 # load constants
+ lvx @K[5],@x[0],r12 # one
+ lvx $FOUR,@x[1],r12
+ lvx $sixteen,@x[2],r12
+ lvx $twenty4,@x[3],r12
+
+ ?vperm @K[1],@K[2],@K[1],$T0 # align key
+ ?vperm @K[2],@D[0],@K[2],$T0
+ ?vperm @K[3],@D[1],@K[3],$T1 # align counter
+
+ lwz @d[0],0($ctr) # load counter to GPR
+ lwz @d[1],4($ctr)
+ vadduwm @K[3],@K[3],@K[5] # adjust AltiVec counter
+ lwz @d[2],8($ctr)
+ vadduwm @K[4],@K[3],@K[5]
+ lwz @d[3],12($ctr)
+ vadduwm @K[5],@K[4],@K[5]
+
+ vspltisw $twenty,-12 # synthesize constants
+ vspltisw $twelve,12
+ vspltisw $twenty5,-7
+ #vspltisw $seven,7 # synthesized in the loop
+
+ vxor $T0,$T0,$T0 # 0x00..00
+ vspltisw $outmask,-1 # 0xff..ff
+ ?lvsr $inpperm,0,$inp # prepare for unaligned load
+ ?lvsl $outperm,0,$out # prepare for unaligned store
+ ?vperm $outmask,$outmask,$T0,$outperm
+
+ be?lvsl $T0,0,@x[0] # 0x00..0f
+ be?vspltisb $T1,3 # 0x03..03
+ be?vxor $T0,$T0,$T1 # swap bytes within words
+ be?vxor $outperm,$outperm,$T1
+ be?vperm $inpperm,$inpperm,$inpperm,$T0
+
+ b Loop_outer_vmx
+
+.align 4
+Loop_outer_vmx:
+ lis @x[0],0x6170 # synthesize sigma
+ lis @x[1],0x3320
+ vmr $A0,@K[0]
+ lis @x[2],0x7962
+ lis @x[3],0x6b20
+ vmr $A1,@K[0]
+ ori @x[0],@x[0],0x7865
+ ori @x[1],@x[1],0x646e
+ vmr $A2,@K[0]
+ ori @x[2],@x[2],0x2d32
+ ori @x[3],@x[3],0x6574
+ vmr $B0,@K[1]
+
+ li r0,10 # inner loop counter
+ lwz @x[4],0($key) # load key to GPR
+ vmr $B1,@K[1]
+ lwz @x[5],4($key)
+ vmr $B2,@K[1]
+ lwz @x[6],8($key)
+ vmr $C0,@K[2]
+ lwz @x[7],12($key)
+ vmr $C1,@K[2]
+ lwz @x[8],16($key)
+ vmr $C2,@K[2]
+ mr @x[12],@d[0] # copy GPR counter
+ lwz @x[9],20($key)
+ vmr $D0,@K[3]
+ mr @x[13],@d[1]
+ lwz @x[10],24($key)
+ vmr $D1,@K[4]
+ mr @x[14],@d[2]
+ lwz @x[11],28($key)
+ vmr $D2,@K[5]
+ mr @x[15],@d[3]
+
+ mr @t[0],@x[4]
+ mr @t[1],@x[5]
+ mr @t[2],@x[6]
+ mr @t[3],@x[7]
+ vspltisw $seven,7
+
+ mtctr r0
+ nop
+Loop_vmx:
+___
+ my @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,0);
+ my @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,0);
+ my @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,0);
+ my @thread3=&ROUND(0,4,8,12);
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread3));
+ eval(shift(@thread1)); eval(shift(@thread3));
+ eval(shift(@thread2)); eval(shift(@thread3));
+ }
+
+ @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,1);
+ @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,1);
+ @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,1);
+ @thread3=&ROUND(0,5,10,15);
+
+ foreach (@thread0) {
+ eval; eval(shift(@thread3));
+ eval(shift(@thread1)); eval(shift(@thread3));
+ eval(shift(@thread2)); eval(shift(@thread3));
+ }
+$code.=<<___;
+ bdnz Loop_vmx
+
+ subi $len,$len,256 # $len-=256
+ addi @x[0],@x[0],0x7865 # accumulate key block
+ addi @x[1],@x[1],0x646e
+ addi @x[2],@x[2],0x2d32
+ addi @x[3],@x[3],0x6574
+ addis @x[0],@x[0],0x6170
+ addis @x[1],@x[1],0x3320
+ addis @x[2],@x[2],0x7962
+ addis @x[3],@x[3],0x6b20
+ add @x[4],@x[4],@t[0]
+ lwz @t[0],16($key)
+ add @x[5],@x[5],@t[1]
+ lwz @t[1],20($key)
+ add @x[6],@x[6],@t[2]
+ lwz @t[2],24($key)
+ add @x[7],@x[7],@t[3]
+ lwz @t[3],28($key)
+ add @x[8],@x[8],@t[0]
+ add @x[9],@x[9],@t[1]
+ add @x[10],@x[10],@t[2]
+ add @x[11],@x[11],@t[3]
+ add @x[12],@x[12],@d[0]
+ add @x[13],@x[13],@d[1]
+ add @x[14],@x[14],@d[2]
+ add @x[15],@x[15],@d[3]
+
+ vadduwm $A0,$A0,@K[0] # accumulate key block
+ vadduwm $A1,$A1,@K[0]
+ vadduwm $A2,$A2,@K[0]
+ vadduwm $B0,$B0,@K[1]
+ vadduwm $B1,$B1,@K[1]
+ vadduwm $B2,$B2,@K[1]
+ vadduwm $C0,$C0,@K[2]
+ vadduwm $C1,$C1,@K[2]
+ vadduwm $C2,$C2,@K[2]
+ vadduwm $D0,$D0,@K[3]
+ vadduwm $D1,$D1,@K[4]
+ vadduwm $D2,$D2,@K[5]
+
+ addi @d[0],@d[0],4 # increment counter
+ vadduwm @K[3],@K[3],$FOUR
+ vadduwm @K[4],@K[4],$FOUR
+ vadduwm @K[5],@K[5],$FOUR
+
+___
+if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) { # flip byte order
+$code.=<<___;
+ mr @t[$i&3],@x[$i]
+ rotlwi @x[$i],@x[$i],8
+ rlwimi @x[$i],@t[$i&3],24,0,7
+ rlwimi @x[$i],@t[$i&3],24,16,23
+___
+} }
+$code.=<<___;
+ lwz @t[0],0($inp) # load input, aligned or not
+ lwz @t[1],4($inp)
+ lwz @t[2],8($inp)
+ lwz @t[3],12($inp)
+ xor @x[0],@x[0],@t[0] # xor with input
+ lwz @t[0],16($inp)
+ xor @x[1],@x[1],@t[1]
+ lwz @t[1],20($inp)
+ xor @x[2],@x[2],@t[2]
+ lwz @t[2],24($inp)
+ xor @x[3],@x[3],@t[3]
+ lwz @t[3],28($inp)
+ xor @x[4],@x[4],@t[0]
+ lwz @t[0],32($inp)
+ xor @x[5],@x[5],@t[1]
+ lwz @t[1],36($inp)
+ xor @x[6],@x[6],@t[2]
+ lwz @t[2],40($inp)
+ xor @x[7],@x[7],@t[3]
+ lwz @t[3],44($inp)
+ xor @x[8],@x[8],@t[0]
+ lwz @t[0],48($inp)
+ xor @x[9],@x[9],@t[1]
+ lwz @t[1],52($inp)
+ xor @x[10],@x[10],@t[2]
+ lwz @t[2],56($inp)
+ xor @x[11],@x[11],@t[3]
+ lwz @t[3],60($inp)
+ xor @x[12],@x[12],@t[0]
+ stw @x[0],0($out) # store output, aligned or not
+ xor @x[13],@x[13],@t[1]
+ stw @x[1],4($out)
+ xor @x[14],@x[14],@t[2]
+ stw @x[2],8($out)
+ xor @x[15],@x[15],@t[3]
+ stw @x[3],12($out)
+ addi $inp,$inp,64
+ stw @x[4],16($out)
+ li @t[0],16
+ stw @x[5],20($out)
+ li @t[1],32
+ stw @x[6],24($out)
+ li @t[2],48
+ stw @x[7],28($out)
+ li @t[3],64
+ stw @x[8],32($out)
+ stw @x[9],36($out)
+ stw @x[10],40($out)
+ stw @x[11],44($out)
+ stw @x[12],48($out)
+ stw @x[13],52($out)
+ stw @x[14],56($out)
+ stw @x[15],60($out)
+ addi $out,$out,64
+
+ lvx @D[0],0,$inp # load input
+ lvx @D[1],@t[0],$inp
+ lvx @D[2],@t[1],$inp
+ lvx @D[3],@t[2],$inp
+ lvx @D[4],@t[3],$inp
+ addi $inp,$inp,64
+
+ ?vperm @D[0],@D[1],@D[0],$inpperm # align input
+ ?vperm @D[1],@D[2],@D[1],$inpperm
+ ?vperm @D[2],@D[3],@D[2],$inpperm
+ ?vperm @D[3],@D[4],@D[3],$inpperm
+ vxor $A0,$A0,@D[0] # xor with input
+ vxor $B0,$B0,@D[1]
+ lvx @D[1],@t[0],$inp # keep loading input
+ vxor $C0,$C0,@D[2]
+ lvx @D[2],@t[1],$inp
+ vxor $D0,$D0,@D[3]
+ lvx @D[3],@t[2],$inp
+ lvx @D[0],@t[3],$inp
+ addi $inp,$inp,64
+ li @t[3],63 # 63 is not a typo
+ vperm $A0,$A0,$A0,$outperm # pre-misalign output
+ vperm $B0,$B0,$B0,$outperm
+ vperm $C0,$C0,$C0,$outperm
+ vperm $D0,$D0,$D0,$outperm
+
+ ?vperm @D[4],@D[1],@D[4],$inpperm # align input
+ ?vperm @D[1],@D[2],@D[1],$inpperm
+ ?vperm @D[2],@D[3],@D[2],$inpperm
+ ?vperm @D[3],@D[0],@D[3],$inpperm
+ vxor $A1,$A1,@D[4]
+ vxor $B1,$B1,@D[1]
+ lvx @D[1],@t[0],$inp # keep loading input
+ vxor $C1,$C1,@D[2]
+ lvx @D[2],@t[1],$inp
+ vxor $D1,$D1,@D[3]
+ lvx @D[3],@t[2],$inp
+ lvx @D[4],@t[3],$inp # redundant in aligned case
+ addi $inp,$inp,64
+ vperm $A1,$A1,$A1,$outperm # pre-misalign output
+ vperm $B1,$B1,$B1,$outperm
+ vperm $C1,$C1,$C1,$outperm
+ vperm $D1,$D1,$D1,$outperm
+
+ ?vperm @D[0],@D[1],@D[0],$inpperm # align input
+ ?vperm @D[1],@D[2],@D[1],$inpperm
+ ?vperm @D[2],@D[3],@D[2],$inpperm
+ ?vperm @D[3],@D[4],@D[3],$inpperm
+ vxor $A2,$A2,@D[0]
+ vxor $B2,$B2,@D[1]
+ vxor $C2,$C2,@D[2]
+ vxor $D2,$D2,@D[3]
+ vperm $A2,$A2,$A2,$outperm # pre-misalign output
+ vperm $B2,$B2,$B2,$outperm
+ vperm $C2,$C2,$C2,$outperm
+ vperm $D2,$D2,$D2,$outperm
+
+ andi. @x[1],$out,15 # is $out aligned?
+ mr @x[0],$out
+
+ vsel @D[0],$A0,$B0,$outmask # collect pre-misaligned output
+ vsel @D[1],$B0,$C0,$outmask
+ vsel @D[2],$C0,$D0,$outmask
+ vsel @D[3],$D0,$A1,$outmask
+ vsel $B0,$A1,$B1,$outmask
+ vsel $C0,$B1,$C1,$outmask
+ vsel $D0,$C1,$D1,$outmask
+ vsel $A1,$D1,$A2,$outmask
+ vsel $B1,$A2,$B2,$outmask
+ vsel $C1,$B2,$C2,$outmask
+ vsel $D1,$C2,$D2,$outmask
+
+ #stvx $A0,0,$out # take it easy on the edges
+ stvx @D[0],@t[0],$out # store output
+ stvx @D[1],@t[1],$out
+ stvx @D[2],@t[2],$out
+ addi $out,$out,64
+ stvx @D[3],0,$out
+ stvx $B0,@t[0],$out
+ stvx $C0,@t[1],$out
+ stvx $D0,@t[2],$out
+ addi $out,$out,64
+ stvx $A1,0,$out
+ stvx $B1,@t[0],$out
+ stvx $C1,@t[1],$out
+ stvx $D1,@t[2],$out
+ addi $out,$out,64
+
+ beq Laligned_vmx
+
+ sub @x[2],$out,@x[1] # in misaligned case edges
+ li @x[3],0 # are written byte-by-byte
+Lunaligned_tail_vmx:
+ stvebx $D2,@x[3],@x[2]
+ addi @x[3],@x[3],1
+ cmpw @x[3],@x[1]
+ bne Lunaligned_tail_vmx
+
+ sub @x[2],@x[0],@x[1]
+Lunaligned_head_vmx:
+ stvebx $A0,@x[1],@x[2]
+ cmpwi @x[1],15
+ addi @x[1],@x[1],1
+ bne Lunaligned_head_vmx
+
+ ${UCMP}i $len,255 # done with 256-byte blocks yet?
+ bgt Loop_outer_vmx
+
+ b Ldone_vmx
+
+.align 4
+Laligned_vmx:
+ stvx $A0,0,@x[0] # head hexaword was not stored
+
+ ${UCMP}i $len,255 # done with 256-byte blocks yet?
+ bgt Loop_outer_vmx
+ nop
+
+Ldone_vmx:
+ ${UCMP}i $len,0 # done yet?
+ bnel __ChaCha20_1x
+
+ lwz r12,`$FRAME-$SIZE_T*18-4`($sp) # pull vrsave
+ li r10,`15+$LOCALS+64`
+ li r11,`31+$LOCALS+64`
+ mtspr 256,r12 # restore vrsave
+ lvx v20,r10,$sp
+ addi r10,r10,32
+ lvx v21,r11,$sp
+ addi r11,r11,32
+ lvx v22,r10,$sp
+ addi r10,r10,32
+ lvx v23,r11,$sp
+ addi r11,r11,32
+ lvx v24,r10,$sp
+ addi r10,r10,32
+ lvx v25,r11,$sp
+ addi r11,r11,32
+ lvx v26,r10,$sp
+ addi r10,r10,32
+ lvx v27,r11,$sp
+ addi r11,r11,32
+ lvx v28,r10,$sp
+ addi r10,r10,32
+ lvx v29,r11,$sp
+ addi r11,r11,32
+ lvx v30,r10,$sp
+ lvx v31,r11,$sp
+ $POP r0, `$FRAME+$LRSAVE`($sp)
+ $POP r14,`$FRAME-$SIZE_T*18`($sp)
+ $POP r15,`$FRAME-$SIZE_T*17`($sp)
+ $POP r16,`$FRAME-$SIZE_T*16`($sp)
+ $POP r17,`$FRAME-$SIZE_T*15`($sp)
+ $POP r18,`$FRAME-$SIZE_T*14`($sp)
+ $POP r19,`$FRAME-$SIZE_T*13`($sp)
+ $POP r20,`$FRAME-$SIZE_T*12`($sp)
+ $POP r21,`$FRAME-$SIZE_T*11`($sp)
+ $POP r22,`$FRAME-$SIZE_T*10`($sp)
+ $POP r23,`$FRAME-$SIZE_T*9`($sp)
+ $POP r24,`$FRAME-$SIZE_T*8`($sp)
+ $POP r25,`$FRAME-$SIZE_T*7`($sp)
+ $POP r26,`$FRAME-$SIZE_T*6`($sp)
+ $POP r27,`$FRAME-$SIZE_T*5`($sp)
+ $POP r28,`$FRAME-$SIZE_T*4`($sp)
+ $POP r29,`$FRAME-$SIZE_T*3`($sp)
+ $POP r30,`$FRAME-$SIZE_T*2`($sp)
+ $POP r31,`$FRAME-$SIZE_T*1`($sp)
+ mtlr r0
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,0x04,1,0x80,18,5,0
+ .long 0
+.size .ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
+
+.align 5
+Lconsts:
+ mflr r0
+ bcl 20,31,\$+4
+	mflr	r12	# distance between . and Lsigma
+ addi r12,r12,`64-8`
+ mtlr r0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,0,0
+ .space `64-9*4`
+Lsigma:
+ .long 0x61707865,0x3320646e,0x79622d32,0x6b206574
+ .long 1,0,0,0
+ .long 4,0,0,0
+___
+$code.=<<___ if ($LITTLE_ENDIAN);
+ .long 0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
+ .long 0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
+___
+$code.=<<___ if (!$LITTLE_ENDIAN); # flipped words
+ .long 0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
+ .long 0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
+___
+$code.=<<___;
+.asciz "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+}}}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ # instructions prefixed with '?' are endian-specific and need
+ # to be adjusted accordingly...
+ if ($flavour !~ /le$/) { # big-endian
+ s/be\?// or
+ s/le\?/#le#/ or
+ s/\?lvsr/lvsl/ or
+ s/\?lvsl/lvsr/ or
+ s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
+ s/(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/;
+ } else { # little-endian
+ s/le\?// or
+ s/be\?/#be#/ or
+ s/\?([a-z]+)/$1/;
+ }
+
+ print $_,"\n";
+}
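
Illustrative aside (not part of the patch): the loop above resolves the '?' convention, e.g. '?lvsr' assembles as lvsl on big-endian and lvsr on little-endian, '?vperm' has its two middle operands swapped on big-endian, and be?/le? prefixes keep or comment out a line depending on the target. The sketch below reuses the same substitutions on a few sample lines; the flavour strings are only examples of big- and little-endian targets.

    # Editor's sketch only: effect of the '?' convention on sample lines.
    use strict; use warnings;

    sub fixup {
        my ($line,$flavour) = @_;
        local $_ = $line;
        if ($flavour !~ /le$/) {        # big-endian
            s/be\?//            or
            s/le\?/#le#/        or
            s/\?lvsr/lvsl/      or
            s/\?lvsl/lvsr/      or
            s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
            s/(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/;
        } else {                        # little-endian
            s/le\?//            or
            s/be\?/#be#/        or
            s/\?([a-z]+)/$1/;
        }
        $_;
    }

    for my $l ("?lvsr\tv12,0,r6", "?vperm\tv16,v17,v16,v12", "be?vxor\tv0,v0,v1") {
        print "in: $l\n";
        print "BE: ", fixup($l,"linux64"),   "\n";
        print "LE: ", fixup($l,"linux64le"), "\n\n";
    }
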
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/chacha/asm/chacha-s390x.pl b/openssl-1.1.0h/crypto/chacha/asm/chacha-s390x.pl
new file mode 100755
index 0000000..c315264
--- /dev/null
+++ b/openssl-1.1.0h/crypto/chacha/asm/chacha-s390x.pl
@@ -0,0 +1,326 @@
+#! /usr/bin/env perl
+# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# December 2015
+#
+# ChaCha20 for s390x.
+#
+# 3 times faster than compiler-generated code.
+
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+sub AUTOLOAD() # thunk [simplified] x86-style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+ $code .= "\t$opcode\t".join(',',@_)."\n";
+}
+
+my $sp="%r15";
+
+my $stdframe=16*$SIZE_T+4*8;
+my $frame=$stdframe+4*20;
+
+my ($out,$inp,$len,$key,$counter)=map("%r$_",(2..6));
+
+my @x=map("%r$_",(0..7,"x","x","x","x",(10..13)));
+my @t=map("%r$_",(8,9));
+
+sub ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_)=map("\"$_\"",@t);
+my @x=map("\"$_\"",@x);
+
+ # Consider order in which variables are addressed by their
+ # index:
+ #
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+ #
+	# 'a', 'b' and 'd's are permanently allocated in registers,
+	# @x[0..7,12..15], while 'c's are maintained in memory. If
+	# you observe the 'c' column, you'll notice that each pair of
+	# 'c's is invariant between rounds. This means that we have to
+	# reload them once per round, in the middle. This is why you'll
+	# see 'c' stores and loads in the middle, but none at the
+	# beginning or end.
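
Illustrative aside (not part of the patch): both column tables above fall out of the index map used at the top of ROUND, ($_&~3)+(($_+1)&3), applied repeatedly to the starting quadruple (0,4,8,12) for an even round and (0,5,10,15) for an odd one. A short Perl check:

    # Editor's sketch only: reproduce the even/odd round index tables.
    use strict; use warnings;

    sub next_quad { map { ($_ & ~3) + (($_ + 1) & 3) } @_ }   # the map from ROUND

    for my $start ([0,4,8,12], [0,5,10,15]) {       # even round, odd round
        my @q = @$start;
        for (1..4) {
            printf "a=%2d b=%2d c=%2d d=%2d\n", @q;
            @q = next_quad(@q);
        }
        print "\n";
    }

Reading the 'c' column of the output also shows why the stm/lm pair sits in the middle of ROUND: the first two quarter-rounds of an even round use c=8,9 and the last two use c=10,11, with the two pairs swapping places in odd rounds.
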
+
+ (
+ "&alr (@x[$a0],@x[$b0])", # Q1
+ "&alr (@x[$a1],@x[$b1])", # Q2
+ "&xr (@x[$d0],@x[$a0])",
+ "&xr (@x[$d1],@x[$a1])",
+ "&rll (@x[$d0],@x[$d0],16)",
+ "&rll (@x[$d1],@x[$d1],16)",
+
+ "&alr ($xc,@x[$d0])",
+ "&alr ($xc_,@x[$d1])",
+ "&xr (@x[$b0],$xc)",
+ "&xr (@x[$b1],$xc_)",
+ "&rll (@x[$b0],@x[$b0],12)",
+ "&rll (@x[$b1],@x[$b1],12)",
+
+ "&alr (@x[$a0],@x[$b0])",
+ "&alr (@x[$a1],@x[$b1])",
+ "&xr (@x[$d0],@x[$a0])",
+ "&xr (@x[$d1],@x[$a1])",
+ "&rll (@x[$d0],@x[$d0],8)",
+ "&rll (@x[$d1],@x[$d1],8)",
+
+ "&alr ($xc,@x[$d0])",
+ "&alr ($xc_,@x[$d1])",
+ "&xr (@x[$b0],$xc)",
+ "&xr (@x[$b1],$xc_)",
+ "&rll (@x[$b0],@x[$b0],7)",
+ "&rll (@x[$b1],@x[$b1],7)",
+
+ "&stm ($xc,$xc_,'$stdframe+4*8+4*$c0($sp)')", # reload pair of 'c's
+ "&lm ($xc,$xc_,'$stdframe+4*8+4*$c2($sp)')",
+
+ "&alr (@x[$a2],@x[$b2])", # Q3
+ "&alr (@x[$a3],@x[$b3])", # Q4
+ "&xr (@x[$d2],@x[$a2])",
+ "&xr (@x[$d3],@x[$a3])",
+ "&rll (@x[$d2],@x[$d2],16)",
+ "&rll (@x[$d3],@x[$d3],16)",
+
+ "&alr ($xc,@x[$d2])",
+ "&alr ($xc_,@x[$d3])",
+ "&xr (@x[$b2],$xc)",
+ "&xr (@x[$b3],$xc_)",
+ "&rll (@x[$b2],@x[$b2],12)",
+ "&rll (@x[$b3],@x[$b3],12)",
+
+ "&alr (@x[$a2],@x[$b2])",
+ "&alr (@x[$a3],@x[$b3])",
+ "&xr (@x[$d2],@x[$a2])",
+ "&xr (@x[$d3],@x[$a3])",
+ "&rll (@x[$d2],@x[$d2],8)",
+ "&rll (@x[$d3],@x[$d3],8)",
+
+ "&alr ($xc,@x[$d2])",
+ "&alr ($xc_,@x[$d3])",
+ "&xr (@x[$b2],$xc)",
+ "&xr (@x[$b3],$xc_)",
+ "&rll (@x[$b2],@x[$b2],7)",
+ "&rll (@x[$b3],@x[$b3],7)"
+ );
+}
+
+$code.=<<___;
+.text
+
+.globl ChaCha20_ctr32
+.type ChaCha20_ctr32,\@function
+.align 32
+ChaCha20_ctr32:
+ lt${g}r $len,$len # $len==0?
+ bzr %r14
+ a${g}hi $len,-64
+ l${g}hi %r1,-$frame
+ stm${g} %r6,%r15,`6*$SIZE_T`($sp)
+ sl${g}r $out,$inp # difference
+ la $len,0($inp,$len) # end of input minus 64
+ larl %r7,.Lsigma
+ lgr %r0,$sp
+ la $sp,0(%r1,$sp)
+ st${g} %r0,0($sp)
+
+ lmg %r8,%r11,0($key) # load key
+ lmg %r12,%r13,0($counter) # load counter
+ lmg %r6,%r7,0(%r7) # load sigma constant
+
+ la %r14,0($inp)
+ st${g} $out,$frame+3*$SIZE_T($sp)
+ st${g} $len,$frame+4*$SIZE_T($sp)
+ stmg %r6,%r13,$stdframe($sp) # copy key schedule to stack
+ srlg @x[12],%r12,32 # 32-bit counter value
+ j .Loop_outer
+
+.align 16
+.Loop_outer:
+ lm @x[0],@x[7],$stdframe+4*0($sp) # load x[0]-x[7]
+ lm @t[0],@t[1],$stdframe+4*10($sp) # load x[10]-x[11]
+ lm @x[13],@x[15],$stdframe+4*13($sp) # load x[13]-x[15]
+ stm @t[0],@t[1],$stdframe+4*8+4*10($sp) # offload x[10]-x[11]
+ lm @t[0],@t[1],$stdframe+4*8($sp) # load x[8]-x[9]
+ st @x[12],$stdframe+4*12($sp) # save counter
+ st${g} %r14,$frame+2*$SIZE_T($sp) # save input pointer
+ lhi %r14,10
+ j .Loop
+
+.align 4
+.Loop:
+___
+ foreach (&ROUND(0, 4, 8,12)) { eval; }
+ foreach (&ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+ brct %r14,.Loop
+
+ l${g} %r14,$frame+2*$SIZE_T($sp) # pull input pointer
+ stm @t[0],@t[1],$stdframe+4*8+4*8($sp) # offload x[8]-x[9]
+ lm${g} @t[0],@t[1],$frame+3*$SIZE_T($sp)
+
+ al @x[0],$stdframe+4*0($sp) # accumulate key schedule
+ al @x[1],$stdframe+4*1($sp)
+ al @x[2],$stdframe+4*2($sp)
+ al @x[3],$stdframe+4*3($sp)
+ al @x[4],$stdframe+4*4($sp)
+ al @x[5],$stdframe+4*5($sp)
+ al @x[6],$stdframe+4*6($sp)
+ al @x[7],$stdframe+4*7($sp)
+ lrvr @x[0],@x[0]
+ lrvr @x[1],@x[1]
+ lrvr @x[2],@x[2]
+ lrvr @x[3],@x[3]
+ lrvr @x[4],@x[4]
+ lrvr @x[5],@x[5]
+ lrvr @x[6],@x[6]
+ lrvr @x[7],@x[7]
+ al @x[12],$stdframe+4*12($sp)
+ al @x[13],$stdframe+4*13($sp)
+ al @x[14],$stdframe+4*14($sp)
+ al @x[15],$stdframe+4*15($sp)
+ lrvr @x[12],@x[12]
+ lrvr @x[13],@x[13]
+ lrvr @x[14],@x[14]
+ lrvr @x[15],@x[15]
+
+ la @t[0],0(@t[0],%r14) # reconstruct output pointer
+ cl${g}r %r14,@t[1]
+ jh .Ltail
+
+ x @x[0],4*0(%r14) # xor with input
+ x @x[1],4*1(%r14)
+ st @x[0],4*0(@t[0]) # store output
+ x @x[2],4*2(%r14)
+ st @x[1],4*1(@t[0])
+ x @x[3],4*3(%r14)
+ st @x[2],4*2(@t[0])
+ x @x[4],4*4(%r14)
+ st @x[3],4*3(@t[0])
+ lm @x[0],@x[3],$stdframe+4*8+4*8($sp) # load x[8]-x[11]
+ x @x[5],4*5(%r14)
+ st @x[4],4*4(@t[0])
+ x @x[6],4*6(%r14)
+ al @x[0],$stdframe+4*8($sp)
+ st @x[5],4*5(@t[0])
+ x @x[7],4*7(%r14)
+ al @x[1],$stdframe+4*9($sp)
+ st @x[6],4*6(@t[0])
+ x @x[12],4*12(%r14)
+ al @x[2],$stdframe+4*10($sp)
+ st @x[7],4*7(@t[0])
+ x @x[13],4*13(%r14)
+ al @x[3],$stdframe+4*11($sp)
+ st @x[12],4*12(@t[0])
+ x @x[14],4*14(%r14)
+ st @x[13],4*13(@t[0])
+ x @x[15],4*15(%r14)
+ st @x[14],4*14(@t[0])
+ lrvr @x[0],@x[0]
+ st @x[15],4*15(@t[0])
+ lrvr @x[1],@x[1]
+ lrvr @x[2],@x[2]
+ lrvr @x[3],@x[3]
+ lhi @x[12],1
+ x @x[0],4*8(%r14)
+ al @x[12],$stdframe+4*12($sp) # increment counter
+ x @x[1],4*9(%r14)
+ st @x[0],4*8(@t[0])
+ x @x[2],4*10(%r14)
+ st @x[1],4*9(@t[0])
+ x @x[3],4*11(%r14)
+ st @x[2],4*10(@t[0])
+ st @x[3],4*11(@t[0])
+
+ cl${g}r %r14,@t[1] # done yet?
+ la %r14,64(%r14)
+ jl .Loop_outer
+
+.Ldone:
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ stmg %r0,%r3,$stdframe+4*4($sp) # wipe key copy
+ stmg %r0,%r3,$stdframe+4*12($sp)
+
+ lm${g} %r6,%r15,`$frame+6*$SIZE_T`($sp)
+ br %r14
+
+.align 16
+.Ltail:
+ la @t[1],64($t[1])
+ stm @x[0],@x[7],$stdframe+4*0($sp)
+ sl${g}r @t[1],%r14
+ lm @x[0],@x[3],$stdframe+4*8+4*8($sp)
+ l${g}hi @x[6],0
+ stm @x[12],@x[15],$stdframe+4*12($sp)
+ al @x[0],$stdframe+4*8($sp)
+ al @x[1],$stdframe+4*9($sp)
+ al @x[2],$stdframe+4*10($sp)
+ al @x[3],$stdframe+4*11($sp)
+ lrvr @x[0],@x[0]
+ lrvr @x[1],@x[1]
+ lrvr @x[2],@x[2]
+ lrvr @x[3],@x[3]
+ stm @x[0],@x[3],$stdframe+4*8($sp)
+
+.Loop_tail:
+ llgc @x[4],0(@x[6],%r14)
+ llgc @x[5],$stdframe(@x[6],$sp)
+ xr @x[5],@x[4]
+ stc @x[5],0(@x[6],@t[0])
+ la @x[6],1(@x[6])
+ brct @t[1],.Loop_tail
+
+ j .Ldone
+.size ChaCha20_ctr32,.-ChaCha20_ctr32
+
+.align 32
+.Lsigma:
+.long 0x61707865,0x3320646e,0x79622d32,0x6b206574 # endian-neutral
+.asciz "ChaCha20 for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+.align 4
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/chacha/asm/chacha-x86.pl b/openssl-1.1.0h/crypto/chacha/asm/chacha-x86.pl
new file mode 100755
index 0000000..61b3286
--- /dev/null
+++ b/openssl-1.1.0h/crypto/chacha/asm/chacha-x86.pl
@@ -0,0 +1,1154 @@
+#! /usr/bin/env perl
+# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# January 2015
+#
+# ChaCha20 for x86.
+#
+# Performance in cycles per byte out of large buffer.
+#
+# 1xIALU/gcc 4xSSSE3
+# Pentium 17.5/+80%
+# PIII 14.2/+60%
+# P4 18.6/+84%
+# Core2 9.56/+89% 4.83
+# Westmere 9.50/+45% 3.35
+# Sandy Bridge 10.5/+47% 3.20
+# Haswell 8.15/+50% 2.83
+# Silvermont 17.4/+36% 8.35
+# Goldmont 13.4/+40% 4.36
+# Sledgehammer 10.2/+54%
+# Bulldozer 13.4/+50% 4.38(*)
+#
+# (*) Bulldozer actually executes the 4xXOP code path, which delivers 3.55;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output=pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],"chacha-x86.pl",$ARGV[$#ARGV] eq "386");
+
+$xmm=$ymm=0;
+for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+$ymm=1 if ($xmm &&
+ `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
+ ($gasver=$1)>=2.19); # first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
+ $1>=2.03); # first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
+ `ml 2>&1` =~ /Version ([0-9]+)\./ &&
+ $1>=10); # first version supporting AVX
+
+$ymm=1 if ($xmm && !$ymm &&
+ `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/ &&
+ $2>=3.0); # first version supporting AVX
+
+$a="eax";
+($b,$b_)=("ebx","ebp");
+($c,$c_)=("ecx","esi");
+($d,$d_)=("edx","edi");
+
+sub QUARTERROUND {
+my ($ai,$bi,$ci,$di,$i)=@_;
+my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di)); # next
+my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di)); # previous
+
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+
+ if ($i==0) {
+ my $j=4;
+ ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
+ } elsif ($i==3) {
+ my $j=0;
+ ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
+ } elsif ($i==4) {
+ my $j=4;
+ ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
+ } elsif ($i==7) {
+ my $j=0;
+ ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
+ }
+
+ #&add ($a,$b); # see elsewhere
+ &xor ($d,$a);
+ &mov (&DWP(4*$cp,"esp"),$c_) if ($ai>0 && $ai<3);
+ &rol ($d,16);
+ &mov (&DWP(4*$bp,"esp"),$b_) if ($i!=0);
+ &add ($c,$d);
+ &mov ($c_,&DWP(4*$cn,"esp")) if ($ai>0 && $ai<3);
+ &xor ($b,$c);
+ &mov ($d_,&DWP(4*$dn,"esp")) if ($di!=$dn);
+ &rol ($b,12);
+ &mov ($b_,&DWP(4*$bn,"esp")) if ($i<7);
+ &mov ($b_,&DWP(128,"esp")) if ($i==7); # loop counter
+ &add ($a,$b);
+ &xor ($d,$a);
+ &mov (&DWP(4*$ai,"esp"),$a);
+ &rol ($d,8);
+ &mov ($a,&DWP(4*$an,"esp"));
+ &add ($c,$d);
+ &mov (&DWP(4*$di,"esp"),$d) if ($di!=$dn);
+ &mov ($d_,$d) if ($di==$dn);
+ &xor ($b,$c);
+ &add ($a,$b_) if ($i<7); # elsewhere
+ &rol ($b,7);
+
+ ($b,$b_)=($b_,$b);
+ ($c,$c_)=($c_,$c);
+ ($d,$d_)=($d_,$d);
+}
+
+&static_label("ssse3_shortcut");
+&static_label("xop_shortcut");
+&static_label("ssse3_data");
+&static_label("pic_point");
+
+&function_begin("ChaCha20_ctr32");
+ &xor ("eax","eax");
+ &cmp ("eax",&wparam(2)); # len==0?
+ &je (&label("no_data"));
+if ($xmm) {
+ &call (&label("pic_point"));
+&set_label("pic_point");
+ &blindpop("eax");
+ &picmeup("ebp","OPENSSL_ia32cap_P","eax",&label("pic_point"));
+ &test (&DWP(0,"ebp"),1<<24); # test FXSR bit
+ &jz (&label("x86"));
+ &test (&DWP(4,"ebp"),1<<9); # test SSSE3 bit
+ &jz (&label("x86"));
+ &jmp (&label("ssse3_shortcut"));
+&set_label("x86");
+}
+ &mov ("esi",&wparam(3)); # key
+ &mov ("edi",&wparam(4)); # counter and nonce
+
+ &stack_push(33);
+
+ &mov ("eax",&DWP(4*0,"esi")); # copy key
+ &mov ("ebx",&DWP(4*1,"esi"));
+ &mov ("ecx",&DWP(4*2,"esi"));
+ &mov ("edx",&DWP(4*3,"esi"));
+ &mov (&DWP(64+4*4,"esp"),"eax");
+ &mov (&DWP(64+4*5,"esp"),"ebx");
+ &mov (&DWP(64+4*6,"esp"),"ecx");
+ &mov (&DWP(64+4*7,"esp"),"edx");
+ &mov ("eax",&DWP(4*4,"esi"));
+ &mov ("ebx",&DWP(4*5,"esi"));
+ &mov ("ecx",&DWP(4*6,"esi"));
+ &mov ("edx",&DWP(4*7,"esi"));
+ &mov (&DWP(64+4*8,"esp"),"eax");
+ &mov (&DWP(64+4*9,"esp"),"ebx");
+ &mov (&DWP(64+4*10,"esp"),"ecx");
+ &mov (&DWP(64+4*11,"esp"),"edx");
+ &mov ("eax",&DWP(4*0,"edi")); # copy counter and nonce
+ &mov ("ebx",&DWP(4*1,"edi"));
+ &mov ("ecx",&DWP(4*2,"edi"));
+ &mov ("edx",&DWP(4*3,"edi"));
+ &sub ("eax",1);
+ &mov (&DWP(64+4*12,"esp"),"eax");
+ &mov (&DWP(64+4*13,"esp"),"ebx");
+ &mov (&DWP(64+4*14,"esp"),"ecx");
+ &mov (&DWP(64+4*15,"esp"),"edx");
+ &jmp (&label("entry"));
+
+&set_label("outer_loop",16);
+ &mov (&wparam(1),$b); # save input
+ &mov (&wparam(0),$a); # save output
+ &mov (&wparam(2),$c); # save len
+&set_label("entry");
+ &mov ($a,0x61707865);
+ &mov (&DWP(4*1,"esp"),0x3320646e);
+ &mov (&DWP(4*2,"esp"),0x79622d32);
+ &mov (&DWP(4*3,"esp"),0x6b206574);
+
+ &mov ($b, &DWP(64+4*5,"esp")); # copy key material
+ &mov ($b_,&DWP(64+4*6,"esp"));
+ &mov ($c, &DWP(64+4*10,"esp"));
+ &mov ($c_,&DWP(64+4*11,"esp"));
+ &mov ($d, &DWP(64+4*13,"esp"));
+ &mov ($d_,&DWP(64+4*14,"esp"));
+ &mov (&DWP(4*5,"esp"),$b);
+ &mov (&DWP(4*6,"esp"),$b_);
+ &mov (&DWP(4*10,"esp"),$c);
+ &mov (&DWP(4*11,"esp"),$c_);
+ &mov (&DWP(4*13,"esp"),$d);
+ &mov (&DWP(4*14,"esp"),$d_);
+
+ &mov ($b, &DWP(64+4*7,"esp"));
+ &mov ($d_,&DWP(64+4*15,"esp"));
+ &mov ($d, &DWP(64+4*12,"esp"));
+ &mov ($b_,&DWP(64+4*4,"esp"));
+ &mov ($c, &DWP(64+4*8,"esp"));
+ &mov ($c_,&DWP(64+4*9,"esp"));
+ &add ($d,1); # counter value
+ &mov (&DWP(4*7,"esp"),$b);
+ &mov (&DWP(4*15,"esp"),$d_);
+ &mov (&DWP(64+4*12,"esp"),$d); # save counter value
+
+ &mov ($b,10); # loop counter
+ &jmp (&label("loop"));
+
+&set_label("loop",16);
+ &add ($a,$b_); # elsewhere
+ &mov (&DWP(128,"esp"),$b); # save loop counter
+ &mov ($b,$b_);
+ &QUARTERROUND(0, 4, 8, 12, 0);
+ &QUARTERROUND(1, 5, 9, 13, 1);
+ &QUARTERROUND(2, 6,10, 14, 2);
+ &QUARTERROUND(3, 7,11, 15, 3);
+ &QUARTERROUND(0, 5,10, 15, 4);
+ &QUARTERROUND(1, 6,11, 12, 5);
+ &QUARTERROUND(2, 7, 8, 13, 6);
+ &QUARTERROUND(3, 4, 9, 14, 7);
+ &dec ($b);
+ &jnz (&label("loop"));
+
+ &mov ($b,&wparam(2)); # load len
+
+ &add ($a,0x61707865); # accumulate key material
+ &add ($b_,&DWP(64+4*4,"esp"));
+ &add ($c, &DWP(64+4*8,"esp"));
+ &add ($c_,&DWP(64+4*9,"esp"));
+
+ &cmp ($b,64);
+ &jb (&label("tail"));
+
+ &mov ($b,&wparam(1)); # load input pointer
+ &add ($d, &DWP(64+4*12,"esp"));
+ &add ($d_,&DWP(64+4*14,"esp"));
+
+ &xor ($a, &DWP(4*0,$b)); # xor with input
+ &xor ($b_,&DWP(4*4,$b));
+ &mov (&DWP(4*0,"esp"),$a);
+ &mov ($a,&wparam(0)); # load output pointer
+ &xor ($c, &DWP(4*8,$b));
+ &xor ($c_,&DWP(4*9,$b));
+ &xor ($d, &DWP(4*12,$b));
+ &xor ($d_,&DWP(4*14,$b));
+ &mov (&DWP(4*4,$a),$b_); # write output
+ &mov (&DWP(4*8,$a),$c);
+ &mov (&DWP(4*9,$a),$c_);
+ &mov (&DWP(4*12,$a),$d);
+ &mov (&DWP(4*14,$a),$d_);
+
+ &mov ($b_,&DWP(4*1,"esp"));
+ &mov ($c, &DWP(4*2,"esp"));
+ &mov ($c_,&DWP(4*3,"esp"));
+ &mov ($d, &DWP(4*5,"esp"));
+ &mov ($d_,&DWP(4*6,"esp"));
+ &add ($b_,0x3320646e); # accumulate key material
+ &add ($c, 0x79622d32);
+ &add ($c_,0x6b206574);
+ &add ($d, &DWP(64+4*5,"esp"));
+ &add ($d_,&DWP(64+4*6,"esp"));
+ &xor ($b_,&DWP(4*1,$b));
+ &xor ($c, &DWP(4*2,$b));
+ &xor ($c_,&DWP(4*3,$b));
+ &xor ($d, &DWP(4*5,$b));
+ &xor ($d_,&DWP(4*6,$b));
+ &mov (&DWP(4*1,$a),$b_);
+ &mov (&DWP(4*2,$a),$c);
+ &mov (&DWP(4*3,$a),$c_);
+ &mov (&DWP(4*5,$a),$d);
+ &mov (&DWP(4*6,$a),$d_);
+
+ &mov ($b_,&DWP(4*7,"esp"));
+ &mov ($c, &DWP(4*10,"esp"));
+ &mov ($c_,&DWP(4*11,"esp"));
+ &mov ($d, &DWP(4*13,"esp"));
+ &mov ($d_,&DWP(4*15,"esp"));
+ &add ($b_,&DWP(64+4*7,"esp"));
+ &add ($c, &DWP(64+4*10,"esp"));
+ &add ($c_,&DWP(64+4*11,"esp"));
+ &add ($d, &DWP(64+4*13,"esp"));
+ &add ($d_,&DWP(64+4*15,"esp"));
+ &xor ($b_,&DWP(4*7,$b));
+ &xor ($c, &DWP(4*10,$b));
+ &xor ($c_,&DWP(4*11,$b));
+ &xor ($d, &DWP(4*13,$b));
+ &xor ($d_,&DWP(4*15,$b));
+ &lea ($b,&DWP(4*16,$b));
+ &mov (&DWP(4*7,$a),$b_);
+ &mov ($b_,&DWP(4*0,"esp"));
+ &mov (&DWP(4*10,$a),$c);
+ &mov ($c,&wparam(2)); # len
+ &mov (&DWP(4*11,$a),$c_);
+ &mov (&DWP(4*13,$a),$d);
+ &mov (&DWP(4*15,$a),$d_);
+ &mov (&DWP(4*0,$a),$b_);
+ &lea ($a,&DWP(4*16,$a));
+ &sub ($c,64);
+ &jnz (&label("outer_loop"));
+
+ &jmp (&label("done"));
+
+&set_label("tail");
+ &add ($d, &DWP(64+4*12,"esp"));
+ &add ($d_,&DWP(64+4*14,"esp"));
+ &mov (&DWP(4*0,"esp"),$a);
+ &mov (&DWP(4*4,"esp"),$b_);
+ &mov (&DWP(4*8,"esp"),$c);
+ &mov (&DWP(4*9,"esp"),$c_);
+ &mov (&DWP(4*12,"esp"),$d);
+ &mov (&DWP(4*14,"esp"),$d_);
+
+ &mov ($b_,&DWP(4*1,"esp"));
+ &mov ($c, &DWP(4*2,"esp"));
+ &mov ($c_,&DWP(4*3,"esp"));
+ &mov ($d, &DWP(4*5,"esp"));
+ &mov ($d_,&DWP(4*6,"esp"));
+ &add ($b_,0x3320646e); # accumulate key material
+ &add ($c, 0x79622d32);
+ &add ($c_,0x6b206574);
+ &add ($d, &DWP(64+4*5,"esp"));
+ &add ($d_,&DWP(64+4*6,"esp"));
+ &mov (&DWP(4*1,"esp"),$b_);
+ &mov (&DWP(4*2,"esp"),$c);
+ &mov (&DWP(4*3,"esp"),$c_);
+ &mov (&DWP(4*5,"esp"),$d);
+ &mov (&DWP(4*6,"esp"),$d_);
+
+ &mov ($b_,&DWP(4*7,"esp"));
+ &mov ($c, &DWP(4*10,"esp"));
+ &mov ($c_,&DWP(4*11,"esp"));
+ &mov ($d, &DWP(4*13,"esp"));
+ &mov ($d_,&DWP(4*15,"esp"));
+ &add ($b_,&DWP(64+4*7,"esp"));
+ &add ($c, &DWP(64+4*10,"esp"));
+ &add ($c_,&DWP(64+4*11,"esp"));
+ &add ($d, &DWP(64+4*13,"esp"));
+ &add ($d_,&DWP(64+4*15,"esp"));
+ &mov (&DWP(4*7,"esp"),$b_);
+ &mov ($b_,&wparam(1)); # load input
+ &mov (&DWP(4*10,"esp"),$c);
+ &mov ($c,&wparam(0)); # load output
+ &mov (&DWP(4*11,"esp"),$c_);
+ &xor ($c_,$c_);
+ &mov (&DWP(4*13,"esp"),$d);
+ &mov (&DWP(4*15,"esp"),$d_);
+
+ &xor ("eax","eax");
+ &xor ("edx","edx");
+&set_label("tail_loop");
+ &movb ("al",&BP(0,$c_,$b_));
+ &movb ("dl",&BP(0,"esp",$c_));
+ &lea ($c_,&DWP(1,$c_));
+ &xor ("al","dl");
+ &mov (&BP(-1,$c,$c_),"al");
+ &dec ($b);
+ &jnz (&label("tail_loop"));
+
+&set_label("done");
+ &stack_pop(33);
+&set_label("no_data");
+&function_end("ChaCha20_ctr32");
+
+if ($xmm) {
+my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
+my ($out,$inp,$len)=("edi","esi","ecx");
+
+sub QUARTERROUND_SSSE3 {
+my ($ai,$bi,$ci,$di,$i)=@_;
+my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di)); # next
+my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di)); # previous
+
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+
+ if ($i==0) {
+ my $j=4;
+ ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
+ } elsif ($i==3) {
+ my $j=0;
+ ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
+ } elsif ($i==4) {
+ my $j=4;
+ ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
+ } elsif ($i==7) {
+ my $j=0;
+ ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
+ }
+
+ #&paddd ($xa,$xb); # see elsewhere
+ #&pxor ($xd,$xa); # see elsewhere
+ &movdqa(&QWP(16*$cp-128,"ebx"),$xc_) if ($ai>0 && $ai<3);
+ &pshufb ($xd,&QWP(0,"eax")); # rot16
+ &movdqa(&QWP(16*$bp-128,"ebx"),$xb_) if ($i!=0);
+ &paddd ($xc,$xd);
+ &movdqa($xc_,&QWP(16*$cn-128,"ebx")) if ($ai>0 && $ai<3);
+ &pxor ($xb,$xc);
+ &movdqa($xb_,&QWP(16*$bn-128,"ebx")) if ($i<7);
+ &movdqa ($xa_,$xb); # borrow as temporary
+ &pslld ($xb,12);
+ &psrld ($xa_,20);
+ &por ($xb,$xa_);
+ &movdqa($xa_,&QWP(16*$an-128,"ebx"));
+ &paddd ($xa,$xb);
+ &movdqa($xd_,&QWP(16*$dn-128,"ebx")) if ($di!=$dn);
+ &pxor ($xd,$xa);
+ &movdqa (&QWP(16*$ai-128,"ebx"),$xa);
+ &pshufb ($xd,&QWP(16,"eax")); # rot8
+ &paddd ($xc,$xd);
+ &movdqa (&QWP(16*$di-128,"ebx"),$xd) if ($di!=$dn);
+ &movdqa ($xd_,$xd) if ($di==$dn);
+ &pxor ($xb,$xc);
+ &paddd ($xa_,$xb_) if ($i<7); # elsewhere
+ &movdqa ($xa,$xb); # borrow as temporary
+ &pslld ($xb,7);
+ &psrld ($xa,25);
+ &pxor ($xd_,$xa_) if ($i<7); # elsewhere
+ &por ($xb,$xa);
+
+ ($xa,$xa_)=($xa_,$xa);
+ ($xb,$xb_)=($xb_,$xb);
+ ($xc,$xc_)=($xc_,$xc);
+ ($xd,$xd_)=($xd_,$xd);
+}
+
+&function_begin("ChaCha20_ssse3");
+&set_label("ssse3_shortcut");
+if ($ymm) {
+ &test (&DWP(4,"ebp"),1<<11); # test XOP bit
+ &jnz (&label("xop_shortcut"));
+}
+
+ &mov ($out,&wparam(0));
+ &mov ($inp,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ("edx",&wparam(3)); # key
+ &mov ("ebx",&wparam(4)); # counter and nonce
+
+ &mov ("ebp","esp");
+ &stack_push (131);
+ &and ("esp",-64);
+ &mov (&DWP(512,"esp"),"ebp");
+
+ &lea ("eax",&DWP(&label("ssse3_data")."-".
+ &label("pic_point"),"eax"));
+ &movdqu ("xmm3",&QWP(0,"ebx")); # counter and nonce
+
+if (defined($gasver) && $gasver>=2.17) {	# even though we encode
+						# pshufb manually, we
+						# handle only register
+						# operands, while this
+						# segment uses a memory
+						# operand...
+ &cmp ($len,64*4);
+ &jb (&label("1x"));
+
+ &mov (&DWP(512+4,"esp"),"edx"); # offload pointers
+ &mov (&DWP(512+8,"esp"),"ebx");
+ &sub ($len,64*4); # bias len
+ &lea ("ebp",&DWP(256+128,"esp")); # size optimization
+
+ &movdqu ("xmm7",&QWP(0,"edx")); # key
+ &pshufd ("xmm0","xmm3",0x00);
+ &pshufd ("xmm1","xmm3",0x55);
+ &pshufd ("xmm2","xmm3",0xaa);
+ &pshufd ("xmm3","xmm3",0xff);
+ &paddd ("xmm0",&QWP(16*3,"eax")); # fix counters
+ &pshufd ("xmm4","xmm7",0x00);
+ &pshufd ("xmm5","xmm7",0x55);
+ &psubd ("xmm0",&QWP(16*4,"eax"));
+ &pshufd ("xmm6","xmm7",0xaa);
+ &pshufd ("xmm7","xmm7",0xff);
+ &movdqa (&QWP(16*12-128,"ebp"),"xmm0");
+ &movdqa (&QWP(16*13-128,"ebp"),"xmm1");
+ &movdqa (&QWP(16*14-128,"ebp"),"xmm2");
+ &movdqa (&QWP(16*15-128,"ebp"),"xmm3");
+ &movdqu ("xmm3",&QWP(16,"edx")); # key
+ &movdqa (&QWP(16*4-128,"ebp"),"xmm4");
+ &movdqa (&QWP(16*5-128,"ebp"),"xmm5");
+ &movdqa (&QWP(16*6-128,"ebp"),"xmm6");
+ &movdqa (&QWP(16*7-128,"ebp"),"xmm7");
+ &movdqa ("xmm7",&QWP(16*2,"eax")); # sigma
+ &lea ("ebx",&DWP(128,"esp")); # size optimization
+
+ &pshufd ("xmm0","xmm3",0x00);
+ &pshufd ("xmm1","xmm3",0x55);
+ &pshufd ("xmm2","xmm3",0xaa);
+ &pshufd ("xmm3","xmm3",0xff);
+ &pshufd ("xmm4","xmm7",0x00);
+ &pshufd ("xmm5","xmm7",0x55);
+ &pshufd ("xmm6","xmm7",0xaa);
+ &pshufd ("xmm7","xmm7",0xff);
+ &movdqa (&QWP(16*8-128,"ebp"),"xmm0");
+ &movdqa (&QWP(16*9-128,"ebp"),"xmm1");
+ &movdqa (&QWP(16*10-128,"ebp"),"xmm2");
+ &movdqa (&QWP(16*11-128,"ebp"),"xmm3");
+ &movdqa (&QWP(16*0-128,"ebp"),"xmm4");
+ &movdqa (&QWP(16*1-128,"ebp"),"xmm5");
+ &movdqa (&QWP(16*2-128,"ebp"),"xmm6");
+ &movdqa (&QWP(16*3-128,"ebp"),"xmm7");
+
+ &lea ($inp,&DWP(128,$inp)); # size optimization
+ &lea ($out,&DWP(128,$out)); # size optimization
+ &jmp (&label("outer_loop"));
+
+&set_label("outer_loop",16);
+ #&movdqa ("xmm0",&QWP(16*0-128,"ebp")); # copy key material
+ &movdqa ("xmm1",&QWP(16*1-128,"ebp"));
+ &movdqa ("xmm2",&QWP(16*2-128,"ebp"));
+ &movdqa ("xmm3",&QWP(16*3-128,"ebp"));
+ #&movdqa ("xmm4",&QWP(16*4-128,"ebp"));
+ &movdqa ("xmm5",&QWP(16*5-128,"ebp"));
+ &movdqa ("xmm6",&QWP(16*6-128,"ebp"));
+ &movdqa ("xmm7",&QWP(16*7-128,"ebp"));
+ #&movdqa (&QWP(16*0-128,"ebx"),"xmm0");
+ &movdqa (&QWP(16*1-128,"ebx"),"xmm1");
+ &movdqa (&QWP(16*2-128,"ebx"),"xmm2");
+ &movdqa (&QWP(16*3-128,"ebx"),"xmm3");
+ #&movdqa (&QWP(16*4-128,"ebx"),"xmm4");
+ &movdqa (&QWP(16*5-128,"ebx"),"xmm5");
+ &movdqa (&QWP(16*6-128,"ebx"),"xmm6");
+ &movdqa (&QWP(16*7-128,"ebx"),"xmm7");
+ #&movdqa ("xmm0",&QWP(16*8-128,"ebp"));
+ #&movdqa ("xmm1",&QWP(16*9-128,"ebp"));
+ &movdqa ("xmm2",&QWP(16*10-128,"ebp"));
+ &movdqa ("xmm3",&QWP(16*11-128,"ebp"));
+ &movdqa ("xmm4",&QWP(16*12-128,"ebp"));
+ &movdqa ("xmm5",&QWP(16*13-128,"ebp"));
+ &movdqa ("xmm6",&QWP(16*14-128,"ebp"));
+ &movdqa ("xmm7",&QWP(16*15-128,"ebp"));
+ &paddd ("xmm4",&QWP(16*4,"eax")); # counter value
+ #&movdqa (&QWP(16*8-128,"ebx"),"xmm0");
+ #&movdqa (&QWP(16*9-128,"ebx"),"xmm1");
+ &movdqa (&QWP(16*10-128,"ebx"),"xmm2");
+ &movdqa (&QWP(16*11-128,"ebx"),"xmm3");
+ &movdqa (&QWP(16*12-128,"ebx"),"xmm4");
+ &movdqa (&QWP(16*13-128,"ebx"),"xmm5");
+ &movdqa (&QWP(16*14-128,"ebx"),"xmm6");
+ &movdqa (&QWP(16*15-128,"ebx"),"xmm7");
+ &movdqa (&QWP(16*12-128,"ebp"),"xmm4"); # save counter value
+
+ &movdqa ($xa, &QWP(16*0-128,"ebp"));
+ &movdqa ($xd, "xmm4");
+ &movdqa ($xb_,&QWP(16*4-128,"ebp"));
+ &movdqa ($xc, &QWP(16*8-128,"ebp"));
+ &movdqa ($xc_,&QWP(16*9-128,"ebp"));
+
+ &mov ("edx",10); # loop counter
+ &nop ();
+
+&set_label("loop",16);
+ &paddd ($xa,$xb_); # elsewhere
+ &movdqa ($xb,$xb_);
+ &pxor ($xd,$xa); # elsewhere
+ &QUARTERROUND_SSSE3(0, 4, 8, 12, 0);
+ &QUARTERROUND_SSSE3(1, 5, 9, 13, 1);
+ &QUARTERROUND_SSSE3(2, 6,10, 14, 2);
+ &QUARTERROUND_SSSE3(3, 7,11, 15, 3);
+ &QUARTERROUND_SSSE3(0, 5,10, 15, 4);
+ &QUARTERROUND_SSSE3(1, 6,11, 12, 5);
+ &QUARTERROUND_SSSE3(2, 7, 8, 13, 6);
+ &QUARTERROUND_SSSE3(3, 4, 9, 14, 7);
+ &dec ("edx");
+ &jnz (&label("loop"));
+
+ &movdqa (&QWP(16*4-128,"ebx"),$xb_);
+ &movdqa (&QWP(16*8-128,"ebx"),$xc);
+ &movdqa (&QWP(16*9-128,"ebx"),$xc_);
+ &movdqa (&QWP(16*12-128,"ebx"),$xd);
+ &movdqa (&QWP(16*14-128,"ebx"),$xd_);
+
+ my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
+
+ #&movdqa ($xa0,&QWP(16*0-128,"ebx")); # it's there
+ &movdqa ($xa1,&QWP(16*1-128,"ebx"));
+ &movdqa ($xa2,&QWP(16*2-128,"ebx"));
+ &movdqa ($xa3,&QWP(16*3-128,"ebx"));
+
+ for($i=0;$i<256;$i+=64) {
+ &paddd ($xa0,&QWP($i+16*0-128,"ebp")); # accumulate key material
+ &paddd ($xa1,&QWP($i+16*1-128,"ebp"));
+ &paddd ($xa2,&QWP($i+16*2-128,"ebp"));
+ &paddd ($xa3,&QWP($i+16*3-128,"ebp"));
+
+ &movdqa ($xt2,$xa0); # "de-interlace" data
+ &punpckldq ($xa0,$xa1);
+ &movdqa ($xt3,$xa2);
+ &punpckldq ($xa2,$xa3);
+ &punpckhdq ($xt2,$xa1);
+ &punpckhdq ($xt3,$xa3);
+ &movdqa ($xa1,$xa0);
+ &punpcklqdq ($xa0,$xa2); # "a0"
+ &movdqa ($xa3,$xt2);
+ &punpcklqdq ($xt2,$xt3); # "a2"
+ &punpckhqdq ($xa1,$xa2); # "a1"
+ &punpckhqdq ($xa3,$xt3); # "a3"
+
+ #($xa2,$xt2)=($xt2,$xa2);
+
+ &movdqu ($xt0,&QWP(64*0-128,$inp)); # load input
+ &movdqu ($xt1,&QWP(64*1-128,$inp));
+ &movdqu ($xa2,&QWP(64*2-128,$inp));
+ &movdqu ($xt3,&QWP(64*3-128,$inp));
+ &lea ($inp,&QWP($i<192?16:(64*4-16*3),$inp));
+ &pxor ($xt0,$xa0);
+ &movdqa ($xa0,&QWP($i+16*4-128,"ebx")) if ($i<192);
+ &pxor ($xt1,$xa1);
+ &movdqa ($xa1,&QWP($i+16*5-128,"ebx")) if ($i<192);
+ &pxor ($xt2,$xa2);
+ &movdqa ($xa2,&QWP($i+16*6-128,"ebx")) if ($i<192);
+ &pxor ($xt3,$xa3);
+ &movdqa ($xa3,&QWP($i+16*7-128,"ebx")) if ($i<192);
+ &movdqu (&QWP(64*0-128,$out),$xt0); # store output
+ &movdqu (&QWP(64*1-128,$out),$xt1);
+ &movdqu (&QWP(64*2-128,$out),$xt2);
+ &movdqu (&QWP(64*3-128,$out),$xt3);
+ &lea ($out,&QWP($i<192?16:(64*4-16*3),$out));
+ }
+ &sub ($len,64*4);
+ &jnc (&label("outer_loop"));
+
+ &add ($len,64*4);
+ &jz (&label("done"));
+
+ &mov ("ebx",&DWP(512+8,"esp")); # restore pointers
+ &lea ($inp,&DWP(-128,$inp));
+ &mov ("edx",&DWP(512+4,"esp"));
+ &lea ($out,&DWP(-128,$out));
+
+ &movd ("xmm2",&DWP(16*12-128,"ebp")); # counter value
+ &movdqu ("xmm3",&QWP(0,"ebx"));
+ &paddd ("xmm2",&QWP(16*6,"eax")); # +four
+ &pand ("xmm3",&QWP(16*7,"eax"));
+ &por ("xmm3","xmm2"); # counter value
+}
+{
+my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));
+
+sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
+ &paddd ($a,$b);
+ &pxor ($d,$a);
+ &pshufb ($d,$rot16);
+
+ &paddd ($c,$d);
+ &pxor ($b,$c);
+ &movdqa ($t,$b);
+ &psrld ($b,20);
+ &pslld ($t,12);
+ &por ($b,$t);
+
+ &paddd ($a,$b);
+ &pxor ($d,$a);
+ &pshufb ($d,$rot24);
+
+ &paddd ($c,$d);
+ &pxor ($b,$c);
+ &movdqa ($t,$b);
+ &psrld ($b,25);
+ &pslld ($t,7);
+ &por ($b,$t);
+}
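The two pshufb's in SSSE3ROUND rotate every 32-bit lane left by 16 and by 8 bits (the mask for the latter is the one labelled rot24, a left-rotate by 8 being the same as a right-rotate by 24). A byte-shuffle mask for a left rotate by whole bytes can be derived as below; this is an editorial sketch, the helper name is made up, and the printed values should match the two data_byte rows emitted at ssse3_data:

    sub rot_shuffle_mask {            # pshufb mask rotating each 32-bit lane
        my $bytes = shift;            # left by 8*$bytes bits
        return map { 4*int($_/4) + (($_ - $bytes) % 4) } (0..15);
    }
    print join(",", rot_shuffle_mask(2)), "\n";  # 2,3,0,1, 6,7,4,5, ... ("rot16")
    print join(",", rot_shuffle_mask(1)), "\n";  # 3,0,1,2, 7,4,5,6, ... ("rot24")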
+
+&set_label("1x");
+ &movdqa ($a,&QWP(16*2,"eax")); # sigma
+ &movdqu ($b,&QWP(0,"edx"));
+ &movdqu ($c,&QWP(16,"edx"));
+ #&movdqu ($d,&QWP(0,"ebx")); # already loaded
+ &movdqa ($rot16,&QWP(0,"eax"));
+ &movdqa ($rot24,&QWP(16,"eax"));
+ &mov (&DWP(16*3,"esp"),"ebp");
+
+ &movdqa (&QWP(16*0,"esp"),$a);
+ &movdqa (&QWP(16*1,"esp"),$b);
+ &movdqa (&QWP(16*2,"esp"),$c);
+ &movdqa (&QWP(16*3,"esp"),$d);
+ &mov ("edx",10);
+ &jmp (&label("loop1x"));
+
+&set_label("outer1x",16);
+ &movdqa ($d,&QWP(16*5,"eax")); # one
+ &movdqa ($a,&QWP(16*0,"esp"));
+ &movdqa ($b,&QWP(16*1,"esp"));
+ &movdqa ($c,&QWP(16*2,"esp"));
+ &paddd ($d,&QWP(16*3,"esp"));
+ &mov ("edx",10);
+ &movdqa (&QWP(16*3,"esp"),$d);
+ &jmp (&label("loop1x"));
+
+&set_label("loop1x",16);
+ &SSSE3ROUND();
+ &pshufd ($c,$c,0b01001110);
+ &pshufd ($b,$b,0b00111001);
+ &pshufd ($d,$d,0b10010011);
+ &nop ();
+
+ &SSSE3ROUND();
+ &pshufd ($c,$c,0b01001110);
+ &pshufd ($b,$b,0b10010011);
+ &pshufd ($d,$d,0b00111001);
+
+ &dec ("edx");
+ &jnz (&label("loop1x"));
+
+ &paddd ($a,&QWP(16*0,"esp"));
+ &paddd ($b,&QWP(16*1,"esp"));
+ &paddd ($c,&QWP(16*2,"esp"));
+ &paddd ($d,&QWP(16*3,"esp"));
+
+ &cmp ($len,64);
+ &jb (&label("tail"));
+
+ &movdqu ($t,&QWP(16*0,$inp));
+ &movdqu ($t1,&QWP(16*1,$inp));
+ &pxor ($a,$t); # xor with input
+ &movdqu ($t,&QWP(16*2,$inp));
+ &pxor ($b,$t1);
+ &movdqu ($t1,&QWP(16*3,$inp));
+ &pxor ($c,$t);
+ &pxor ($d,$t1);
+ &lea ($inp,&DWP(16*4,$inp)); # inp+=64
+
+ &movdqu (&QWP(16*0,$out),$a); # write output
+ &movdqu (&QWP(16*1,$out),$b);
+ &movdqu (&QWP(16*2,$out),$c);
+ &movdqu (&QWP(16*3,$out),$d);
+	&lea	($out,&DWP(16*4,$out));		# out+=64
+
+ &sub ($len,64);
+ &jnz (&label("outer1x"));
+
+ &jmp (&label("done"));
+
+&set_label("tail");
+ &movdqa (&QWP(16*0,"esp"),$a);
+ &movdqa (&QWP(16*1,"esp"),$b);
+ &movdqa (&QWP(16*2,"esp"),$c);
+ &movdqa (&QWP(16*3,"esp"),$d);
+
+ &xor ("eax","eax");
+ &xor ("edx","edx");
+ &xor ("ebp","ebp");
+
+&set_label("tail_loop");
+ &movb ("al",&BP(0,"esp","ebp"));
+ &movb ("dl",&BP(0,$inp,"ebp"));
+ &lea ("ebp",&DWP(1,"ebp"));
+ &xor ("al","dl");
+ &movb (&BP(-1,$out,"ebp"),"al");
+ &dec ($len);
+ &jnz (&label("tail_loop"));
+}
+&set_label("done");
+ &mov ("esp",&DWP(512,"esp"));
+&function_end("ChaCha20_ssse3");
+
+&align (64);
+&set_label("ssse3_data");
+&data_byte(0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd);
+&data_byte(0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe);
+&data_word(0x61707865,0x3320646e,0x79622d32,0x6b206574);
+&data_word(0,1,2,3);
+&data_word(4,4,4,4);
+&data_word(1,0,0,0);
+&data_word(4,0,0,0);
+&data_word(0,-1,-1,-1);
+&align (64);
+}
+&asciz ("ChaCha20 for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+if ($ymm) {
+my ($xa,$xa_,$xb,$xb_,$xc,$xc_,$xd,$xd_)=map("xmm$_",(0..7));
+my ($out,$inp,$len)=("edi","esi","ecx");
+
+sub QUARTERROUND_XOP {
+my ($ai,$bi,$ci,$di,$i)=@_;
+my ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+1)&3),($ai,$bi,$ci,$di)); # next
+my ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-1)&3),($ai,$bi,$ci,$di)); # previous
+
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+
+ if ($i==0) {
+ my $j=4;
+ ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_-$j--)&3),($ap,$bp,$cp,$dp));
+ } elsif ($i==3) {
+ my $j=0;
+ ($an,$bn,$cn,$dn)=map(($_&~3)+(($_+$j++)&3),($an,$bn,$cn,$dn));
+ } elsif ($i==4) {
+ my $j=4;
+ ($ap,$bp,$cp,$dp)=map(($_&~3)+(($_+$j--)&3),($ap,$bp,$cp,$dp));
+ } elsif ($i==7) {
+ my $j=0;
+ ($an,$bn,$cn,$dn)=map(($_&~3)+(($_-$j++)&3),($an,$bn,$cn,$dn));
+ }
+
+ #&vpaddd ($xa,$xa,$xb); # see elsewhere
+ #&vpxor ($xd,$xd,$xa); # see elsewhere
+ &vmovdqa (&QWP(16*$cp-128,"ebx"),$xc_) if ($ai>0 && $ai<3);
+ &vprotd ($xd,$xd,16);
+ &vmovdqa (&QWP(16*$bp-128,"ebx"),$xb_) if ($i!=0);
+ &vpaddd ($xc,$xc,$xd);
+ &vmovdqa ($xc_,&QWP(16*$cn-128,"ebx")) if ($ai>0 && $ai<3);
+ &vpxor ($xb,$i!=0?$xb:$xb_,$xc);
+ &vmovdqa ($xa_,&QWP(16*$an-128,"ebx"));
+ &vprotd ($xb,$xb,12);
+ &vmovdqa ($xb_,&QWP(16*$bn-128,"ebx")) if ($i<7);
+ &vpaddd ($xa,$xa,$xb);
+ &vmovdqa ($xd_,&QWP(16*$dn-128,"ebx")) if ($di!=$dn);
+ &vpxor ($xd,$xd,$xa);
+ &vpaddd ($xa_,$xa_,$xb_) if ($i<7); # elsewhere
+ &vprotd ($xd,$xd,8);
+ &vmovdqa (&QWP(16*$ai-128,"ebx"),$xa);
+ &vpaddd ($xc,$xc,$xd);
+ &vmovdqa (&QWP(16*$di-128,"ebx"),$xd) if ($di!=$dn);
+ &vpxor ($xb,$xb,$xc);
+ &vpxor ($xd_,$di==$dn?$xd:$xd_,$xa_) if ($i<7); # elsewhere
+ &vprotd ($xb,$xb,7);
+
+ ($xa,$xa_)=($xa_,$xa);
+ ($xb,$xb_)=($xb_,$xb);
+ ($xc,$xc_)=($xc_,$xc);
+ ($xd,$xd_)=($xd_,$xd);
+}
+
+&function_begin("ChaCha20_xop");
+&set_label("xop_shortcut");
+ &mov ($out,&wparam(0));
+ &mov ($inp,&wparam(1));
+ &mov ($len,&wparam(2));
+ &mov ("edx",&wparam(3)); # key
+ &mov ("ebx",&wparam(4)); # counter and nonce
+ &vzeroupper ();
+
+ &mov ("ebp","esp");
+ &stack_push (131);
+ &and ("esp",-64);
+ &mov (&DWP(512,"esp"),"ebp");
+
+ &lea ("eax",&DWP(&label("ssse3_data")."-".
+ &label("pic_point"),"eax"));
+ &vmovdqu ("xmm3",&QWP(0,"ebx")); # counter and nonce
+
+ &cmp ($len,64*4);
+ &jb (&label("1x"));
+
+ &mov (&DWP(512+4,"esp"),"edx"); # offload pointers
+ &mov (&DWP(512+8,"esp"),"ebx");
+ &sub ($len,64*4); # bias len
+ &lea ("ebp",&DWP(256+128,"esp")); # size optimization
+
+ &vmovdqu ("xmm7",&QWP(0,"edx")); # key
+ &vpshufd ("xmm0","xmm3",0x00);
+ &vpshufd ("xmm1","xmm3",0x55);
+ &vpshufd ("xmm2","xmm3",0xaa);
+ &vpshufd ("xmm3","xmm3",0xff);
+ &vpaddd ("xmm0","xmm0",&QWP(16*3,"eax")); # fix counters
+ &vpshufd ("xmm4","xmm7",0x00);
+ &vpshufd ("xmm5","xmm7",0x55);
+ &vpsubd ("xmm0","xmm0",&QWP(16*4,"eax"));
+ &vpshufd ("xmm6","xmm7",0xaa);
+ &vpshufd ("xmm7","xmm7",0xff);
+ &vmovdqa (&QWP(16*12-128,"ebp"),"xmm0");
+ &vmovdqa (&QWP(16*13-128,"ebp"),"xmm1");
+ &vmovdqa (&QWP(16*14-128,"ebp"),"xmm2");
+ &vmovdqa (&QWP(16*15-128,"ebp"),"xmm3");
+ &vmovdqu ("xmm3",&QWP(16,"edx")); # key
+ &vmovdqa (&QWP(16*4-128,"ebp"),"xmm4");
+ &vmovdqa (&QWP(16*5-128,"ebp"),"xmm5");
+ &vmovdqa (&QWP(16*6-128,"ebp"),"xmm6");
+ &vmovdqa (&QWP(16*7-128,"ebp"),"xmm7");
+ &vmovdqa ("xmm7",&QWP(16*2,"eax")); # sigma
+ &lea ("ebx",&DWP(128,"esp")); # size optimization
+
+ &vpshufd ("xmm0","xmm3",0x00);
+ &vpshufd ("xmm1","xmm3",0x55);
+ &vpshufd ("xmm2","xmm3",0xaa);
+ &vpshufd ("xmm3","xmm3",0xff);
+ &vpshufd ("xmm4","xmm7",0x00);
+ &vpshufd ("xmm5","xmm7",0x55);
+ &vpshufd ("xmm6","xmm7",0xaa);
+ &vpshufd ("xmm7","xmm7",0xff);
+ &vmovdqa (&QWP(16*8-128,"ebp"),"xmm0");
+ &vmovdqa (&QWP(16*9-128,"ebp"),"xmm1");
+ &vmovdqa (&QWP(16*10-128,"ebp"),"xmm2");
+ &vmovdqa (&QWP(16*11-128,"ebp"),"xmm3");
+ &vmovdqa (&QWP(16*0-128,"ebp"),"xmm4");
+ &vmovdqa (&QWP(16*1-128,"ebp"),"xmm5");
+ &vmovdqa (&QWP(16*2-128,"ebp"),"xmm6");
+ &vmovdqa (&QWP(16*3-128,"ebp"),"xmm7");
+
+ &lea ($inp,&DWP(128,$inp)); # size optimization
+ &lea ($out,&DWP(128,$out)); # size optimization
+ &jmp (&label("outer_loop"));
+
+&set_label("outer_loop",32);
+ #&vmovdqa ("xmm0",&QWP(16*0-128,"ebp")); # copy key material
+ &vmovdqa ("xmm1",&QWP(16*1-128,"ebp"));
+ &vmovdqa ("xmm2",&QWP(16*2-128,"ebp"));
+ &vmovdqa ("xmm3",&QWP(16*3-128,"ebp"));
+ #&vmovdqa ("xmm4",&QWP(16*4-128,"ebp"));
+ &vmovdqa ("xmm5",&QWP(16*5-128,"ebp"));
+ &vmovdqa ("xmm6",&QWP(16*6-128,"ebp"));
+ &vmovdqa ("xmm7",&QWP(16*7-128,"ebp"));
+ #&vmovdqa (&QWP(16*0-128,"ebx"),"xmm0");
+ &vmovdqa (&QWP(16*1-128,"ebx"),"xmm1");
+ &vmovdqa (&QWP(16*2-128,"ebx"),"xmm2");
+ &vmovdqa (&QWP(16*3-128,"ebx"),"xmm3");
+ #&vmovdqa (&QWP(16*4-128,"ebx"),"xmm4");
+ &vmovdqa (&QWP(16*5-128,"ebx"),"xmm5");
+ &vmovdqa (&QWP(16*6-128,"ebx"),"xmm6");
+ &vmovdqa (&QWP(16*7-128,"ebx"),"xmm7");
+ #&vmovdqa ("xmm0",&QWP(16*8-128,"ebp"));
+ #&vmovdqa ("xmm1",&QWP(16*9-128,"ebp"));
+ &vmovdqa ("xmm2",&QWP(16*10-128,"ebp"));
+ &vmovdqa ("xmm3",&QWP(16*11-128,"ebp"));
+ &vmovdqa ("xmm4",&QWP(16*12-128,"ebp"));
+ &vmovdqa ("xmm5",&QWP(16*13-128,"ebp"));
+ &vmovdqa ("xmm6",&QWP(16*14-128,"ebp"));
+ &vmovdqa ("xmm7",&QWP(16*15-128,"ebp"));
+ &vpaddd ("xmm4","xmm4",&QWP(16*4,"eax")); # counter value
+ #&vmovdqa (&QWP(16*8-128,"ebx"),"xmm0");
+ #&vmovdqa (&QWP(16*9-128,"ebx"),"xmm1");
+ &vmovdqa (&QWP(16*10-128,"ebx"),"xmm2");
+ &vmovdqa (&QWP(16*11-128,"ebx"),"xmm3");
+ &vmovdqa (&QWP(16*12-128,"ebx"),"xmm4");
+ &vmovdqa (&QWP(16*13-128,"ebx"),"xmm5");
+ &vmovdqa (&QWP(16*14-128,"ebx"),"xmm6");
+ &vmovdqa (&QWP(16*15-128,"ebx"),"xmm7");
+ &vmovdqa (&QWP(16*12-128,"ebp"),"xmm4"); # save counter value
+
+ &vmovdqa ($xa, &QWP(16*0-128,"ebp"));
+ &vmovdqa ($xd, "xmm4");
+ &vmovdqa ($xb_,&QWP(16*4-128,"ebp"));
+ &vmovdqa ($xc, &QWP(16*8-128,"ebp"));
+ &vmovdqa ($xc_,&QWP(16*9-128,"ebp"));
+
+ &mov ("edx",10); # loop counter
+ &nop ();
+
+&set_label("loop",32);
+ &vpaddd ($xa,$xa,$xb_); # elsewhere
+ &vpxor ($xd,$xd,$xa); # elsewhere
+ &QUARTERROUND_XOP(0, 4, 8, 12, 0);
+ &QUARTERROUND_XOP(1, 5, 9, 13, 1);
+ &QUARTERROUND_XOP(2, 6,10, 14, 2);
+ &QUARTERROUND_XOP(3, 7,11, 15, 3);
+ &QUARTERROUND_XOP(0, 5,10, 15, 4);
+ &QUARTERROUND_XOP(1, 6,11, 12, 5);
+ &QUARTERROUND_XOP(2, 7, 8, 13, 6);
+ &QUARTERROUND_XOP(3, 4, 9, 14, 7);
+ &dec ("edx");
+ &jnz (&label("loop"));
+
+ &vmovdqa (&QWP(16*4-128,"ebx"),$xb_);
+ &vmovdqa (&QWP(16*8-128,"ebx"),$xc);
+ &vmovdqa (&QWP(16*9-128,"ebx"),$xc_);
+ &vmovdqa (&QWP(16*12-128,"ebx"),$xd);
+ &vmovdqa (&QWP(16*14-128,"ebx"),$xd_);
+
+ my ($xa0,$xa1,$xa2,$xa3,$xt0,$xt1,$xt2,$xt3)=map("xmm$_",(0..7));
+
+ #&vmovdqa ($xa0,&QWP(16*0-128,"ebx")); # it's there
+ &vmovdqa ($xa1,&QWP(16*1-128,"ebx"));
+ &vmovdqa ($xa2,&QWP(16*2-128,"ebx"));
+ &vmovdqa ($xa3,&QWP(16*3-128,"ebx"));
+
+ for($i=0;$i<256;$i+=64) {
+ &vpaddd ($xa0,$xa0,&QWP($i+16*0-128,"ebp")); # accumulate key material
+ &vpaddd ($xa1,$xa1,&QWP($i+16*1-128,"ebp"));
+ &vpaddd ($xa2,$xa2,&QWP($i+16*2-128,"ebp"));
+ &vpaddd ($xa3,$xa3,&QWP($i+16*3-128,"ebp"));
+
+ &vpunpckldq ($xt2,$xa0,$xa1); # "de-interlace" data
+ &vpunpckldq ($xt3,$xa2,$xa3);
+ &vpunpckhdq ($xa0,$xa0,$xa1);
+ &vpunpckhdq ($xa2,$xa2,$xa3);
+ &vpunpcklqdq ($xa1,$xt2,$xt3); # "a0"
+ &vpunpckhqdq ($xt2,$xt2,$xt3); # "a1"
+ &vpunpcklqdq ($xt3,$xa0,$xa2); # "a2"
+ &vpunpckhqdq ($xa3,$xa0,$xa2); # "a3"
+
+ &vpxor ($xt0,$xa1,&QWP(64*0-128,$inp));
+ &vpxor ($xt1,$xt2,&QWP(64*1-128,$inp));
+ &vpxor ($xt2,$xt3,&QWP(64*2-128,$inp));
+ &vpxor ($xt3,$xa3,&QWP(64*3-128,$inp));
+ &lea ($inp,&QWP($i<192?16:(64*4-16*3),$inp));
+ &vmovdqa ($xa0,&QWP($i+16*4-128,"ebx")) if ($i<192);
+ &vmovdqa ($xa1,&QWP($i+16*5-128,"ebx")) if ($i<192);
+ &vmovdqa ($xa2,&QWP($i+16*6-128,"ebx")) if ($i<192);
+ &vmovdqa ($xa3,&QWP($i+16*7-128,"ebx")) if ($i<192);
+ &vmovdqu (&QWP(64*0-128,$out),$xt0); # store output
+ &vmovdqu (&QWP(64*1-128,$out),$xt1);
+ &vmovdqu (&QWP(64*2-128,$out),$xt2);
+ &vmovdqu (&QWP(64*3-128,$out),$xt3);
+ &lea ($out,&QWP($i<192?16:(64*4-16*3),$out));
+ }
+ &sub ($len,64*4);
+ &jnc (&label("outer_loop"));
+
+ &add ($len,64*4);
+ &jz (&label("done"));
+
+ &mov ("ebx",&DWP(512+8,"esp")); # restore pointers
+ &lea ($inp,&DWP(-128,$inp));
+ &mov ("edx",&DWP(512+4,"esp"));
+ &lea ($out,&DWP(-128,$out));
+
+ &vmovd ("xmm2",&DWP(16*12-128,"ebp")); # counter value
+ &vmovdqu ("xmm3",&QWP(0,"ebx"));
+ &vpaddd ("xmm2","xmm2",&QWP(16*6,"eax"));# +four
+ &vpand ("xmm3","xmm3",&QWP(16*7,"eax"));
+ &vpor ("xmm3","xmm3","xmm2"); # counter value
+{
+my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("xmm$_",(0..7));
+
+sub XOPROUND {
+ &vpaddd ($a,$a,$b);
+ &vpxor ($d,$d,$a);
+ &vprotd ($d,$d,16);
+
+ &vpaddd ($c,$c,$d);
+ &vpxor ($b,$b,$c);
+ &vprotd ($b,$b,12);
+
+ &vpaddd ($a,$a,$b);
+ &vpxor ($d,$d,$a);
+ &vprotd ($d,$d,8);
+
+ &vpaddd ($c,$c,$d);
+ &vpxor ($b,$b,$c);
+ &vprotd ($b,$b,7);
+}
+
+&set_label("1x");
+ &vmovdqa ($a,&QWP(16*2,"eax")); # sigma
+ &vmovdqu ($b,&QWP(0,"edx"));
+ &vmovdqu ($c,&QWP(16,"edx"));
+ #&vmovdqu ($d,&QWP(0,"ebx")); # already loaded
+ &vmovdqa ($rot16,&QWP(0,"eax"));
+ &vmovdqa ($rot24,&QWP(16,"eax"));
+ &mov (&DWP(16*3,"esp"),"ebp");
+
+ &vmovdqa (&QWP(16*0,"esp"),$a);
+ &vmovdqa (&QWP(16*1,"esp"),$b);
+ &vmovdqa (&QWP(16*2,"esp"),$c);
+ &vmovdqa (&QWP(16*3,"esp"),$d);
+ &mov ("edx",10);
+ &jmp (&label("loop1x"));
+
+&set_label("outer1x",16);
+ &vmovdqa ($d,&QWP(16*5,"eax")); # one
+ &vmovdqa ($a,&QWP(16*0,"esp"));
+ &vmovdqa ($b,&QWP(16*1,"esp"));
+ &vmovdqa ($c,&QWP(16*2,"esp"));
+ &vpaddd ($d,$d,&QWP(16*3,"esp"));
+ &mov ("edx",10);
+ &vmovdqa (&QWP(16*3,"esp"),$d);
+ &jmp (&label("loop1x"));
+
+&set_label("loop1x",16);
+ &XOPROUND();
+ &vpshufd ($c,$c,0b01001110);
+ &vpshufd ($b,$b,0b00111001);
+ &vpshufd ($d,$d,0b10010011);
+
+ &XOPROUND();
+ &vpshufd ($c,$c,0b01001110);
+ &vpshufd ($b,$b,0b10010011);
+ &vpshufd ($d,$d,0b00111001);
+
+ &dec ("edx");
+ &jnz (&label("loop1x"));
+
+ &vpaddd ($a,$a,&QWP(16*0,"esp"));
+ &vpaddd ($b,$b,&QWP(16*1,"esp"));
+ &vpaddd ($c,$c,&QWP(16*2,"esp"));
+ &vpaddd ($d,$d,&QWP(16*3,"esp"));
+
+ &cmp ($len,64);
+ &jb (&label("tail"));
+
+ &vpxor ($a,$a,&QWP(16*0,$inp)); # xor with input
+ &vpxor ($b,$b,&QWP(16*1,$inp));
+ &vpxor ($c,$c,&QWP(16*2,$inp));
+ &vpxor ($d,$d,&QWP(16*3,$inp));
+ &lea ($inp,&DWP(16*4,$inp)); # inp+=64
+
+ &vmovdqu (&QWP(16*0,$out),$a); # write output
+ &vmovdqu (&QWP(16*1,$out),$b);
+ &vmovdqu (&QWP(16*2,$out),$c);
+ &vmovdqu (&QWP(16*3,$out),$d);
+	&lea	($out,&DWP(16*4,$out));		# out+=64
+
+ &sub ($len,64);
+ &jnz (&label("outer1x"));
+
+ &jmp (&label("done"));
+
+&set_label("tail");
+ &vmovdqa (&QWP(16*0,"esp"),$a);
+ &vmovdqa (&QWP(16*1,"esp"),$b);
+ &vmovdqa (&QWP(16*2,"esp"),$c);
+ &vmovdqa (&QWP(16*3,"esp"),$d);
+
+ &xor ("eax","eax");
+ &xor ("edx","edx");
+ &xor ("ebp","ebp");
+
+&set_label("tail_loop");
+ &movb ("al",&BP(0,"esp","ebp"));
+ &movb ("dl",&BP(0,$inp,"ebp"));
+ &lea ("ebp",&DWP(1,"ebp"));
+ &xor ("al","dl");
+ &movb (&BP(-1,$out,"ebp"),"al");
+ &dec ($len);
+ &jnz (&label("tail_loop"));
+}
+&set_label("done");
+ &vzeroupper ();
+ &mov ("esp",&DWP(512,"esp"));
+&function_end("ChaCha20_xop");
+}
+
+&asm_finish();
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/chacha/asm/chacha-x86_64.pl b/openssl-1.1.0h/crypto/chacha/asm/chacha-x86_64.pl
new file mode 100755
index 0000000..347dfcb
--- /dev/null
+++ b/openssl-1.1.0h/crypto/chacha/asm/chacha-x86_64.pl
@@ -0,0 +1,2245 @@
+#! /usr/bin/env perl
+# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# November 2014
+#
+# ChaCha20 for x86_64.
+#
+# Performance in cycles per byte out of large buffer.
+#
+# IALU/gcc 4.8(i) 1xSSSE3/SSE2 4xSSSE3 8xAVX2
+#
+# P4 9.48/+99% -/22.7(ii) -
+# Core2 7.83/+55% 7.90/8.08 4.35
+# Westmere 7.19/+50% 5.60/6.70 3.00
+# Sandy Bridge 8.31/+42% 5.45/6.76 2.72
+# Ivy Bridge 6.71/+46% 5.40/6.49 2.41
+# Haswell 5.92/+43% 5.20/6.45 2.42 1.23
+# Silvermont 12.0/+33% 7.75/7.40 7.03(iii)
+# Goldmont 10.6/+17% 5.10/- 3.28
+# Sledgehammer 7.28/+52% -/14.2(ii) -
+# Bulldozer 9.66/+28% 9.85/11.1 3.06(iv)
+# VIA Nano 10.5/+46% 6.72/8.60 6.05
+#
+# (i)	compared to older gcc 3.x one can observe >2x improvement on
+#	most platforms;
+# (ii)	as can be seen, SSE2 performance is too low on legacy
+#	processors; NxSSE2 results are naturally better, but not
+#	impressively better than IALU ones, which is why you won't
+#	find SSE2 code below;
+# (iii)	this is not an optimal result for Atom because of MSROM
+#	limitations; SSE2 can do better, but the gain is considered too
+#	low to justify the [maintenance] effort;
+# (iv)	Bulldozer actually executes the 4xXOP code path, which delivers 2.20;
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
+ $avx = ($2>=3.0) + ($2>3.0);
+}
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+# input parameter block
+($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
+
+$code.=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.align 64
+.Lzero:
+.long 0,0,0,0
+.Lone:
+.long 1,0,0,0
+.Linc:
+.long 0,1,2,3
+.Lfour:
+.long 4,4,4,4
+.Lincy:
+.long 0,2,4,6,1,3,5,7
+.Leight:
+.long 8,8,8,8,8,8,8,8
+.Lrot16:
+.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
+.Lrot24:
+.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
+.Lsigma:
+.asciz "expand 32-byte k"
+.asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
+{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
+ my $arg = pop;
+ $arg = "\$$arg" if ($arg*1 eq $arg);
+ $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
+}
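The AUTOLOAD thunk above is what turns the quoted "&add (...)" strings returned by ROUND, once eval'ed, into lines of AT&T-syntax assembly appended to $code. A self-contained sketch of the same mechanism (editorial illustration only, not part of the module):

    $code = "";
    sub AUTOLOAD {                      # any undefined sub becomes an instruction
        my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
        my $arg = pop;
        $arg = "\$$arg" if ($arg*1 eq $arg);    # immediates get a '$' prefix
        $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
    }
    add ("%eax","%ebx");                # appends "\tadd\t%ebx,%eax\n"
    rol ("%eax",16);                    # appends "\trol\t\$16,%eax\n"
    print $code;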
+
+@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
+ "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
+@t=("%esi","%edi");
+
+sub ROUND { # critical path is 24 cycles per round
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_)=map("\"$_\"",@t);
+my @x=map("\"$_\"",@x);
+
+ # Consider order in which variables are addressed by their
+ # index:
+ #
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+ #
+	# The 'a', 'b' and 'd' values are permanently allocated in
+	# registers, @x[0..7,12..15], while the 'c' values are maintained
+	# in memory. If you look at the 'c' column, you'll notice that a
+	# pair of 'c's stays invariant between quarter-rounds, which means
+	# they have to be reloaded once per round, in the middle. This is
+	# why you'll see a bunch of 'c' stores and loads in the middle,
+	# but none at the beginning or end.
+
+	# Normally the instructions would be interleaved to favour in-order
+	# execution. Out-of-order cores generally manage it gracefully,
+	# but not this time for some reason. Since in-order cores are a
+	# dying breed and the old Atom is the only one still around, the
+	# instructions are left uninterleaved. Besides, Atom is better
+	# off executing the 1xSSSE3 code anyway...
+
+ (
+ "&add (@x[$a0],@x[$b0])", # Q1
+ "&xor (@x[$d0],@x[$a0])",
+ "&rol (@x[$d0],16)",
+ "&add (@x[$a1],@x[$b1])", # Q2
+ "&xor (@x[$d1],@x[$a1])",
+ "&rol (@x[$d1],16)",
+
+ "&add ($xc,@x[$d0])",
+ "&xor (@x[$b0],$xc)",
+ "&rol (@x[$b0],12)",
+ "&add ($xc_,@x[$d1])",
+ "&xor (@x[$b1],$xc_)",
+ "&rol (@x[$b1],12)",
+
+ "&add (@x[$a0],@x[$b0])",
+ "&xor (@x[$d0],@x[$a0])",
+ "&rol (@x[$d0],8)",
+ "&add (@x[$a1],@x[$b1])",
+ "&xor (@x[$d1],@x[$a1])",
+ "&rol (@x[$d1],8)",
+
+ "&add ($xc,@x[$d0])",
+ "&xor (@x[$b0],$xc)",
+ "&rol (@x[$b0],7)",
+ "&add ($xc_,@x[$d1])",
+ "&xor (@x[$b1],$xc_)",
+ "&rol (@x[$b1],7)",
+
+ "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
+ "&mov (\"4*$c1(%rsp)\",$xc_)",
+ "&mov ($xc,\"4*$c2(%rsp)\")",
+ "&mov ($xc_,\"4*$c3(%rsp)\")",
+
+ "&add (@x[$a2],@x[$b2])", # Q3
+ "&xor (@x[$d2],@x[$a2])",
+ "&rol (@x[$d2],16)",
+ "&add (@x[$a3],@x[$b3])", # Q4
+ "&xor (@x[$d3],@x[$a3])",
+ "&rol (@x[$d3],16)",
+
+ "&add ($xc,@x[$d2])",
+ "&xor (@x[$b2],$xc)",
+ "&rol (@x[$b2],12)",
+ "&add ($xc_,@x[$d3])",
+ "&xor (@x[$b3],$xc_)",
+ "&rol (@x[$b3],12)",
+
+ "&add (@x[$a2],@x[$b2])",
+ "&xor (@x[$d2],@x[$a2])",
+ "&rol (@x[$d2],8)",
+ "&add (@x[$a3],@x[$b3])",
+ "&xor (@x[$d3],@x[$a3])",
+ "&rol (@x[$d3],8)",
+
+ "&add ($xc,@x[$d2])",
+ "&xor (@x[$b2],$xc)",
+ "&rol (@x[$b2],7)",
+ "&add ($xc_,@x[$d3])",
+ "&xor (@x[$b3],$xc_)",
+ "&rol (@x[$b3],7)"
+ );
+}
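The map() expression at the top of ROUND derives the indices of the second, third and fourth quarter-rounds from the first. A quick standalone check in plain Perl (editorial sketch) reproduces both the even-round columns and the odd-round diagonals listed in the comment:

    my @q = (0,4,8,12);                                   # Q1 of the even round
    for my $n (2..4) {
        @q = map { ($_ & ~3) + (($_ + 1) & 3) } @q;
        print "Q$n: @q\n";            # 1 5 9 13 / 2 6 10 14 / 3 7 11 15
    }
    print join(" ", map { ($_ & ~3) + (($_ + 1) & 3) } (0,5,10,15)), "\n";
                                      # odd round: Q1 (0 5 10 15) -> Q2 (1 6 11 12)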
+
+########################################################################
+# Generic code path that handles all lengths on pre-SSSE3 processors.
+$code.=<<___;
+.globl ChaCha20_ctr32
+.type ChaCha20_ctr32,\@function,5
+.align 64
+ChaCha20_ctr32:
+ cmp \$0,$len
+ je .Lno_data
+ mov OPENSSL_ia32cap_P+4(%rip),%r10
+ test \$`1<<(41-32)`,%r10d
+ jnz .LChaCha20_ssse3
+
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ sub \$64+24,%rsp
+
+ #movdqa .Lsigma(%rip),%xmm0
+ movdqu ($key),%xmm1
+ movdqu 16($key),%xmm2
+ movdqu ($counter),%xmm3
+ movdqa .Lone(%rip),%xmm4
+
+ #movdqa %xmm0,4*0(%rsp) # key[0]
+ movdqa %xmm1,4*4(%rsp) # key[1]
+ movdqa %xmm2,4*8(%rsp) # key[2]
+ movdqa %xmm3,4*12(%rsp) # key[3]
+ mov $len,%rbp # reassign $len
+ jmp .Loop_outer
+
+.align 32
+.Loop_outer:
+ mov \$0x61707865,@x[0] # 'expa'
+ mov \$0x3320646e,@x[1] # 'nd 3'
+ mov \$0x79622d32,@x[2] # '2-by'
+ mov \$0x6b206574,@x[3] # 'te k'
+ mov 4*4(%rsp),@x[4]
+ mov 4*5(%rsp),@x[5]
+ mov 4*6(%rsp),@x[6]
+ mov 4*7(%rsp),@x[7]
+ movd %xmm3,@x[12]
+ mov 4*13(%rsp),@x[13]
+ mov 4*14(%rsp),@x[14]
+ mov 4*15(%rsp),@x[15]
+
+ mov %rbp,64+0(%rsp) # save len
+ mov \$10,%ebp
+ mov $inp,64+8(%rsp) # save inp
+ movq %xmm2,%rsi # "@x[8]"
+ mov $out,64+16(%rsp) # save out
+ mov %rsi,%rdi
+ shr \$32,%rdi # "@x[9]"
+ jmp .Loop
+
+.align 32
+.Loop:
+___
+ foreach (&ROUND (0, 4, 8,12)) { eval; }
+ foreach (&ROUND (0, 5,10,15)) { eval; }
+ &dec ("%ebp");
+ &jnz (".Loop");
+
+$code.=<<___;
+ mov @t[1],4*9(%rsp) # modulo-scheduled
+ mov @t[0],4*8(%rsp)
+ mov 64(%rsp),%rbp # load len
+ movdqa %xmm2,%xmm1
+ mov 64+8(%rsp),$inp # load inp
+ paddd %xmm4,%xmm3 # increment counter
+ mov 64+16(%rsp),$out # load out
+
+ add \$0x61707865,@x[0] # 'expa'
+ add \$0x3320646e,@x[1] # 'nd 3'
+ add \$0x79622d32,@x[2] # '2-by'
+ add \$0x6b206574,@x[3] # 'te k'
+ add 4*4(%rsp),@x[4]
+ add 4*5(%rsp),@x[5]
+ add 4*6(%rsp),@x[6]
+ add 4*7(%rsp),@x[7]
+ add 4*12(%rsp),@x[12]
+ add 4*13(%rsp),@x[13]
+ add 4*14(%rsp),@x[14]
+ add 4*15(%rsp),@x[15]
+ paddd 4*8(%rsp),%xmm1
+
+ cmp \$64,%rbp
+ jb .Ltail
+
+ xor 4*0($inp),@x[0] # xor with input
+ xor 4*1($inp),@x[1]
+ xor 4*2($inp),@x[2]
+ xor 4*3($inp),@x[3]
+ xor 4*4($inp),@x[4]
+ xor 4*5($inp),@x[5]
+ xor 4*6($inp),@x[6]
+ xor 4*7($inp),@x[7]
+ movdqu 4*8($inp),%xmm0
+ xor 4*12($inp),@x[12]
+ xor 4*13($inp),@x[13]
+ xor 4*14($inp),@x[14]
+ xor 4*15($inp),@x[15]
+ lea 4*16($inp),$inp # inp+=64
+ pxor %xmm1,%xmm0
+
+ movdqa %xmm2,4*8(%rsp)
+ movd %xmm3,4*12(%rsp)
+
+ mov @x[0],4*0($out) # write output
+ mov @x[1],4*1($out)
+ mov @x[2],4*2($out)
+ mov @x[3],4*3($out)
+ mov @x[4],4*4($out)
+ mov @x[5],4*5($out)
+ mov @x[6],4*6($out)
+ mov @x[7],4*7($out)
+ movdqu %xmm0,4*8($out)
+ mov @x[12],4*12($out)
+ mov @x[13],4*13($out)
+ mov @x[14],4*14($out)
+ mov @x[15],4*15($out)
+ lea 4*16($out),$out # out+=64
+
+ sub \$64,%rbp
+ jnz .Loop_outer
+
+ jmp .Ldone
+
+.align 16
+.Ltail:
+ mov @x[0],4*0(%rsp)
+ mov @x[1],4*1(%rsp)
+ xor %rbx,%rbx
+ mov @x[2],4*2(%rsp)
+ mov @x[3],4*3(%rsp)
+ mov @x[4],4*4(%rsp)
+ mov @x[5],4*5(%rsp)
+ mov @x[6],4*6(%rsp)
+ mov @x[7],4*7(%rsp)
+ movdqa %xmm1,4*8(%rsp)
+ mov @x[12],4*12(%rsp)
+ mov @x[13],4*13(%rsp)
+ mov @x[14],4*14(%rsp)
+ mov @x[15],4*15(%rsp)
+
+.Loop_tail:
+ movzb ($inp,%rbx),%eax
+ movzb (%rsp,%rbx),%edx
+ lea 1(%rbx),%rbx
+ xor %edx,%eax
+ mov %al,-1($out,%rbx)
+ dec %rbp
+ jnz .Loop_tail
+
+.Ldone:
+ add \$64+24,%rsp
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+.Lno_data:
+ ret
+.size ChaCha20_ctr32,.-ChaCha20_ctr32
+___
+
+########################################################################
+# SSSE3 code path that handles shorter lengths
+{
+my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
+
+sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
+ &paddd ($a,$b);
+ &pxor ($d,$a);
+ &pshufb ($d,$rot16);
+
+ &paddd ($c,$d);
+ &pxor ($b,$c);
+ &movdqa ($t,$b);
+ &psrld ($b,20);
+ &pslld ($t,12);
+ &por ($b,$t);
+
+ &paddd ($a,$b);
+ &pxor ($d,$a);
+ &pshufb ($d,$rot24);
+
+ &paddd ($c,$d);
+ &pxor ($b,$c);
+ &movdqa ($t,$b);
+ &psrld ($b,25);
+ &pslld ($t,7);
+ &por ($b,$t);
+}
+
+my $xframe = $win64 ? 32+32+8 : 24;
+
+$code.=<<___;
+.type ChaCha20_ssse3,\@function,5
+.align 32
+ChaCha20_ssse3:
+.LChaCha20_ssse3:
+___
+$code.=<<___ if ($avx);
+ test \$`1<<(43-32)`,%r10d
+ jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4
+___
+$code.=<<___;
+ cmp \$128,$len # we might throw away some data,
+ ja .LChaCha20_4x # but overall it won't be slower
+
+.Ldo_sse3_after_all:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ sub \$64+$xframe,%rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,64+32(%rsp)
+ movaps %xmm7,64+48(%rsp)
+___
+$code.=<<___;
+ movdqa .Lsigma(%rip),$a
+ movdqu ($key),$b
+ movdqu 16($key),$c
+ movdqu ($counter),$d
+ movdqa .Lrot16(%rip),$rot16
+ movdqa .Lrot24(%rip),$rot24
+
+ movdqa $a,0x00(%rsp)
+ movdqa $b,0x10(%rsp)
+ movdqa $c,0x20(%rsp)
+ movdqa $d,0x30(%rsp)
+ mov \$10,%ebp
+ jmp .Loop_ssse3
+
+.align 32
+.Loop_outer_ssse3:
+ movdqa .Lone(%rip),$d
+ movdqa 0x00(%rsp),$a
+ movdqa 0x10(%rsp),$b
+ movdqa 0x20(%rsp),$c
+ paddd 0x30(%rsp),$d
+ mov \$10,%ebp
+ movdqa $d,0x30(%rsp)
+ jmp .Loop_ssse3
+
+.align 32
+.Loop_ssse3:
+___
+ &SSSE3ROUND();
+ &pshufd ($c,$c,0b01001110);
+ &pshufd ($b,$b,0b00111001);
+ &pshufd ($d,$d,0b10010011);
+ &nop ();
+
+ &SSSE3ROUND();
+ &pshufd ($c,$c,0b01001110);
+ &pshufd ($b,$b,0b10010011);
+ &pshufd ($d,$d,0b00111001);
+
+ &dec ("%ebp");
+ &jnz (".Loop_ssse3");
+
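Between the two SSSE3ROUND invocations the pshufd immediates (0b00111001 = 0x39, 0b01001110 = 0x4e, 0b10010011 = 0x93) rotate the four 32-bit words of rows b, c and d within their registers, which is what turns the second set of column quarter-rounds into the diagonal ones and back again. A plain-Perl model of the shuffle (editorial sketch; the helper name is made up):

    sub pshufd_model {                  # dst[j] = src[(imm >> 2*j) & 3]
        my ($imm, @src) = @_;
        return map { $src[($imm >> 2*$_) & 3] } (0..3);
    }
    my @b_row = qw(x4 x5 x6 x7);        # second row of the ChaCha state
    print "@{[ pshufd_model(0x39, @b_row) ]}\n";   # x5 x6 x7 x4  (rotate by one word)
    print "@{[ pshufd_model(0x4e, @b_row) ]}\n";   # x6 x7 x4 x5  (rotate by two)
    print "@{[ pshufd_model(0x93, @b_row) ]}\n";   # x7 x4 x5 x6  (rotate by three)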
+$code.=<<___;
+ paddd 0x00(%rsp),$a
+ paddd 0x10(%rsp),$b
+ paddd 0x20(%rsp),$c
+ paddd 0x30(%rsp),$d
+
+ cmp \$64,$len
+ jb .Ltail_ssse3
+
+ movdqu 0x00($inp),$t
+ movdqu 0x10($inp),$t1
+ pxor $t,$a # xor with input
+ movdqu 0x20($inp),$t
+ pxor $t1,$b
+ movdqu 0x30($inp),$t1
+ lea 0x40($inp),$inp # inp+=64
+ pxor $t,$c
+ pxor $t1,$d
+
+ movdqu $a,0x00($out) # write output
+ movdqu $b,0x10($out)
+ movdqu $c,0x20($out)
+ movdqu $d,0x30($out)
+ lea 0x40($out),$out # out+=64
+
+ sub \$64,$len
+ jnz .Loop_outer_ssse3
+
+ jmp .Ldone_ssse3
+
+.align 16
+.Ltail_ssse3:
+ movdqa $a,0x00(%rsp)
+ movdqa $b,0x10(%rsp)
+ movdqa $c,0x20(%rsp)
+ movdqa $d,0x30(%rsp)
+ xor %rbx,%rbx
+
+.Loop_tail_ssse3:
+ movzb ($inp,%rbx),%eax
+ movzb (%rsp,%rbx),%ecx
+ lea 1(%rbx),%rbx
+ xor %ecx,%eax
+ mov %al,-1($out,%rbx)
+ dec $len
+ jnz .Loop_tail_ssse3
+
+.Ldone_ssse3:
+___
+$code.=<<___ if ($win64);
+ movaps 64+32(%rsp),%xmm6
+ movaps 64+48(%rsp),%xmm7
+___
+$code.=<<___;
+ add \$64+$xframe,%rsp
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ ret
+.size ChaCha20_ssse3,.-ChaCha20_ssse3
+___
+}
+
+########################################################################
+# SSSE3 code path that handles longer messages.
+{
+# assign variables to favor Atom front-end
+my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
+ $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
+my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+ "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
+
+sub SSSE3_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
+my @x=map("\"$_\"",@xx);
+
+ # Consider order in which variables are addressed by their
+ # index:
+ #
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+ #
+	# The 'a', 'b' and 'd' values are permanently allocated in
+	# registers, @x[0..7,12..15], while the 'c' values are maintained
+	# in memory. If you look at the 'c' column, you'll notice that a
+	# pair of 'c's stays invariant between quarter-rounds, which means
+	# they have to be reloaded once per round, in the middle. This is
+	# why you'll see a bunch of 'c' stores and loads in the middle,
+	# but none at the beginning or end.
+
+ (
+ "&paddd (@x[$a0],@x[$b0])", # Q1
+ "&paddd (@x[$a1],@x[$b1])", # Q2
+ "&pxor (@x[$d0],@x[$a0])",
+ "&pxor (@x[$d1],@x[$a1])",
+ "&pshufb (@x[$d0],$t1)",
+ "&pshufb (@x[$d1],$t1)",
+
+ "&paddd ($xc,@x[$d0])",
+ "&paddd ($xc_,@x[$d1])",
+ "&pxor (@x[$b0],$xc)",
+ "&pxor (@x[$b1],$xc_)",
+ "&movdqa ($t0,@x[$b0])",
+ "&pslld (@x[$b0],12)",
+ "&psrld ($t0,20)",
+ "&movdqa ($t1,@x[$b1])",
+ "&pslld (@x[$b1],12)",
+ "&por (@x[$b0],$t0)",
+ "&psrld ($t1,20)",
+ "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
+ "&por (@x[$b1],$t1)",
+
+ "&paddd (@x[$a0],@x[$b0])",
+ "&paddd (@x[$a1],@x[$b1])",
+ "&pxor (@x[$d0],@x[$a0])",
+ "&pxor (@x[$d1],@x[$a1])",
+ "&pshufb (@x[$d0],$t0)",
+ "&pshufb (@x[$d1],$t0)",
+
+ "&paddd ($xc,@x[$d0])",
+ "&paddd ($xc_,@x[$d1])",
+ "&pxor (@x[$b0],$xc)",
+ "&pxor (@x[$b1],$xc_)",
+ "&movdqa ($t1,@x[$b0])",
+ "&pslld (@x[$b0],7)",
+ "&psrld ($t1,25)",
+ "&movdqa ($t0,@x[$b1])",
+ "&pslld (@x[$b1],7)",
+ "&por (@x[$b0],$t1)",
+ "&psrld ($t0,25)",
+ "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
+ "&por (@x[$b1],$t0)",
+
+ "&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
+ "&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
+ "&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
+ "&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
+
+ "&paddd (@x[$a2],@x[$b2])", # Q3
+ "&paddd (@x[$a3],@x[$b3])", # Q4
+ "&pxor (@x[$d2],@x[$a2])",
+ "&pxor (@x[$d3],@x[$a3])",
+ "&pshufb (@x[$d2],$t1)",
+ "&pshufb (@x[$d3],$t1)",
+
+ "&paddd ($xc,@x[$d2])",
+ "&paddd ($xc_,@x[$d3])",
+ "&pxor (@x[$b2],$xc)",
+ "&pxor (@x[$b3],$xc_)",
+ "&movdqa ($t0,@x[$b2])",
+ "&pslld (@x[$b2],12)",
+ "&psrld ($t0,20)",
+ "&movdqa ($t1,@x[$b3])",
+ "&pslld (@x[$b3],12)",
+ "&por (@x[$b2],$t0)",
+ "&psrld ($t1,20)",
+ "&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
+ "&por (@x[$b3],$t1)",
+
+ "&paddd (@x[$a2],@x[$b2])",
+ "&paddd (@x[$a3],@x[$b3])",
+ "&pxor (@x[$d2],@x[$a2])",
+ "&pxor (@x[$d3],@x[$a3])",
+ "&pshufb (@x[$d2],$t0)",
+ "&pshufb (@x[$d3],$t0)",
+
+ "&paddd ($xc,@x[$d2])",
+ "&paddd ($xc_,@x[$d3])",
+ "&pxor (@x[$b2],$xc)",
+ "&pxor (@x[$b3],$xc_)",
+ "&movdqa ($t1,@x[$b2])",
+ "&pslld (@x[$b2],7)",
+ "&psrld ($t1,25)",
+ "&movdqa ($t0,@x[$b3])",
+ "&pslld (@x[$b3],7)",
+ "&por (@x[$b2],$t1)",
+ "&psrld ($t0,25)",
+ "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
+ "&por (@x[$b3],$t0)"
+ );
+}
+
+my $xframe = $win64 ? 0xa0 : 0;
+
+$code.=<<___;
+.type ChaCha20_4x,\@function,5
+.align 32
+ChaCha20_4x:
+.LChaCha20_4x:
+ mov %r10,%r11
+___
+$code.=<<___ if ($avx>1);
+ shr \$32,%r10 # OPENSSL_ia32cap_P+8
+ test \$`1<<5`,%r10 # test AVX2
+ jnz .LChaCha20_8x
+___
+$code.=<<___;
+ cmp \$192,$len
+ ja .Lproceed4x
+
+ and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE
+ cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE
+ je .Ldo_sse3_after_all # to detect Atom
+
+.Lproceed4x:
+ lea -0x78(%rsp),%r11
+ sub \$0x148+$xframe,%rsp
+___
+ ################ stack layout
+ # +0x00 SIMD equivalent of @x[8-12]
+ # ...
+ # +0x40 constant copy of key[0-2] smashed by lanes
+ # ...
+ # +0x100 SIMD counters (with nonce smashed by lanes)
+ # ...
+ # +0x140
+$code.=<<___ if ($win64);
+ movaps %xmm6,-0x30(%r11)
+ movaps %xmm7,-0x20(%r11)
+ movaps %xmm8,-0x10(%r11)
+ movaps %xmm9,0x00(%r11)
+ movaps %xmm10,0x10(%r11)
+ movaps %xmm11,0x20(%r11)
+ movaps %xmm12,0x30(%r11)
+ movaps %xmm13,0x40(%r11)
+ movaps %xmm14,0x50(%r11)
+ movaps %xmm15,0x60(%r11)
+___
+$code.=<<___;
+ movdqa .Lsigma(%rip),$xa3 # key[0]
+ movdqu ($key),$xb3 # key[1]
+ movdqu 16($key),$xt3 # key[2]
+ movdqu ($counter),$xd3 # key[3]
+ lea 0x100(%rsp),%rcx # size optimization
+ lea .Lrot16(%rip),%r10
+ lea .Lrot24(%rip),%r11
+
+ pshufd \$0x00,$xa3,$xa0 # smash key by lanes...
+ pshufd \$0x55,$xa3,$xa1
+ movdqa $xa0,0x40(%rsp) # ... and offload
+ pshufd \$0xaa,$xa3,$xa2
+ movdqa $xa1,0x50(%rsp)
+ pshufd \$0xff,$xa3,$xa3
+ movdqa $xa2,0x60(%rsp)
+ movdqa $xa3,0x70(%rsp)
+
+ pshufd \$0x00,$xb3,$xb0
+ pshufd \$0x55,$xb3,$xb1
+ movdqa $xb0,0x80-0x100(%rcx)
+ pshufd \$0xaa,$xb3,$xb2
+ movdqa $xb1,0x90-0x100(%rcx)
+ pshufd \$0xff,$xb3,$xb3
+ movdqa $xb2,0xa0-0x100(%rcx)
+ movdqa $xb3,0xb0-0x100(%rcx)
+
+ pshufd \$0x00,$xt3,$xt0 # "$xc0"
+ pshufd \$0x55,$xt3,$xt1 # "$xc1"
+ movdqa $xt0,0xc0-0x100(%rcx)
+ pshufd \$0xaa,$xt3,$xt2 # "$xc2"
+ movdqa $xt1,0xd0-0x100(%rcx)
+ pshufd \$0xff,$xt3,$xt3 # "$xc3"
+ movdqa $xt2,0xe0-0x100(%rcx)
+ movdqa $xt3,0xf0-0x100(%rcx)
+
+ pshufd \$0x00,$xd3,$xd0
+ pshufd \$0x55,$xd3,$xd1
+ paddd .Linc(%rip),$xd0 # don't save counters yet
+ pshufd \$0xaa,$xd3,$xd2
+ movdqa $xd1,0x110-0x100(%rcx)
+ pshufd \$0xff,$xd3,$xd3
+ movdqa $xd2,0x120-0x100(%rcx)
+ movdqa $xd3,0x130-0x100(%rcx)
+
+ jmp .Loop_enter4x
+
+.align 32
+.Loop_outer4x:
+ movdqa 0x40(%rsp),$xa0 # re-load smashed key
+ movdqa 0x50(%rsp),$xa1
+ movdqa 0x60(%rsp),$xa2
+ movdqa 0x70(%rsp),$xa3
+ movdqa 0x80-0x100(%rcx),$xb0
+ movdqa 0x90-0x100(%rcx),$xb1
+ movdqa 0xa0-0x100(%rcx),$xb2
+ movdqa 0xb0-0x100(%rcx),$xb3
+ movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
+ movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
+ movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
+ movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
+ movdqa 0x100-0x100(%rcx),$xd0
+ movdqa 0x110-0x100(%rcx),$xd1
+ movdqa 0x120-0x100(%rcx),$xd2
+ movdqa 0x130-0x100(%rcx),$xd3
+ paddd .Lfour(%rip),$xd0 # next SIMD counters
+
+.Loop_enter4x:
+ movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
+ movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
+ movdqa (%r10),$xt3 # .Lrot16(%rip)
+ mov \$10,%eax
+ movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
+ jmp .Loop4x
+
+.align 32
+.Loop4x:
+___
+ foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
+ foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+ dec %eax
+ jnz .Loop4x
+
+ paddd 0x40(%rsp),$xa0 # accumulate key material
+ paddd 0x50(%rsp),$xa1
+ paddd 0x60(%rsp),$xa2
+ paddd 0x70(%rsp),$xa3
+
+ movdqa $xa0,$xt2 # "de-interlace" data
+ punpckldq $xa1,$xa0
+ movdqa $xa2,$xt3
+ punpckldq $xa3,$xa2
+ punpckhdq $xa1,$xt2
+ punpckhdq $xa3,$xt3
+ movdqa $xa0,$xa1
+ punpcklqdq $xa2,$xa0 # "a0"
+ movdqa $xt2,$xa3
+ punpcklqdq $xt3,$xt2 # "a2"
+ punpckhqdq $xa2,$xa1 # "a1"
+ punpckhqdq $xt3,$xa3 # "a3"
+___
+ ($xa2,$xt2)=($xt2,$xa2);
+$code.=<<___;
+ paddd 0x80-0x100(%rcx),$xb0
+ paddd 0x90-0x100(%rcx),$xb1
+ paddd 0xa0-0x100(%rcx),$xb2
+ paddd 0xb0-0x100(%rcx),$xb3
+
+ movdqa $xa0,0x00(%rsp) # offload $xaN
+ movdqa $xa1,0x10(%rsp)
+ movdqa 0x20(%rsp),$xa0 # "xc2"
+ movdqa 0x30(%rsp),$xa1 # "xc3"
+
+ movdqa $xb0,$xt2
+ punpckldq $xb1,$xb0
+ movdqa $xb2,$xt3
+ punpckldq $xb3,$xb2
+ punpckhdq $xb1,$xt2
+ punpckhdq $xb3,$xt3
+ movdqa $xb0,$xb1
+ punpcklqdq $xb2,$xb0 # "b0"
+ movdqa $xt2,$xb3
+ punpcklqdq $xt3,$xt2 # "b2"
+ punpckhqdq $xb2,$xb1 # "b1"
+ punpckhqdq $xt3,$xb3 # "b3"
+___
+ ($xb2,$xt2)=($xt2,$xb2);
+ my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
+$code.=<<___;
+ paddd 0xc0-0x100(%rcx),$xc0
+ paddd 0xd0-0x100(%rcx),$xc1
+ paddd 0xe0-0x100(%rcx),$xc2
+ paddd 0xf0-0x100(%rcx),$xc3
+
+ movdqa $xa2,0x20(%rsp) # keep offloading $xaN
+ movdqa $xa3,0x30(%rsp)
+
+ movdqa $xc0,$xt2
+ punpckldq $xc1,$xc0
+ movdqa $xc2,$xt3
+ punpckldq $xc3,$xc2
+ punpckhdq $xc1,$xt2
+ punpckhdq $xc3,$xt3
+ movdqa $xc0,$xc1
+ punpcklqdq $xc2,$xc0 # "c0"
+ movdqa $xt2,$xc3
+ punpcklqdq $xt3,$xt2 # "c2"
+ punpckhqdq $xc2,$xc1 # "c1"
+ punpckhqdq $xt3,$xc3 # "c3"
+___
+ ($xc2,$xt2)=($xt2,$xc2);
+ ($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary
+$code.=<<___;
+ paddd 0x100-0x100(%rcx),$xd0
+ paddd 0x110-0x100(%rcx),$xd1
+ paddd 0x120-0x100(%rcx),$xd2
+ paddd 0x130-0x100(%rcx),$xd3
+
+ movdqa $xd0,$xt2
+ punpckldq $xd1,$xd0
+ movdqa $xd2,$xt3
+ punpckldq $xd3,$xd2
+ punpckhdq $xd1,$xt2
+ punpckhdq $xd3,$xt3
+ movdqa $xd0,$xd1
+ punpcklqdq $xd2,$xd0 # "d0"
+ movdqa $xt2,$xd3
+ punpcklqdq $xt3,$xt2 # "d2"
+ punpckhqdq $xd2,$xd1 # "d1"
+ punpckhqdq $xt3,$xd3 # "d3"
+___
+ ($xd2,$xt2)=($xt2,$xd2);
+$code.=<<___;
+ cmp \$64*4,$len
+ jb .Ltail4x
+
+ movdqu 0x00($inp),$xt0 # xor with input
+ movdqu 0x10($inp),$xt1
+ movdqu 0x20($inp),$xt2
+ movdqu 0x30($inp),$xt3
+ pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
+ pxor $xb0,$xt1
+ pxor $xc0,$xt2
+ pxor $xd0,$xt3
+
+ movdqu $xt0,0x00($out)
+ movdqu 0x40($inp),$xt0
+ movdqu $xt1,0x10($out)
+ movdqu 0x50($inp),$xt1
+ movdqu $xt2,0x20($out)
+ movdqu 0x60($inp),$xt2
+ movdqu $xt3,0x30($out)
+ movdqu 0x70($inp),$xt3
+ lea 0x80($inp),$inp # size optimization
+ pxor 0x10(%rsp),$xt0
+ pxor $xb1,$xt1
+ pxor $xc1,$xt2
+ pxor $xd1,$xt3
+
+ movdqu $xt0,0x40($out)
+ movdqu 0x00($inp),$xt0
+ movdqu $xt1,0x50($out)
+ movdqu 0x10($inp),$xt1
+ movdqu $xt2,0x60($out)
+ movdqu 0x20($inp),$xt2
+ movdqu $xt3,0x70($out)
+ lea 0x80($out),$out # size optimization
+ movdqu 0x30($inp),$xt3
+ pxor 0x20(%rsp),$xt0
+ pxor $xb2,$xt1
+ pxor $xc2,$xt2
+ pxor $xd2,$xt3
+
+ movdqu $xt0,0x00($out)
+ movdqu 0x40($inp),$xt0
+ movdqu $xt1,0x10($out)
+ movdqu 0x50($inp),$xt1
+ movdqu $xt2,0x20($out)
+ movdqu 0x60($inp),$xt2
+ movdqu $xt3,0x30($out)
+ movdqu 0x70($inp),$xt3
+ lea 0x80($inp),$inp # inp+=64*4
+ pxor 0x30(%rsp),$xt0
+ pxor $xb3,$xt1
+ pxor $xc3,$xt2
+ pxor $xd3,$xt3
+ movdqu $xt0,0x40($out)
+ movdqu $xt1,0x50($out)
+ movdqu $xt2,0x60($out)
+ movdqu $xt3,0x70($out)
+ lea 0x80($out),$out # out+=64*4
+
+ sub \$64*4,$len
+ jnz .Loop_outer4x
+
+ jmp .Ldone4x
+
+.Ltail4x:
+ cmp \$192,$len
+ jae .L192_or_more4x
+ cmp \$128,$len
+ jae .L128_or_more4x
+ cmp \$64,$len
+ jae .L64_or_more4x
+
+ #movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
+ xor %r10,%r10
+ #movdqa $xt0,0x00(%rsp)
+ movdqa $xb0,0x10(%rsp)
+ movdqa $xc0,0x20(%rsp)
+ movdqa $xd0,0x30(%rsp)
+ jmp .Loop_tail4x
+
+.align 32
+.L64_or_more4x:
+ movdqu 0x00($inp),$xt0 # xor with input
+ movdqu 0x10($inp),$xt1
+ movdqu 0x20($inp),$xt2
+ movdqu 0x30($inp),$xt3
+	pxor	0x00(%rsp),$xt0		# $xaN is offloaded, remember?
+ pxor $xb0,$xt1
+ pxor $xc0,$xt2
+ pxor $xd0,$xt3
+ movdqu $xt0,0x00($out)
+ movdqu $xt1,0x10($out)
+ movdqu $xt2,0x20($out)
+ movdqu $xt3,0x30($out)
+ je .Ldone4x
+
+ movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
+ lea 0x40($inp),$inp # inp+=64*1
+ xor %r10,%r10
+ movdqa $xt0,0x00(%rsp)
+ movdqa $xb1,0x10(%rsp)
+ lea 0x40($out),$out # out+=64*1
+ movdqa $xc1,0x20(%rsp)
+ sub \$64,$len # len-=64*1
+ movdqa $xd1,0x30(%rsp)
+ jmp .Loop_tail4x
+
+.align 32
+.L128_or_more4x:
+ movdqu 0x00($inp),$xt0 # xor with input
+ movdqu 0x10($inp),$xt1
+ movdqu 0x20($inp),$xt2
+ movdqu 0x30($inp),$xt3
+ pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
+ pxor $xb0,$xt1
+ pxor $xc0,$xt2
+ pxor $xd0,$xt3
+
+ movdqu $xt0,0x00($out)
+ movdqu 0x40($inp),$xt0
+ movdqu $xt1,0x10($out)
+ movdqu 0x50($inp),$xt1
+ movdqu $xt2,0x20($out)
+ movdqu 0x60($inp),$xt2
+ movdqu $xt3,0x30($out)
+ movdqu 0x70($inp),$xt3
+ pxor 0x10(%rsp),$xt0
+ pxor $xb1,$xt1
+ pxor $xc1,$xt2
+ pxor $xd1,$xt3
+ movdqu $xt0,0x40($out)
+ movdqu $xt1,0x50($out)
+ movdqu $xt2,0x60($out)
+ movdqu $xt3,0x70($out)
+ je .Ldone4x
+
+ movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
+ lea 0x80($inp),$inp # inp+=64*2
+ xor %r10,%r10
+ movdqa $xt0,0x00(%rsp)
+ movdqa $xb2,0x10(%rsp)
+ lea 0x80($out),$out # out+=64*2
+ movdqa $xc2,0x20(%rsp)
+ sub \$128,$len # len-=64*2
+ movdqa $xd2,0x30(%rsp)
+ jmp .Loop_tail4x
+
+.align 32
+.L192_or_more4x:
+ movdqu 0x00($inp),$xt0 # xor with input
+ movdqu 0x10($inp),$xt1
+ movdqu 0x20($inp),$xt2
+ movdqu 0x30($inp),$xt3
+ pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
+ pxor $xb0,$xt1
+ pxor $xc0,$xt2
+ pxor $xd0,$xt3
+
+ movdqu $xt0,0x00($out)
+ movdqu 0x40($inp),$xt0
+ movdqu $xt1,0x10($out)
+ movdqu 0x50($inp),$xt1
+ movdqu $xt2,0x20($out)
+ movdqu 0x60($inp),$xt2
+ movdqu $xt3,0x30($out)
+ movdqu 0x70($inp),$xt3
+ lea 0x80($inp),$inp # size optimization
+ pxor 0x10(%rsp),$xt0
+ pxor $xb1,$xt1
+ pxor $xc1,$xt2
+ pxor $xd1,$xt3
+
+ movdqu $xt0,0x40($out)
+ movdqu 0x00($inp),$xt0
+ movdqu $xt1,0x50($out)
+ movdqu 0x10($inp),$xt1
+ movdqu $xt2,0x60($out)
+ movdqu 0x20($inp),$xt2
+ movdqu $xt3,0x70($out)
+ lea 0x80($out),$out # size optimization
+ movdqu 0x30($inp),$xt3
+ pxor 0x20(%rsp),$xt0
+ pxor $xb2,$xt1
+ pxor $xc2,$xt2
+ pxor $xd2,$xt3
+ movdqu $xt0,0x00($out)
+ movdqu $xt1,0x10($out)
+ movdqu $xt2,0x20($out)
+ movdqu $xt3,0x30($out)
+ je .Ldone4x
+
+ movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
+ lea 0x40($inp),$inp # inp+=64*3
+ xor %r10,%r10
+ movdqa $xt0,0x00(%rsp)
+ movdqa $xb3,0x10(%rsp)
+ lea 0x40($out),$out # out+=64*3
+ movdqa $xc3,0x20(%rsp)
+ sub \$192,$len # len-=64*3
+ movdqa $xd3,0x30(%rsp)
+
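+	# The partial-block paths above staged the leftover keystream words
+	# at 0x00-0x30(%rsp) and cleared %r10; the loop below xors them into
+	# the input one byte at a time until the remaining length is consumed.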
+.Loop_tail4x:
+ movzb ($inp,%r10),%eax
+ movzb (%rsp,%r10),%ecx
+ lea 1(%r10),%r10
+ xor %ecx,%eax
+ mov %al,-1($out,%r10)
+ dec $len
+ jnz .Loop_tail4x
+
+.Ldone4x:
+___
+$code.=<<___ if ($win64);
+ lea 0x140+0x30(%rsp),%r11
+ movaps -0x30(%r11),%xmm6
+ movaps -0x20(%r11),%xmm7
+ movaps -0x10(%r11),%xmm8
+ movaps 0x00(%r11),%xmm9
+ movaps 0x10(%r11),%xmm10
+ movaps 0x20(%r11),%xmm11
+ movaps 0x30(%r11),%xmm12
+ movaps 0x40(%r11),%xmm13
+ movaps 0x50(%r11),%xmm14
+ movaps 0x60(%r11),%xmm15
+___
+$code.=<<___;
+ add \$0x148+$xframe,%rsp
+ ret
+.size ChaCha20_4x,.-ChaCha20_4x
+___
+}
+
+########################################################################
+# XOP code path that handles all lengths.
+if ($avx) {
+# There is some "anomaly" observed depending on instruction size or
+# alignment. If you look closely at the code below you'll notice that
+# the argument order sometimes varies. The order affects instruction
+# encoding by making it larger, and such fiddling gives a 5% performance
+# improvement. This is on FX-4100...
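+# The swapped-operand instances are the ones tagged "# flip" in
+# XOP_lane_ROUND below.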
+
+my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
+ $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
+my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+ $xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
+
+sub XOP_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my @x=map("\"$_\"",@xx);
+
+ (
+ "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
+ "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
+ "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
+ "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
+ "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
+ "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
+ "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
+ "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
+ "&vprotd (@x[$d0],@x[$d0],16)",
+ "&vprotd (@x[$d1],@x[$d1],16)",
+ "&vprotd (@x[$d2],@x[$d2],16)",
+ "&vprotd (@x[$d3],@x[$d3],16)",
+
+ "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
+ "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
+ "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
+ "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
+ "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
+ "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
+ "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
+ "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
+ "&vprotd (@x[$b0],@x[$b0],12)",
+ "&vprotd (@x[$b1],@x[$b1],12)",
+ "&vprotd (@x[$b2],@x[$b2],12)",
+ "&vprotd (@x[$b3],@x[$b3],12)",
+
+ "&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip
+ "&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip
+ "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
+ "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
+ "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
+ "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
+ "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
+ "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
+ "&vprotd (@x[$d0],@x[$d0],8)",
+ "&vprotd (@x[$d1],@x[$d1],8)",
+ "&vprotd (@x[$d2],@x[$d2],8)",
+ "&vprotd (@x[$d3],@x[$d3],8)",
+
+ "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
+ "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
+ "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
+ "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
+ "&vpxor (@x[$b0],@x[$c0],@x[$b0])",
+ "&vpxor (@x[$b1],@x[$c1],@x[$b1])",
+ "&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
+ "&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
+ "&vprotd (@x[$b0],@x[$b0],7)",
+ "&vprotd (@x[$b1],@x[$b1],7)",
+ "&vprotd (@x[$b2],@x[$b2],7)",
+ "&vprotd (@x[$b3],@x[$b3],7)"
+ );
+}
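+# Unlike the other lane rounds, which emulate rotations with byte-shuffle
+# masks or shift/or pairs, XOP's vprotd is a native 32-bit rotate, so each
+# of the four rotation amounts (16, 12, 8, 7) is a single instruction and
+# the .Lrot16/.Lrot24 tables are not used in this path.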
+
+my $xframe = $win64 ? 0xa0 : 0;
+
+$code.=<<___;
+.type ChaCha20_4xop,\@function,5
+.align 32
+ChaCha20_4xop:
+.LChaCha20_4xop:
+ lea -0x78(%rsp),%r11
+ sub \$0x148+$xframe,%rsp
+___
+ ################ stack layout
+	# +0x00 SIMD equivalent of @x[8-11]
+ # ...
+ # +0x40 constant copy of key[0-2] smashed by lanes
+ # ...
+ # +0x100 SIMD counters (with nonce smashed by lanes)
+ # ...
+ # +0x140
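+	# The layout and the %rcx = %rsp+0x100 addressing trick are the same
+	# as in ChaCha20_4x above.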
+$code.=<<___ if ($win64);
+ movaps %xmm6,-0x30(%r11)
+ movaps %xmm7,-0x20(%r11)
+ movaps %xmm8,-0x10(%r11)
+ movaps %xmm9,0x00(%r11)
+ movaps %xmm10,0x10(%r11)
+ movaps %xmm11,0x20(%r11)
+ movaps %xmm12,0x30(%r11)
+ movaps %xmm13,0x40(%r11)
+ movaps %xmm14,0x50(%r11)
+ movaps %xmm15,0x60(%r11)
+___
+$code.=<<___;
+ vzeroupper
+
+ vmovdqa .Lsigma(%rip),$xa3 # key[0]
+ vmovdqu ($key),$xb3 # key[1]
+ vmovdqu 16($key),$xt3 # key[2]
+ vmovdqu ($counter),$xd3 # key[3]
+ lea 0x100(%rsp),%rcx # size optimization
+
+ vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
+ vpshufd \$0x55,$xa3,$xa1
+ vmovdqa $xa0,0x40(%rsp) # ... and offload
+ vpshufd \$0xaa,$xa3,$xa2
+ vmovdqa $xa1,0x50(%rsp)
+ vpshufd \$0xff,$xa3,$xa3
+ vmovdqa $xa2,0x60(%rsp)
+ vmovdqa $xa3,0x70(%rsp)
+
+ vpshufd \$0x00,$xb3,$xb0
+ vpshufd \$0x55,$xb3,$xb1
+ vmovdqa $xb0,0x80-0x100(%rcx)
+ vpshufd \$0xaa,$xb3,$xb2
+ vmovdqa $xb1,0x90-0x100(%rcx)
+ vpshufd \$0xff,$xb3,$xb3
+ vmovdqa $xb2,0xa0-0x100(%rcx)
+ vmovdqa $xb3,0xb0-0x100(%rcx)
+
+ vpshufd \$0x00,$xt3,$xt0 # "$xc0"
+ vpshufd \$0x55,$xt3,$xt1 # "$xc1"
+ vmovdqa $xt0,0xc0-0x100(%rcx)
+ vpshufd \$0xaa,$xt3,$xt2 # "$xc2"
+ vmovdqa $xt1,0xd0-0x100(%rcx)
+ vpshufd \$0xff,$xt3,$xt3 # "$xc3"
+ vmovdqa $xt2,0xe0-0x100(%rcx)
+ vmovdqa $xt3,0xf0-0x100(%rcx)
+
+ vpshufd \$0x00,$xd3,$xd0
+ vpshufd \$0x55,$xd3,$xd1
+ vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet
+ vpshufd \$0xaa,$xd3,$xd2
+ vmovdqa $xd1,0x110-0x100(%rcx)
+ vpshufd \$0xff,$xd3,$xd3
+ vmovdqa $xd2,0x120-0x100(%rcx)
+ vmovdqa $xd3,0x130-0x100(%rcx)
+
+ jmp .Loop_enter4xop
+
+.align 32
+.Loop_outer4xop:
+ vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
+ vmovdqa 0x50(%rsp),$xa1
+ vmovdqa 0x60(%rsp),$xa2
+ vmovdqa 0x70(%rsp),$xa3
+ vmovdqa 0x80-0x100(%rcx),$xb0
+ vmovdqa 0x90-0x100(%rcx),$xb1
+ vmovdqa 0xa0-0x100(%rcx),$xb2
+ vmovdqa 0xb0-0x100(%rcx),$xb3
+ vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
+ vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
+ vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
+ vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
+ vmovdqa 0x100-0x100(%rcx),$xd0
+ vmovdqa 0x110-0x100(%rcx),$xd1
+ vmovdqa 0x120-0x100(%rcx),$xd2
+ vmovdqa 0x130-0x100(%rcx),$xd3
+ vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters
+
+.Loop_enter4xop:
+ mov \$10,%eax
+ vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
+ jmp .Loop4xop
+
+.align 32
+.Loop4xop:
+___
+ foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
+ foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+ dec %eax
+ jnz .Loop4xop
+
+ vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
+ vpaddd 0x50(%rsp),$xa1,$xa1
+ vpaddd 0x60(%rsp),$xa2,$xa2
+ vpaddd 0x70(%rsp),$xa3,$xa3
+
+ vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
+ vmovdqa $xt3,0x30(%rsp)
+
+ vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
+ vpunpckldq $xa3,$xa2,$xt3
+ vpunpckhdq $xa1,$xa0,$xa0
+ vpunpckhdq $xa3,$xa2,$xa2
+ vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
+ vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
+ vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
+___
+ ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
+$code.=<<___;
+ vpaddd 0x80-0x100(%rcx),$xb0,$xb0
+ vpaddd 0x90-0x100(%rcx),$xb1,$xb1
+ vpaddd 0xa0-0x100(%rcx),$xb2,$xb2
+ vpaddd 0xb0-0x100(%rcx),$xb3,$xb3
+
+ vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
+ vmovdqa $xa1,0x10(%rsp)
+ vmovdqa 0x20(%rsp),$xa0 # "xc2"
+ vmovdqa 0x30(%rsp),$xa1 # "xc3"
+
+ vpunpckldq $xb1,$xb0,$xt2
+ vpunpckldq $xb3,$xb2,$xt3
+ vpunpckhdq $xb1,$xb0,$xb0
+ vpunpckhdq $xb3,$xb2,$xb2
+ vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
+ vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
+ vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
+___
+ ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
+ my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
+$code.=<<___;
+ vpaddd 0xc0-0x100(%rcx),$xc0,$xc0
+ vpaddd 0xd0-0x100(%rcx),$xc1,$xc1
+ vpaddd 0xe0-0x100(%rcx),$xc2,$xc2
+ vpaddd 0xf0-0x100(%rcx),$xc3,$xc3
+
+ vpunpckldq $xc1,$xc0,$xt2
+ vpunpckldq $xc3,$xc2,$xt3
+ vpunpckhdq $xc1,$xc0,$xc0
+ vpunpckhdq $xc3,$xc2,$xc2
+ vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
+ vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
+ vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
+___
+ ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
+$code.=<<___;
+ vpaddd 0x100-0x100(%rcx),$xd0,$xd0
+ vpaddd 0x110-0x100(%rcx),$xd1,$xd1
+ vpaddd 0x120-0x100(%rcx),$xd2,$xd2
+ vpaddd 0x130-0x100(%rcx),$xd3,$xd3
+
+ vpunpckldq $xd1,$xd0,$xt2
+ vpunpckldq $xd3,$xd2,$xt3
+ vpunpckhdq $xd1,$xd0,$xd0
+ vpunpckhdq $xd3,$xd2,$xd2
+ vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
+ vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
+ vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
+___
+ ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
+ ($xa0,$xa1)=($xt2,$xt3);
+$code.=<<___;
+ vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
+ vmovdqa 0x10(%rsp),$xa1
+
+ cmp \$64*4,$len
+ jb .Ltail4xop
+
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x10($inp),$xb0,$xb0
+ vpxor 0x20($inp),$xc0,$xc0
+ vpxor 0x30($inp),$xd0,$xd0
+ vpxor 0x40($inp),$xa1,$xa1
+ vpxor 0x50($inp),$xb1,$xb1
+ vpxor 0x60($inp),$xc1,$xc1
+ vpxor 0x70($inp),$xd1,$xd1
+ lea 0x80($inp),$inp # size optimization
+ vpxor 0x00($inp),$xa2,$xa2
+ vpxor 0x10($inp),$xb2,$xb2
+ vpxor 0x20($inp),$xc2,$xc2
+ vpxor 0x30($inp),$xd2,$xd2
+ vpxor 0x40($inp),$xa3,$xa3
+ vpxor 0x50($inp),$xb3,$xb3
+ vpxor 0x60($inp),$xc3,$xc3
+ vpxor 0x70($inp),$xd3,$xd3
+ lea 0x80($inp),$inp # inp+=64*4
+
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x10($out)
+ vmovdqu $xc0,0x20($out)
+ vmovdqu $xd0,0x30($out)
+ vmovdqu $xa1,0x40($out)
+ vmovdqu $xb1,0x50($out)
+ vmovdqu $xc1,0x60($out)
+ vmovdqu $xd1,0x70($out)
+ lea 0x80($out),$out # size optimization
+ vmovdqu $xa2,0x00($out)
+ vmovdqu $xb2,0x10($out)
+ vmovdqu $xc2,0x20($out)
+ vmovdqu $xd2,0x30($out)
+ vmovdqu $xa3,0x40($out)
+ vmovdqu $xb3,0x50($out)
+ vmovdqu $xc3,0x60($out)
+ vmovdqu $xd3,0x70($out)
+ lea 0x80($out),$out # out+=64*4
+
+ sub \$64*4,$len
+ jnz .Loop_outer4xop
+
+ jmp .Ldone4xop
+
+.align 32
+.Ltail4xop:
+ cmp \$192,$len
+ jae .L192_or_more4xop
+ cmp \$128,$len
+ jae .L128_or_more4xop
+ cmp \$64,$len
+ jae .L64_or_more4xop
+
+ xor %r10,%r10
+ vmovdqa $xa0,0x00(%rsp)
+ vmovdqa $xb0,0x10(%rsp)
+ vmovdqa $xc0,0x20(%rsp)
+ vmovdqa $xd0,0x30(%rsp)
+ jmp .Loop_tail4xop
+
+.align 32
+.L64_or_more4xop:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x10($inp),$xb0,$xb0
+ vpxor 0x20($inp),$xc0,$xc0
+ vpxor 0x30($inp),$xd0,$xd0
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x10($out)
+ vmovdqu $xc0,0x20($out)
+ vmovdqu $xd0,0x30($out)
+ je .Ldone4xop
+
+ lea 0x40($inp),$inp # inp+=64*1
+ vmovdqa $xa1,0x00(%rsp)
+ xor %r10,%r10
+ vmovdqa $xb1,0x10(%rsp)
+ lea 0x40($out),$out # out+=64*1
+ vmovdqa $xc1,0x20(%rsp)
+ sub \$64,$len # len-=64*1
+ vmovdqa $xd1,0x30(%rsp)
+ jmp .Loop_tail4xop
+
+.align 32
+.L128_or_more4xop:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x10($inp),$xb0,$xb0
+ vpxor 0x20($inp),$xc0,$xc0
+ vpxor 0x30($inp),$xd0,$xd0
+ vpxor 0x40($inp),$xa1,$xa1
+ vpxor 0x50($inp),$xb1,$xb1
+ vpxor 0x60($inp),$xc1,$xc1
+ vpxor 0x70($inp),$xd1,$xd1
+
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x10($out)
+ vmovdqu $xc0,0x20($out)
+ vmovdqu $xd0,0x30($out)
+ vmovdqu $xa1,0x40($out)
+ vmovdqu $xb1,0x50($out)
+ vmovdqu $xc1,0x60($out)
+ vmovdqu $xd1,0x70($out)
+ je .Ldone4xop
+
+ lea 0x80($inp),$inp # inp+=64*2
+ vmovdqa $xa2,0x00(%rsp)
+ xor %r10,%r10
+ vmovdqa $xb2,0x10(%rsp)
+ lea 0x80($out),$out # out+=64*2
+ vmovdqa $xc2,0x20(%rsp)
+ sub \$128,$len # len-=64*2
+ vmovdqa $xd2,0x30(%rsp)
+ jmp .Loop_tail4xop
+
+.align 32
+.L192_or_more4xop:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x10($inp),$xb0,$xb0
+ vpxor 0x20($inp),$xc0,$xc0
+ vpxor 0x30($inp),$xd0,$xd0
+ vpxor 0x40($inp),$xa1,$xa1
+ vpxor 0x50($inp),$xb1,$xb1
+ vpxor 0x60($inp),$xc1,$xc1
+ vpxor 0x70($inp),$xd1,$xd1
+ lea 0x80($inp),$inp # size optimization
+ vpxor 0x00($inp),$xa2,$xa2
+ vpxor 0x10($inp),$xb2,$xb2
+ vpxor 0x20($inp),$xc2,$xc2
+ vpxor 0x30($inp),$xd2,$xd2
+
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x10($out)
+ vmovdqu $xc0,0x20($out)
+ vmovdqu $xd0,0x30($out)
+ vmovdqu $xa1,0x40($out)
+ vmovdqu $xb1,0x50($out)
+ vmovdqu $xc1,0x60($out)
+ vmovdqu $xd1,0x70($out)
+ lea 0x80($out),$out # size optimization
+ vmovdqu $xa2,0x00($out)
+ vmovdqu $xb2,0x10($out)
+ vmovdqu $xc2,0x20($out)
+ vmovdqu $xd2,0x30($out)
+ je .Ldone4xop
+
+ lea 0x40($inp),$inp # inp+=64*3
+ vmovdqa $xa3,0x00(%rsp)
+ xor %r10,%r10
+ vmovdqa $xb3,0x10(%rsp)
+ lea 0x40($out),$out # out+=64*3
+ vmovdqa $xc3,0x20(%rsp)
+ sub \$192,$len # len-=64*3
+ vmovdqa $xd3,0x30(%rsp)
+
+.Loop_tail4xop:
+ movzb ($inp,%r10),%eax
+ movzb (%rsp,%r10),%ecx
+ lea 1(%r10),%r10
+ xor %ecx,%eax
+ mov %al,-1($out,%r10)
+ dec $len
+ jnz .Loop_tail4xop
+
+.Ldone4xop:
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ lea 0x140+0x30(%rsp),%r11
+ movaps -0x30(%r11),%xmm6
+ movaps -0x20(%r11),%xmm7
+ movaps -0x10(%r11),%xmm8
+ movaps 0x00(%r11),%xmm9
+ movaps 0x10(%r11),%xmm10
+ movaps 0x20(%r11),%xmm11
+ movaps 0x30(%r11),%xmm12
+ movaps 0x40(%r11),%xmm13
+ movaps 0x50(%r11),%xmm14
+ movaps 0x60(%r11),%xmm15
+___
+$code.=<<___;
+ add \$0x148+$xframe,%rsp
+ ret
+.size ChaCha20_4xop,.-ChaCha20_4xop
+___
+}
+
+########################################################################
+# AVX2 code path
+if ($avx>1) {
+my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
+ $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
+my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
+ "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
+
+sub AVX2_lane_ROUND {
+my ($a0,$b0,$c0,$d0)=@_;
+my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
+my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
+my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
+my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
+my @x=map("\"$_\"",@xx);
+
+ # Consider order in which variables are addressed by their
+ # index:
+ #
+ # a b c d
+ #
+ # 0 4 8 12 < even round
+ # 1 5 9 13
+ # 2 6 10 14
+ # 3 7 11 15
+ # 0 5 10 15 < odd round
+ # 1 6 11 12
+ # 2 7 8 13
+ # 3 4 9 14
+ #
+	# 'a', 'b' and 'd's are permanently allocated in registers,
+	# @x[0..7,12..15], while 'c's are maintained in memory. If
+	# you observe the 'c' column, you'll notice that a pair of
+	# 'c's is invariant between rounds. This means that we have
+	# to reload them only once per round, in the middle. This is
+	# why you'll see a bunch of 'c' stores and loads in the
+	# middle, but none at the beginning or end.
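+	# Concretely, the 'c' row with index $c (8..11) lives in the
+	# 32*($c-8)(%rsp) slot, i.e. at 0x00, 0x20, 0x40 or 0x60(%rsp),
+	# which is what the vmovdqa spill/reload pair below addresses.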
+
+ (
+ "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
+ "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
+ "&vpshufb (@x[$d0],@x[$d0],$t1)",
+ "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
+ "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
+ "&vpshufb (@x[$d1],@x[$d1],$t1)",
+
+ "&vpaddd ($xc,$xc,@x[$d0])",
+ "&vpxor (@x[$b0],$xc,@x[$b0])",
+ "&vpslld ($t0,@x[$b0],12)",
+ "&vpsrld (@x[$b0],@x[$b0],20)",
+ "&vpor (@x[$b0],$t0,@x[$b0])",
+ "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
+ "&vpaddd ($xc_,$xc_,@x[$d1])",
+ "&vpxor (@x[$b1],$xc_,@x[$b1])",
+ "&vpslld ($t1,@x[$b1],12)",
+ "&vpsrld (@x[$b1],@x[$b1],20)",
+ "&vpor (@x[$b1],$t1,@x[$b1])",
+
+ "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
+ "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
+ "&vpshufb (@x[$d0],@x[$d0],$t0)",
+ "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
+ "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
+ "&vpshufb (@x[$d1],@x[$d1],$t0)",
+
+ "&vpaddd ($xc,$xc,@x[$d0])",
+ "&vpxor (@x[$b0],$xc,@x[$b0])",
+ "&vpslld ($t1,@x[$b0],7)",
+ "&vpsrld (@x[$b0],@x[$b0],25)",
+ "&vpor (@x[$b0],$t1,@x[$b0])",
+ "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
+ "&vpaddd ($xc_,$xc_,@x[$d1])",
+ "&vpxor (@x[$b1],$xc_,@x[$b1])",
+ "&vpslld ($t0,@x[$b1],7)",
+ "&vpsrld (@x[$b1],@x[$b1],25)",
+ "&vpor (@x[$b1],$t0,@x[$b1])",
+
+ "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
+ "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
+ "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
+ "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
+
+ "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
+ "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
+ "&vpshufb (@x[$d2],@x[$d2],$t1)",
+ "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
+ "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
+ "&vpshufb (@x[$d3],@x[$d3],$t1)",
+
+ "&vpaddd ($xc,$xc,@x[$d2])",
+ "&vpxor (@x[$b2],$xc,@x[$b2])",
+ "&vpslld ($t0,@x[$b2],12)",
+ "&vpsrld (@x[$b2],@x[$b2],20)",
+ "&vpor (@x[$b2],$t0,@x[$b2])",
+ "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
+ "&vpaddd ($xc_,$xc_,@x[$d3])",
+ "&vpxor (@x[$b3],$xc_,@x[$b3])",
+ "&vpslld ($t1,@x[$b3],12)",
+ "&vpsrld (@x[$b3],@x[$b3],20)",
+ "&vpor (@x[$b3],$t1,@x[$b3])",
+
+ "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
+ "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
+ "&vpshufb (@x[$d2],@x[$d2],$t0)",
+ "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
+ "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
+ "&vpshufb (@x[$d3],@x[$d3],$t0)",
+
+ "&vpaddd ($xc,$xc,@x[$d2])",
+ "&vpxor (@x[$b2],$xc,@x[$b2])",
+ "&vpslld ($t1,@x[$b2],7)",
+ "&vpsrld (@x[$b2],@x[$b2],25)",
+ "&vpor (@x[$b2],$t1,@x[$b2])",
+ "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
+ "&vpaddd ($xc_,$xc_,@x[$d3])",
+ "&vpxor (@x[$b3],$xc_,@x[$b3])",
+ "&vpslld ($t0,@x[$b3],7)",
+ "&vpsrld (@x[$b3],@x[$b3],25)",
+ "&vpor (@x[$b3],$t0,@x[$b3])"
+ );
+}
+
+my $xframe = $win64 ? 0xb0 : 8;
+
+$code.=<<___;
+.type ChaCha20_8x,\@function,5
+.align 32
+ChaCha20_8x:
+.LChaCha20_8x:
+ mov %rsp,%r10
+ sub \$0x280+$xframe,%rsp
+ and \$-32,%rsp
+___
+$code.=<<___ if ($win64);
+ lea 0x290+0x30(%rsp),%r11
+ movaps %xmm6,-0x30(%r11)
+ movaps %xmm7,-0x20(%r11)
+ movaps %xmm8,-0x10(%r11)
+ movaps %xmm9,0x00(%r11)
+ movaps %xmm10,0x10(%r11)
+ movaps %xmm11,0x20(%r11)
+ movaps %xmm12,0x30(%r11)
+ movaps %xmm13,0x40(%r11)
+ movaps %xmm14,0x50(%r11)
+ movaps %xmm15,0x60(%r11)
+___
+$code.=<<___;
+ vzeroupper
+ mov %r10,0x280(%rsp)
+
+ ################ stack layout
+	# +0x00 SIMD equivalent of @x[8-11]
+ # ...
+ # +0x80 constant copy of key[0-2] smashed by lanes
+ # ...
+ # +0x200 SIMD counters (with nonce smashed by lanes)
+ # ...
+ # +0x280 saved %rsp
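+	# The key and counter regions are addressed through %rcx (%rsp+0x100)
+	# and %rax (%rsp+0x200), both set up below only to shorten the
+	# displacement encodings.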
+
+ vbroadcasti128 .Lsigma(%rip),$xa3 # key[0]
+ vbroadcasti128 ($key),$xb3 # key[1]
+ vbroadcasti128 16($key),$xt3 # key[2]
+ vbroadcasti128 ($counter),$xd3 # key[3]
+ lea 0x100(%rsp),%rcx # size optimization
+ lea 0x200(%rsp),%rax # size optimization
+ lea .Lrot16(%rip),%r10
+ lea .Lrot24(%rip),%r11
+
+ vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
+ vpshufd \$0x55,$xa3,$xa1
+ vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload
+ vpshufd \$0xaa,$xa3,$xa2
+ vmovdqa $xa1,0xa0-0x100(%rcx)
+ vpshufd \$0xff,$xa3,$xa3
+ vmovdqa $xa2,0xc0-0x100(%rcx)
+ vmovdqa $xa3,0xe0-0x100(%rcx)
+
+ vpshufd \$0x00,$xb3,$xb0
+ vpshufd \$0x55,$xb3,$xb1
+ vmovdqa $xb0,0x100-0x100(%rcx)
+ vpshufd \$0xaa,$xb3,$xb2
+ vmovdqa $xb1,0x120-0x100(%rcx)
+ vpshufd \$0xff,$xb3,$xb3
+ vmovdqa $xb2,0x140-0x100(%rcx)
+ vmovdqa $xb3,0x160-0x100(%rcx)
+
+ vpshufd \$0x00,$xt3,$xt0 # "xc0"
+ vpshufd \$0x55,$xt3,$xt1 # "xc1"
+ vmovdqa $xt0,0x180-0x200(%rax)
+ vpshufd \$0xaa,$xt3,$xt2 # "xc2"
+ vmovdqa $xt1,0x1a0-0x200(%rax)
+ vpshufd \$0xff,$xt3,$xt3 # "xc3"
+ vmovdqa $xt2,0x1c0-0x200(%rax)
+ vmovdqa $xt3,0x1e0-0x200(%rax)
+
+ vpshufd \$0x00,$xd3,$xd0
+ vpshufd \$0x55,$xd3,$xd1
+ vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
+ vpshufd \$0xaa,$xd3,$xd2
+ vmovdqa $xd1,0x220-0x200(%rax)
+ vpshufd \$0xff,$xd3,$xd3
+ vmovdqa $xd2,0x240-0x200(%rax)
+ vmovdqa $xd3,0x260-0x200(%rax)
+
+ jmp .Loop_enter8x
+
+.align 32
+.Loop_outer8x:
+ vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key
+ vmovdqa 0xa0-0x100(%rcx),$xa1
+ vmovdqa 0xc0-0x100(%rcx),$xa2
+ vmovdqa 0xe0-0x100(%rcx),$xa3
+ vmovdqa 0x100-0x100(%rcx),$xb0
+ vmovdqa 0x120-0x100(%rcx),$xb1
+ vmovdqa 0x140-0x100(%rcx),$xb2
+ vmovdqa 0x160-0x100(%rcx),$xb3
+ vmovdqa 0x180-0x200(%rax),$xt0 # "xc0"
+ vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1"
+ vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2"
+ vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3"
+ vmovdqa 0x200-0x200(%rax),$xd0
+ vmovdqa 0x220-0x200(%rax),$xd1
+ vmovdqa 0x240-0x200(%rax),$xd2
+ vmovdqa 0x260-0x200(%rax),$xd3
+ vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters
+
+.Loop_enter8x:
+ vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
+ vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
+ vbroadcasti128 (%r10),$xt3
+ vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters
+ mov \$10,%eax
+ jmp .Loop8x
+
+.align 32
+.Loop8x:
+___
+ foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
+ foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
+$code.=<<___;
+ dec %eax
+ jnz .Loop8x
+
+ lea 0x200(%rsp),%rax # size optimization
+ vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key
+ vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
+ vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
+ vpaddd 0xe0-0x100(%rcx),$xa3,$xa3
+
+ vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
+ vpunpckldq $xa3,$xa2,$xt3
+ vpunpckhdq $xa1,$xa0,$xa0
+ vpunpckhdq $xa3,$xa2,$xa2
+ vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
+ vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
+ vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
+___
+ ($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
+$code.=<<___;
+ vpaddd 0x100-0x100(%rcx),$xb0,$xb0
+ vpaddd 0x120-0x100(%rcx),$xb1,$xb1
+ vpaddd 0x140-0x100(%rcx),$xb2,$xb2
+ vpaddd 0x160-0x100(%rcx),$xb3,$xb3
+
+ vpunpckldq $xb1,$xb0,$xt2
+ vpunpckldq $xb3,$xb2,$xt3
+ vpunpckhdq $xb1,$xb0,$xb0
+ vpunpckhdq $xb3,$xb2,$xb2
+ vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
+ vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
+ vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
+___
+ ($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
+$code.=<<___;
+ vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further
+ vperm2i128 \$0x31,$xb0,$xa0,$xb0
+ vperm2i128 \$0x20,$xb1,$xa1,$xa0
+ vperm2i128 \$0x31,$xb1,$xa1,$xb1
+ vperm2i128 \$0x20,$xb2,$xa2,$xa1
+ vperm2i128 \$0x31,$xb2,$xa2,$xb2
+ vperm2i128 \$0x20,$xb3,$xa3,$xa2
+ vperm2i128 \$0x31,$xb3,$xa3,$xb3
+___
+ ($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
+ my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
+$code.=<<___;
+ vmovdqa $xa0,0x00(%rsp) # offload $xaN
+ vmovdqa $xa1,0x20(%rsp)
+ vmovdqa 0x40(%rsp),$xc2 # $xa0
+ vmovdqa 0x60(%rsp),$xc3 # $xa1
+
+ vpaddd 0x180-0x200(%rax),$xc0,$xc0
+ vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
+ vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
+ vpaddd 0x1e0-0x200(%rax),$xc3,$xc3
+
+ vpunpckldq $xc1,$xc0,$xt2
+ vpunpckldq $xc3,$xc2,$xt3
+ vpunpckhdq $xc1,$xc0,$xc0
+ vpunpckhdq $xc3,$xc2,$xc2
+ vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
+ vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
+ vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
+___
+ ($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
+$code.=<<___;
+ vpaddd 0x200-0x200(%rax),$xd0,$xd0
+ vpaddd 0x220-0x200(%rax),$xd1,$xd1
+ vpaddd 0x240-0x200(%rax),$xd2,$xd2
+ vpaddd 0x260-0x200(%rax),$xd3,$xd3
+
+ vpunpckldq $xd1,$xd0,$xt2
+ vpunpckldq $xd3,$xd2,$xt3
+ vpunpckhdq $xd1,$xd0,$xd0
+ vpunpckhdq $xd3,$xd2,$xd2
+ vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
+ vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
+ vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
+ vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
+___
+ ($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
+$code.=<<___;
+ vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
+ vperm2i128 \$0x31,$xd0,$xc0,$xd0
+ vperm2i128 \$0x20,$xd1,$xc1,$xc0
+ vperm2i128 \$0x31,$xd1,$xc1,$xd1
+ vperm2i128 \$0x20,$xd2,$xc2,$xc1
+ vperm2i128 \$0x31,$xd2,$xc2,$xd2
+ vperm2i128 \$0x20,$xd3,$xc3,$xc2
+ vperm2i128 \$0x31,$xd3,$xc3,$xd3
+___
+ ($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
+ ($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
+ ($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
+ ($xa0,$xa1)=($xt2,$xt3);
+$code.=<<___;
+ vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
+ vmovdqa 0x20(%rsp),$xa1
+
+ cmp \$64*8,$len
+ jb .Ltail8x
+
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vpxor 0x40($inp),$xc0,$xc0
+ vpxor 0x60($inp),$xd0,$xd0
+ lea 0x80($inp),$inp # size optimization
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ vmovdqu $xc0,0x40($out)
+ vmovdqu $xd0,0x60($out)
+ lea 0x80($out),$out # size optimization
+
+ vpxor 0x00($inp),$xa1,$xa1
+ vpxor 0x20($inp),$xb1,$xb1
+ vpxor 0x40($inp),$xc1,$xc1
+ vpxor 0x60($inp),$xd1,$xd1
+ lea 0x80($inp),$inp # size optimization
+ vmovdqu $xa1,0x00($out)
+ vmovdqu $xb1,0x20($out)
+ vmovdqu $xc1,0x40($out)
+ vmovdqu $xd1,0x60($out)
+ lea 0x80($out),$out # size optimization
+
+ vpxor 0x00($inp),$xa2,$xa2
+ vpxor 0x20($inp),$xb2,$xb2
+ vpxor 0x40($inp),$xc2,$xc2
+ vpxor 0x60($inp),$xd2,$xd2
+ lea 0x80($inp),$inp # size optimization
+ vmovdqu $xa2,0x00($out)
+ vmovdqu $xb2,0x20($out)
+ vmovdqu $xc2,0x40($out)
+ vmovdqu $xd2,0x60($out)
+ lea 0x80($out),$out # size optimization
+
+ vpxor 0x00($inp),$xa3,$xa3
+ vpxor 0x20($inp),$xb3,$xb3
+ vpxor 0x40($inp),$xc3,$xc3
+ vpxor 0x60($inp),$xd3,$xd3
+ lea 0x80($inp),$inp # size optimization
+ vmovdqu $xa3,0x00($out)
+ vmovdqu $xb3,0x20($out)
+ vmovdqu $xc3,0x40($out)
+ vmovdqu $xd3,0x60($out)
+ lea 0x80($out),$out # size optimization
+
+ sub \$64*8,$len
+ jnz .Loop_outer8x
+
+ jmp .Ldone8x
+
+.Ltail8x:
+ cmp \$448,$len
+ jae .L448_or_more8x
+ cmp \$384,$len
+ jae .L384_or_more8x
+ cmp \$320,$len
+ jae .L320_or_more8x
+ cmp \$256,$len
+ jae .L256_or_more8x
+ cmp \$192,$len
+ jae .L192_or_more8x
+ cmp \$128,$len
+ jae .L128_or_more8x
+ cmp \$64,$len
+ jae .L64_or_more8x
+
+ xor %r10,%r10
+ vmovdqa $xa0,0x00(%rsp)
+ vmovdqa $xb0,0x20(%rsp)
+ jmp .Loop_tail8x
+
+.align 32
+.L64_or_more8x:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ je .Ldone8x
+
+ lea 0x40($inp),$inp # inp+=64*1
+ xor %r10,%r10
+ vmovdqa $xc0,0x00(%rsp)
+ lea 0x40($out),$out # out+=64*1
+ sub \$64,$len # len-=64*1
+ vmovdqa $xd0,0x20(%rsp)
+ jmp .Loop_tail8x
+
+.align 32
+.L128_or_more8x:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vpxor 0x40($inp),$xc0,$xc0
+ vpxor 0x60($inp),$xd0,$xd0
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ vmovdqu $xc0,0x40($out)
+ vmovdqu $xd0,0x60($out)
+ je .Ldone8x
+
+ lea 0x80($inp),$inp # inp+=64*2
+ xor %r10,%r10
+ vmovdqa $xa1,0x00(%rsp)
+ lea 0x80($out),$out # out+=64*2
+ sub \$128,$len # len-=64*2
+ vmovdqa $xb1,0x20(%rsp)
+ jmp .Loop_tail8x
+
+.align 32
+.L192_or_more8x:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vpxor 0x40($inp),$xc0,$xc0
+ vpxor 0x60($inp),$xd0,$xd0
+ vpxor 0x80($inp),$xa1,$xa1
+ vpxor 0xa0($inp),$xb1,$xb1
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ vmovdqu $xc0,0x40($out)
+ vmovdqu $xd0,0x60($out)
+ vmovdqu $xa1,0x80($out)
+ vmovdqu $xb1,0xa0($out)
+ je .Ldone8x
+
+ lea 0xc0($inp),$inp # inp+=64*3
+ xor %r10,%r10
+ vmovdqa $xc1,0x00(%rsp)
+ lea 0xc0($out),$out # out+=64*3
+ sub \$192,$len # len-=64*3
+ vmovdqa $xd1,0x20(%rsp)
+ jmp .Loop_tail8x
+
+.align 32
+.L256_or_more8x:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vpxor 0x40($inp),$xc0,$xc0
+ vpxor 0x60($inp),$xd0,$xd0
+ vpxor 0x80($inp),$xa1,$xa1
+ vpxor 0xa0($inp),$xb1,$xb1
+ vpxor 0xc0($inp),$xc1,$xc1
+ vpxor 0xe0($inp),$xd1,$xd1
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ vmovdqu $xc0,0x40($out)
+ vmovdqu $xd0,0x60($out)
+ vmovdqu $xa1,0x80($out)
+ vmovdqu $xb1,0xa0($out)
+ vmovdqu $xc1,0xc0($out)
+ vmovdqu $xd1,0xe0($out)
+ je .Ldone8x
+
+ lea 0x100($inp),$inp # inp+=64*4
+ xor %r10,%r10
+ vmovdqa $xa2,0x00(%rsp)
+ lea 0x100($out),$out # out+=64*4
+ sub \$256,$len # len-=64*4
+ vmovdqa $xb2,0x20(%rsp)
+ jmp .Loop_tail8x
+
+.align 32
+.L320_or_more8x:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vpxor 0x40($inp),$xc0,$xc0
+ vpxor 0x60($inp),$xd0,$xd0
+ vpxor 0x80($inp),$xa1,$xa1
+ vpxor 0xa0($inp),$xb1,$xb1
+ vpxor 0xc0($inp),$xc1,$xc1
+ vpxor 0xe0($inp),$xd1,$xd1
+ vpxor 0x100($inp),$xa2,$xa2
+ vpxor 0x120($inp),$xb2,$xb2
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ vmovdqu $xc0,0x40($out)
+ vmovdqu $xd0,0x60($out)
+ vmovdqu $xa1,0x80($out)
+ vmovdqu $xb1,0xa0($out)
+ vmovdqu $xc1,0xc0($out)
+ vmovdqu $xd1,0xe0($out)
+ vmovdqu $xa2,0x100($out)
+ vmovdqu $xb2,0x120($out)
+ je .Ldone8x
+
+ lea 0x140($inp),$inp # inp+=64*5
+ xor %r10,%r10
+ vmovdqa $xc2,0x00(%rsp)
+ lea 0x140($out),$out # out+=64*5
+ sub \$320,$len # len-=64*5
+ vmovdqa $xd2,0x20(%rsp)
+ jmp .Loop_tail8x
+
+.align 32
+.L384_or_more8x:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vpxor 0x40($inp),$xc0,$xc0
+ vpxor 0x60($inp),$xd0,$xd0
+ vpxor 0x80($inp),$xa1,$xa1
+ vpxor 0xa0($inp),$xb1,$xb1
+ vpxor 0xc0($inp),$xc1,$xc1
+ vpxor 0xe0($inp),$xd1,$xd1
+ vpxor 0x100($inp),$xa2,$xa2
+ vpxor 0x120($inp),$xb2,$xb2
+ vpxor 0x140($inp),$xc2,$xc2
+ vpxor 0x160($inp),$xd2,$xd2
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ vmovdqu $xc0,0x40($out)
+ vmovdqu $xd0,0x60($out)
+ vmovdqu $xa1,0x80($out)
+ vmovdqu $xb1,0xa0($out)
+ vmovdqu $xc1,0xc0($out)
+ vmovdqu $xd1,0xe0($out)
+ vmovdqu $xa2,0x100($out)
+ vmovdqu $xb2,0x120($out)
+ vmovdqu $xc2,0x140($out)
+ vmovdqu $xd2,0x160($out)
+ je .Ldone8x
+
+ lea 0x180($inp),$inp # inp+=64*6
+ xor %r10,%r10
+ vmovdqa $xa3,0x00(%rsp)
+ lea 0x180($out),$out # out+=64*6
+ sub \$384,$len # len-=64*6
+ vmovdqa $xb3,0x20(%rsp)
+ jmp .Loop_tail8x
+
+.align 32
+.L448_or_more8x:
+ vpxor 0x00($inp),$xa0,$xa0 # xor with input
+ vpxor 0x20($inp),$xb0,$xb0
+ vpxor 0x40($inp),$xc0,$xc0
+ vpxor 0x60($inp),$xd0,$xd0
+ vpxor 0x80($inp),$xa1,$xa1
+ vpxor 0xa0($inp),$xb1,$xb1
+ vpxor 0xc0($inp),$xc1,$xc1
+ vpxor 0xe0($inp),$xd1,$xd1
+ vpxor 0x100($inp),$xa2,$xa2
+ vpxor 0x120($inp),$xb2,$xb2
+ vpxor 0x140($inp),$xc2,$xc2
+ vpxor 0x160($inp),$xd2,$xd2
+ vpxor 0x180($inp),$xa3,$xa3
+ vpxor 0x1a0($inp),$xb3,$xb3
+ vmovdqu $xa0,0x00($out)
+ vmovdqu $xb0,0x20($out)
+ vmovdqu $xc0,0x40($out)
+ vmovdqu $xd0,0x60($out)
+ vmovdqu $xa1,0x80($out)
+ vmovdqu $xb1,0xa0($out)
+ vmovdqu $xc1,0xc0($out)
+ vmovdqu $xd1,0xe0($out)
+ vmovdqu $xa2,0x100($out)
+ vmovdqu $xb2,0x120($out)
+ vmovdqu $xc2,0x140($out)
+ vmovdqu $xd2,0x160($out)
+ vmovdqu $xa3,0x180($out)
+ vmovdqu $xb3,0x1a0($out)
+ je .Ldone8x
+
+ lea 0x1c0($inp),$inp # inp+=64*7
+ xor %r10,%r10
+ vmovdqa $xc3,0x00(%rsp)
+ lea 0x1c0($out),$out # out+=64*7
+ sub \$448,$len # len-=64*7
+ vmovdqa $xd3,0x20(%rsp)
+
+.Loop_tail8x:
+ movzb ($inp,%r10),%eax
+ movzb (%rsp,%r10),%ecx
+ lea 1(%r10),%r10
+ xor %ecx,%eax
+ mov %al,-1($out,%r10)
+ dec $len
+ jnz .Loop_tail8x
+
+.Ldone8x:
+ vzeroall
+___
+$code.=<<___ if ($win64);
+ lea 0x290+0x30(%rsp),%r11
+ movaps -0x30(%r11),%xmm6
+ movaps -0x20(%r11),%xmm7
+ movaps -0x10(%r11),%xmm8
+ movaps 0x00(%r11),%xmm9
+ movaps 0x10(%r11),%xmm10
+ movaps 0x20(%r11),%xmm11
+ movaps 0x30(%r11),%xmm12
+ movaps 0x40(%r11),%xmm13
+ movaps 0x50(%r11),%xmm14
+ movaps 0x60(%r11),%xmm15
+___
+$code.=<<___;
+ mov 0x280(%rsp),%rsp
+ ret
+.size ChaCha20_8x,.-ChaCha20_8x
+___
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/%x#%y/%x/go;
+
+ print $_,"\n";
+}
+
+close STDOUT;