Diffstat (limited to 'openssl-1.1.0h/crypto/bn/asm')
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/alpha-mont.pl  331
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/armv4-gf2m.pl  332
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/armv4-mont.pl  756
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/armv8-mont.pl  1510
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/bn-586.pl  785
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/bn-c64xplus.asm  382
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/c64xplus-gf2m.pl  160
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/co-586.pl  298
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/ia64-mont.pl  860
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/ia64.S  1562
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/mips-mont.pl  433
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/mips.pl  2241
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/pa-risc2.s  1624
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/pa-risc2W.s  1612
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/parisc-mont.pl  1002
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/ppc-mont.pl  342
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/ppc.pl  2014
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/ppc64-mont.pl  1635
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/rsaz-avx2.pl  1967
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/rsaz-x86_64.pl  2358
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/s390x-gf2m.pl  228
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/s390x-mont.pl  284
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/s390x.S  713
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/sparct4-mont.pl  1232
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/sparcv8.S  1458
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/sparcv8plus.S  1562
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/sparcv9-gf2m.pl  200
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/sparcv9-mont.pl  619
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/sparcv9a-mont.pl  887
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/via-mont.pl  254
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/vis3-mont.pl  384
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/x86-gf2m.pl  325
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/x86-mont.pl  629
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/x86_64-gcc.c  649
-rw-r--r--  openssl-1.1.0h/crypto/bn/asm/x86_64-gf2m.pl  397
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/x86_64-mont.pl  1521
-rwxr-xr-x  openssl-1.1.0h/crypto/bn/asm/x86_64-mont5.pl  3835
37 files changed, 37381 insertions, 0 deletions
diff --git a/openssl-1.1.0h/crypto/bn/asm/alpha-mont.pl b/openssl-1.1.0h/crypto/bn/asm/alpha-mont.pl
new file mode 100644
index 0000000..1d68d6d
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/alpha-mont.pl
@@ -0,0 +1,331 @@
+#! /usr/bin/env perl
+# Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# On 21264 RSA sign performance improves by 70/35/20/15 percent for
+# 512/1024/2048/4096 bit key lengths. This is against vendor compiler
+# instructed to '-tune host' code with in-line assembler. Other
+# benchmarks improve by 15-20%. To anchor it to something else, the
+# code provides approximately the same performance per GHz as AMD64.
+# I.e. if you compare 1GHz 21264 and 2GHz Opteron, you'll observe ~2x
+# difference.
+
+$output=pop;
+open STDOUT,">$output";
+
+# int bn_mul_mont(
+$rp="a0"; # BN_ULONG *rp,
+$ap="a1"; # const BN_ULONG *ap,
+$bp="a2"; # const BN_ULONG *bp,
+$np="a3"; # const BN_ULONG *np,
+$n0="a4"; # const BN_ULONG *n0,
+$num="a5"; # int num);
+
+$lo0="t0";
+$hi0="t1";
+$lo1="t2";
+$hi1="t3";
+$aj="t4";
+$bi="t5";
+$nj="t6";
+$tp="t7";
+$alo="t8";
+$ahi="t9";
+$nlo="t10";
+$nhi="t11";
+$tj="t12";
+$i="s3";
+$j="s4";
+$m1="s5";
+
+$code=<<___;
+#ifdef __linux__
+#include <asm/regdef.h>
+#else
+#include <asm.h>
+#include <regdef.h>
+#endif
+
+.text
+
+.set noat
+.set noreorder
+
+.globl bn_mul_mont
+.align 5
+.ent bn_mul_mont
+bn_mul_mont:
+ lda sp,-48(sp)
+ stq ra,0(sp)
+ stq s3,8(sp)
+ stq s4,16(sp)
+ stq s5,24(sp)
+ stq fp,32(sp)
+ mov sp,fp
+ .mask 0x0400f000,-48
+ .frame fp,48,ra
+ .prologue 0
+
+ .align 4
+ .set reorder
+ sextl $num,$num
+ mov 0,v0
+ cmplt $num,4,AT
+ bne AT,.Lexit
+
+ ldq $hi0,0($ap) # ap[0]
+ s8addq $num,16,AT
+ ldq $aj,8($ap)
+ subq sp,AT,sp
+ ldq $bi,0($bp) # bp[0]
+ lda AT,-4096(zero) # mov -4096,AT
+ ldq $n0,0($n0)
+ and sp,AT,sp
+
+ mulq $hi0,$bi,$lo0
+ ldq $hi1,0($np) # np[0]
+ umulh $hi0,$bi,$hi0
+ ldq $nj,8($np)
+
+ mulq $lo0,$n0,$m1
+
+ mulq $hi1,$m1,$lo1
+ umulh $hi1,$m1,$hi1
+
+ addq $lo1,$lo0,$lo1
+ cmpult $lo1,$lo0,AT
+ addq $hi1,AT,$hi1
+
+ mulq $aj,$bi,$alo
+ mov 2,$j
+ umulh $aj,$bi,$ahi
+ mov sp,$tp
+
+ mulq $nj,$m1,$nlo
+ s8addq $j,$ap,$aj
+ umulh $nj,$m1,$nhi
+ s8addq $j,$np,$nj
+.align 4
+.L1st:
+ .set noreorder
+ ldq $aj,0($aj)
+ addl $j,1,$j
+ ldq $nj,0($nj)
+ lda $tp,8($tp)
+
+ addq $alo,$hi0,$lo0
+ mulq $aj,$bi,$alo
+ cmpult $lo0,$hi0,AT
+ addq $nlo,$hi1,$lo1
+
+ mulq $nj,$m1,$nlo
+ addq $ahi,AT,$hi0
+ cmpult $lo1,$hi1,v0
+ cmplt $j,$num,$tj
+
+ umulh $aj,$bi,$ahi
+ addq $nhi,v0,$hi1
+ addq $lo1,$lo0,$lo1
+ s8addq $j,$ap,$aj
+
+ umulh $nj,$m1,$nhi
+ cmpult $lo1,$lo0,v0
+ addq $hi1,v0,$hi1
+ s8addq $j,$np,$nj
+
+ stq $lo1,-8($tp)
+ nop
+ unop
+ bne $tj,.L1st
+ .set reorder
+
+ addq $alo,$hi0,$lo0
+ addq $nlo,$hi1,$lo1
+ cmpult $lo0,$hi0,AT
+ cmpult $lo1,$hi1,v0
+ addq $ahi,AT,$hi0
+ addq $nhi,v0,$hi1
+
+ addq $lo1,$lo0,$lo1
+ cmpult $lo1,$lo0,v0
+ addq $hi1,v0,$hi1
+
+ stq $lo1,0($tp)
+
+ addq $hi1,$hi0,$hi1
+ cmpult $hi1,$hi0,AT
+ stq $hi1,8($tp)
+ stq AT,16($tp)
+
+ mov 1,$i
+.align 4
+.Louter:
+ s8addq $i,$bp,$bi
+ ldq $hi0,0($ap)
+ ldq $aj,8($ap)
+ ldq $bi,0($bi)
+ ldq $hi1,0($np)
+ ldq $nj,8($np)
+ ldq $tj,0(sp)
+
+ mulq $hi0,$bi,$lo0
+ umulh $hi0,$bi,$hi0
+
+ addq $lo0,$tj,$lo0
+ cmpult $lo0,$tj,AT
+ addq $hi0,AT,$hi0
+
+ mulq $lo0,$n0,$m1
+
+ mulq $hi1,$m1,$lo1
+ umulh $hi1,$m1,$hi1
+
+ addq $lo1,$lo0,$lo1
+ cmpult $lo1,$lo0,AT
+ mov 2,$j
+ addq $hi1,AT,$hi1
+
+ mulq $aj,$bi,$alo
+ mov sp,$tp
+ umulh $aj,$bi,$ahi
+
+ mulq $nj,$m1,$nlo
+ s8addq $j,$ap,$aj
+ umulh $nj,$m1,$nhi
+.align 4
+.Linner:
+ .set noreorder
+ ldq $tj,8($tp) #L0
+ nop #U1
+ ldq $aj,0($aj) #L1
+ s8addq $j,$np,$nj #U0
+
+ ldq $nj,0($nj) #L0
+ nop #U1
+ addq $alo,$hi0,$lo0 #L1
+ lda $tp,8($tp)
+
+ mulq $aj,$bi,$alo #U1
+ cmpult $lo0,$hi0,AT #L0
+ addq $nlo,$hi1,$lo1 #L1
+ addl $j,1,$j
+
+ mulq $nj,$m1,$nlo #U1
+ addq $ahi,AT,$hi0 #L0
+ addq $lo0,$tj,$lo0 #L1
+ cmpult $lo1,$hi1,v0 #U0
+
+ umulh $aj,$bi,$ahi #U1
+ cmpult $lo0,$tj,AT #L0
+ addq $lo1,$lo0,$lo1 #L1
+ addq $nhi,v0,$hi1 #U0
+
+ umulh $nj,$m1,$nhi #U1
+ s8addq $j,$ap,$aj #L0
+ cmpult $lo1,$lo0,v0 #L1
+ cmplt $j,$num,$tj #U0 # borrow $tj
+
+ addq $hi0,AT,$hi0 #L0
+ addq $hi1,v0,$hi1 #U1
+ stq $lo1,-8($tp) #L1
+ bne $tj,.Linner #U0
+ .set reorder
+
+ ldq $tj,8($tp)
+ addq $alo,$hi0,$lo0
+ addq $nlo,$hi1,$lo1
+ cmpult $lo0,$hi0,AT
+ cmpult $lo1,$hi1,v0
+ addq $ahi,AT,$hi0
+ addq $nhi,v0,$hi1
+
+ addq $lo0,$tj,$lo0
+ cmpult $lo0,$tj,AT
+ addq $hi0,AT,$hi0
+
+ ldq $tj,16($tp)
+ addq $lo1,$lo0,$j
+ cmpult $j,$lo0,v0
+ addq $hi1,v0,$hi1
+
+ addq $hi1,$hi0,$lo1
+ stq $j,0($tp)
+ cmpult $lo1,$hi0,$hi1
+ addq $lo1,$tj,$lo1
+ cmpult $lo1,$tj,AT
+ addl $i,1,$i
+ addq $hi1,AT,$hi1
+ stq $lo1,8($tp)
+ cmplt $i,$num,$tj # borrow $tj
+ stq $hi1,16($tp)
+ bne $tj,.Louter
+
+ s8addq $num,sp,$tj # &tp[num]
+ mov $rp,$bp # put rp aside
+ mov sp,$tp
+ mov sp,$ap
+ mov 0,$hi0 # clear borrow bit
+
+.align 4
+.Lsub: ldq $lo0,0($tp)
+ ldq $lo1,0($np)
+ lda $tp,8($tp)
+ lda $np,8($np)
+ subq $lo0,$lo1,$lo1 # tp[i]-np[i]
+ cmpult $lo0,$lo1,AT
+ subq $lo1,$hi0,$lo0
+ cmpult $lo1,$lo0,$hi0
+ or $hi0,AT,$hi0
+ stq $lo0,0($rp)
+ cmpult $tp,$tj,v0
+ lda $rp,8($rp)
+ bne v0,.Lsub
+
+ subq $hi1,$hi0,$hi0 # handle upmost overflow bit
+ mov sp,$tp
+ mov $bp,$rp # restore rp
+
+ and sp,$hi0,$ap
+ bic $bp,$hi0,$bp
+ bis $bp,$ap,$ap # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: ldq $aj,0($ap) # copy or in-place refresh
+ lda $tp,8($tp)
+ lda $rp,8($rp)
+ lda $ap,8($ap)
+ stq zero,-8($tp) # zap tp
+ cmpult $tp,$tj,AT
+ stq $aj,-8($rp)
+ bne AT,.Lcopy
+ mov 1,v0
+
+.Lexit:
+ .set noreorder
+ mov fp,sp
+ /*ldq ra,0(sp)*/
+ ldq s3,8(sp)
+ ldq s4,16(sp)
+ ldq s5,24(sp)
+ ldq fp,32(sp)
+ lda sp,48(sp)
+ ret (ra)
+.end bn_mul_mont
+.ascii "Montgomery Multiplication for Alpha, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+___
+
+print $code;
+close STDOUT;
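
For reference, bn_mul_mont as added above returns the Montgomery product rp = ap*bp*R^-1 mod np, where R = 2^(64*num) on this 64-bit target and n0 points at the precomputed limb -np^-1 mod 2^64. A minimal Math::BigInt sketch of that contract (not part of the patch; mont_mul_ref is an illustrative name):

    use strict;
    use Math::BigInt;

    # What bn_mul_mont is expected to return for an odd modulus $n of $num
    # 64-bit limbs; $a, $b and $n are Math::BigInt objects.
    sub mont_mul_ref {
        my ($a, $b, $n, $num) = @_;
        my $R    = Math::BigInt->bone->blsft(64 * $num);    # R = 2^(64*num)
        my $Rinv = $R->copy->bmodinv($n);                   # exists because $n is odd
        return $a->copy->bmul($b)->bmul($Rinv)->bmod($n);   # a*b*R^-1 mod n
    }

    # The n0 argument is the single limb -n^-1 mod 2^64, precomputed by the
    # caller, for example:
    #   my $word = Math::BigInt->bone->blsft(64);
    #   my $n0   = $n->copy->bmodinv($word)->bneg->bmod($word);

Since callers normally keep operands in Montgomery form, i.e. as a*R mod n, one such call on two converted operands yields a*b*R mod n, so the result stays in that form.
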
diff --git a/openssl-1.1.0h/crypto/bn/asm/armv4-gf2m.pl b/openssl-1.1.0h/crypto/bn/asm/armv4-gf2m.pl
new file mode 100644
index 0000000..0bb5433
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/armv4-gf2m.pl
@@ -0,0 +1,332 @@
+#! /usr/bin/env perl
+# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication
+# used in bn_gf2m.c. It's kind of low-hanging mechanical port from
+# C for the time being... Except that it has two code paths: pure
+# integer code suitable for any ARMv4 and later CPU and NEON code
+# suitable for ARMv7. Pure integer 1x1 multiplication subroutine runs
+# in ~45 cycles on dual-issue core such as Cortex A8, which is ~50%
+# faster than compiler-generated code. For ECDH and ECDSA verify (but
+# not for ECDSA sign) it means 25%-45% improvement depending on key
+# length, more for longer keys. Even though NEON 1x1 multiplication
+# runs in even less cycles, ~30, improvement is measurable only on
+# longer keys. One has to optimize code elsewhere to get NEON glow...
+#
+# April 2014
+#
+# Double bn_GF2m_mul_2x2 performance by using algorithm from paper
+# referred below, which improves ECDH and ECDSA verify benchmarks
+# by 18-40%.
+#
+# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
+# Polynomial Multiplication on ARM Processors using the NEON Engine.
+#
+# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+ open STDOUT,">$output";
+}
+
+$code=<<___;
+#include "arm_arch.h"
+
+.text
+#if defined(__thumb2__)
+.syntax unified
+.thumb
+#else
+.code 32
+#endif
+___
+################
+# private interface to mul_1x1_ialu
+#
+$a="r1";
+$b="r0";
+
+($a0,$a1,$a2,$a12,$a4,$a14)=
+($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
+
+$mask="r12";
+
+$code.=<<___;
+.type mul_1x1_ialu,%function
+.align 5
+mul_1x1_ialu:
+ mov $a0,#0
+ bic $a1,$a,#3<<30 @ a1=a&0x3fffffff
+ str $a0,[sp,#0] @ tab[0]=0
+ add $a2,$a1,$a1 @ a2=a1<<1
+ str $a1,[sp,#4] @ tab[1]=a1
+ eor $a12,$a1,$a2 @ a1^a2
+ str $a2,[sp,#8] @ tab[2]=a2
+ mov $a4,$a1,lsl#2 @ a4=a1<<2
+ str $a12,[sp,#12] @ tab[3]=a1^a2
+ eor $a14,$a1,$a4 @ a1^a4
+ str $a4,[sp,#16] @ tab[4]=a4
+ eor $a0,$a2,$a4 @ a2^a4
+ str $a14,[sp,#20] @ tab[5]=a1^a4
+ eor $a12,$a12,$a4 @ a1^a2^a4
+ str $a0,[sp,#24] @ tab[6]=a2^a4
+ and $i0,$mask,$b,lsl#2
+ str $a12,[sp,#28] @ tab[7]=a1^a2^a4
+
+ and $i1,$mask,$b,lsr#1
+ ldr $lo,[sp,$i0] @ tab[b & 0x7]
+ and $i0,$mask,$b,lsr#4
+ ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7]
+ and $i1,$mask,$b,lsr#7
+ ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7]
+ eor $lo,$lo,$t1,lsl#3 @ stall
+ mov $hi,$t1,lsr#29
+ ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7]
+
+ and $i0,$mask,$b,lsr#10
+ eor $lo,$lo,$t0,lsl#6
+ eor $hi,$hi,$t0,lsr#26
+ ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7]
+
+ and $i1,$mask,$b,lsr#13
+ eor $lo,$lo,$t1,lsl#9
+ eor $hi,$hi,$t1,lsr#23
+ ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7]
+
+ and $i0,$mask,$b,lsr#16
+ eor $lo,$lo,$t0,lsl#12
+ eor $hi,$hi,$t0,lsr#20
+ ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7]
+
+ and $i1,$mask,$b,lsr#19
+ eor $lo,$lo,$t1,lsl#15
+ eor $hi,$hi,$t1,lsr#17
+ ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7]
+
+ and $i0,$mask,$b,lsr#22
+ eor $lo,$lo,$t0,lsl#18
+ eor $hi,$hi,$t0,lsr#14
+ ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7]
+
+ and $i1,$mask,$b,lsr#25
+ eor $lo,$lo,$t1,lsl#21
+ eor $hi,$hi,$t1,lsr#11
+ ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7]
+
+ tst $a,#1<<30
+ and $i0,$mask,$b,lsr#28
+ eor $lo,$lo,$t0,lsl#24
+ eor $hi,$hi,$t0,lsr#8
+ ldr $t0,[sp,$i0] @ tab[b >> 30 ]
+
+#ifdef __thumb2__
+ itt ne
+#endif
+ eorne $lo,$lo,$b,lsl#30
+ eorne $hi,$hi,$b,lsr#2
+ tst $a,#1<<31
+ eor $lo,$lo,$t1,lsl#27
+ eor $hi,$hi,$t1,lsr#5
+#ifdef __thumb2__
+ itt ne
+#endif
+ eorne $lo,$lo,$b,lsl#31
+ eorne $hi,$hi,$b,lsr#1
+ eor $lo,$lo,$t0,lsl#30
+ eor $hi,$hi,$t0,lsr#2
+
+ mov pc,lr
+.size mul_1x1_ialu,.-mul_1x1_ialu
+___
+################
+# void bn_GF2m_mul_2x2(BN_ULONG *r,
+# BN_ULONG a1,BN_ULONG a0,
+# BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
+{
+$code.=<<___;
+.global bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,%function
+.align 5
+bn_GF2m_mul_2x2:
+#if __ARM_MAX_ARCH__>=7
+ stmdb sp!,{r10,lr}
+ ldr r12,.LOPENSSL_armcap
+ adr r10,.LOPENSSL_armcap
+ ldr r12,[r12,r10]
+#ifdef __APPLE__
+ ldr r12,[r12]
+#endif
+ tst r12,#ARMV7_NEON
+ itt ne
+ ldrne r10,[sp],#8
+ bne .LNEON
+ stmdb sp!,{r4-r9}
+#else
+ stmdb sp!,{r4-r10,lr}
+#endif
+___
+$ret="r10"; # reassigned 1st argument
+$code.=<<___;
+ mov $ret,r0 @ reassign 1st argument
+ mov $b,r3 @ $b=b1
+ sub r7,sp,#36
+ mov r8,sp
+ and r7,r7,#-32
+ ldr r3,[sp,#32] @ load b0
+ mov $mask,#7<<2
+ mov sp,r7 @ allocate tab[8]
+ str r8,[r7,#32]
+
+ bl mul_1x1_ialu @ a1·b1
+ str $lo,[$ret,#8]
+ str $hi,[$ret,#12]
+
+ eor $b,$b,r3 @ flip b0 and b1
+ eor $a,$a,r2 @ flip a0 and a1
+ eor r3,r3,$b
+ eor r2,r2,$a
+ eor $b,$b,r3
+ eor $a,$a,r2
+ bl mul_1x1_ialu @ a0·b0
+ str $lo,[$ret]
+ str $hi,[$ret,#4]
+
+ eor $a,$a,r2
+ eor $b,$b,r3
+ bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
+___
+@r=map("r$_",(6..9));
+$code.=<<___;
+ ldmia $ret,{@r[0]-@r[3]}
+ eor $lo,$lo,$hi
+ ldr sp,[sp,#32] @ destroy tab[8]
+ eor $hi,$hi,@r[1]
+ eor $lo,$lo,@r[0]
+ eor $hi,$hi,@r[2]
+ eor $lo,$lo,@r[3]
+ eor $hi,$hi,@r[3]
+ str $hi,[$ret,#8]
+ eor $lo,$lo,$hi
+ str $lo,[$ret,#4]
+
+#if __ARM_ARCH__>=5
+ ldmia sp!,{r4-r10,pc}
+#else
+ ldmia sp!,{r4-r10,lr}
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+___
+}
+{
+my ($r,$t0,$t1,$t2,$t3)=map("q$_",(0..3,8..12));
+my ($a,$b,$k48,$k32,$k16)=map("d$_",(26..31));
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.align 5
+.LNEON:
+ ldr r12, [sp] @ 5th argument
+ vmov $a, r2, r1
+ vmov $b, r12, r3
+ vmov.i64 $k48, #0x0000ffffffffffff
+ vmov.i64 $k32, #0x00000000ffffffff
+ vmov.i64 $k16, #0x000000000000ffff
+
+ vext.8 $t0#lo, $a, $a, #1 @ A1
+ vmull.p8 $t0, $t0#lo, $b @ F = A1*B
+ vext.8 $r#lo, $b, $b, #1 @ B1
+ vmull.p8 $r, $a, $r#lo @ E = A*B1
+ vext.8 $t1#lo, $a, $a, #2 @ A2
+ vmull.p8 $t1, $t1#lo, $b @ H = A2*B
+ vext.8 $t3#lo, $b, $b, #2 @ B2
+ vmull.p8 $t3, $a, $t3#lo @ G = A*B2
+ vext.8 $t2#lo, $a, $a, #3 @ A3
+ veor $t0, $t0, $r @ L = E + F
+ vmull.p8 $t2, $t2#lo, $b @ J = A3*B
+ vext.8 $r#lo, $b, $b, #3 @ B3
+ veor $t1, $t1, $t3 @ M = G + H
+ vmull.p8 $r, $a, $r#lo @ I = A*B3
+ veor $t0#lo, $t0#lo, $t0#hi @ t0 = (L) (P0 + P1) << 8
+ vand $t0#hi, $t0#hi, $k48
+ vext.8 $t3#lo, $b, $b, #4 @ B4
+ veor $t1#lo, $t1#lo, $t1#hi @ t1 = (M) (P2 + P3) << 16
+ vand $t1#hi, $t1#hi, $k32
+ vmull.p8 $t3, $a, $t3#lo @ K = A*B4
+ veor $t2, $t2, $r @ N = I + J
+ veor $t0#lo, $t0#lo, $t0#hi
+ veor $t1#lo, $t1#lo, $t1#hi
+ veor $t2#lo, $t2#lo, $t2#hi @ t2 = (N) (P4 + P5) << 24
+ vand $t2#hi, $t2#hi, $k16
+ vext.8 $t0, $t0, $t0, #15
+ veor $t3#lo, $t3#lo, $t3#hi @ t3 = (K) (P6 + P7) << 32
+ vmov.i64 $t3#hi, #0
+ vext.8 $t1, $t1, $t1, #14
+ veor $t2#lo, $t2#lo, $t2#hi
+ vmull.p8 $r, $a, $b @ D = A*B
+ vext.8 $t3, $t3, $t3, #12
+ vext.8 $t2, $t2, $t2, #13
+ veor $t0, $t0, $t1
+ veor $t2, $t2, $t3
+ veor $r, $r, $t0
+ veor $r, $r, $t2
+
+ vst1.32 {$r}, [r0]
+ ret @ bx lr
+#endif
+___
+}
+$code.=<<___;
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+#if __ARM_MAX_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-.
+#endif
+.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 5
+
+#if __ARM_MAX_ARCH__>=7
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/geo;
+
+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
+ s/\bret\b/bx lr/go or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+close STDOUT; # enforce flush
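
The integer path above reduces each 32x32 GF(2) multiplication to table lookups: mul_1x1_ialu tabulates the eight 3-bit GF(2) multiples of (the low bits of) a on the stack and then folds b in three-bit windows, while bn_GF2m_mul_2x2 combines three such 1x1 products Karatsuba-style, as in the Câmara et al. paper cited above. A minimal Perl model of the same idea (not part of the patch; clmul32 and gf2m_mul_2x2 are illustrative names, a perl with 64-bit integers is assumed, and the 30-bit masking that the 32-bit registers force on the assembly is skipped):

    use strict;

    # Carry-less (GF(2)[x]) product of two 32-bit words: fold $b three bits
    # at a time against a small table of GF(2) multiples of $a, in the same
    # spirit as mul_1x1_ialu above.
    sub clmul32 {
        my ($a, $b) = @_;
        my @tab = (0, $a, $a << 1, $a ^ ($a << 1),
                   $a << 2, $a ^ ($a << 2), ($a << 1) ^ ($a << 2),
                   $a ^ ($a << 1) ^ ($a << 2));
        my $r = 0;
        for (my $i = 0; $i < 32; $i += 3) {
            $r ^= $tab[($b >> $i) & 7] << $i;
        }
        return $r;                          # at most 63 significant bits
    }

    # One level of Karatsuba over GF(2), mirroring the combine done after
    # the three mul_1x1_ialu calls; returns r[0], r[1], r[2], r[3].
    sub gf2m_mul_2x2 {
        my ($a1, $a0, $b1, $b0) = @_;
        my $hi  = clmul32($a1, $b1);
        my $lo  = clmul32($a0, $b0);
        my $mid = clmul32($a1 ^ $a0, $b1 ^ $b0) ^ $hi ^ $lo;
        return ($lo & 0xffffffff,
                ($lo >> 32) ^ ($mid & 0xffffffff),
                ($hi & 0xffffffff) ^ ($mid >> 32),
                $hi >> 32);
    }
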
diff --git a/openssl-1.1.0h/crypto/bn/asm/armv4-mont.pl b/openssl-1.1.0h/crypto/bn/asm/armv4-mont.pl
new file mode 100644
index 0000000..0dc4fe9
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/armv4-mont.pl
@@ -0,0 +1,756 @@
+#! /usr/bin/env perl
+# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2007.
+
+# Montgomery multiplication for ARMv4.
+#
+# Performance improvement naturally varies among CPU implementations
+# and compilers. The code was observed to provide +65-35% improvement
+# [depending on key length, less for longer keys] on ARM920T, and
+# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
+# base and compiler generated code with in-lined umull and even umlal
+# instructions. The latter means that this code didn't really have an
+# "advantage" of utilizing some "secret" instruction.
+#
+# The code is interoperable with Thumb ISA and is rather compact, less
+# than 1/2KB. Windows CE port would be trivial, as it's exclusively
+# about decorations, ABI and instruction syntax are identical.
+
+# November 2013
+#
+# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
+# performance improvement on Cortex-A8 is ~45-100% depending on key
+# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
+# On Snapdragon S4 improvement was measured to vary from ~70% to
+# incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
+# rather because original integer-only code seems to perform
+# suboptimally on S4. Situation on Cortex-A9 is unfortunately
+# different. It's being looked into, but the trouble is that
+# performance for vectors longer than 256 bits is actually couple
+# of percent worse than for integer-only code. The code is chosen
+# for execution on all NEON-capable processors, because gain on
+# others outweighs the marginal loss on Cortex-A9.
+
+# September 2015
+#
+# Align Cortex-A9 performance with November 2013 improvements, i.e.
+# NEON code is now ~20-105% faster than integer-only one on this
+# processor. But this optimization further improved performance even
+# on other processors: NEON code path is ~45-180% faster than original
+# integer-only on Cortex-A8, ~10-210% on Cortex-A15, ~70-450% on
+# Snapdragon S4.
+
+$flavour = shift;
+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }
+
+if ($flavour && $flavour ne "void") {
+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+ die "can't locate arm-xlate.pl";
+
+ open STDOUT,"| \"$^X\" $xlate $flavour $output";
+} else {
+ open STDOUT,">$output";
+}
+
+$num="r0"; # starts as num argument, but holds &tp[num-1]
+$ap="r1";
+$bp="r2"; $bi="r2"; $rp="r2";
+$np="r3";
+$tp="r4";
+$aj="r5";
+$nj="r6";
+$tj="r7";
+$n0="r8";
+########### # r9 is reserved by ELF as platform specific, e.g. TLS pointer
+$alo="r10"; # sl, gcc uses it to keep @GOT
+$ahi="r11"; # fp
+$nlo="r12"; # ip
+########### # r13 is stack pointer
+$nhi="r14"; # lr
+########### # r15 is program counter
+
+#### argument block layout relative to &tp[num-1], a.k.a. $num
+$_rp="$num,#12*4";
+# ap permanently resides in r1
+$_bp="$num,#13*4";
+# np permanently resides in r3
+$_n0="$num,#14*4";
+$_num="$num,#15*4"; $_bpend=$_num;
+
+$code=<<___;
+#include "arm_arch.h"
+
+.text
+#if defined(__thumb2__)
+.syntax unified
+.thumb
+#else
+.code 32
+#endif
+
+#if __ARM_MAX_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word OPENSSL_armcap_P-.Lbn_mul_mont
+#endif
+
+.global bn_mul_mont
+.type bn_mul_mont,%function
+
+.align 5
+bn_mul_mont:
+.Lbn_mul_mont:
+ ldr ip,[sp,#4] @ load num
+ stmdb sp!,{r0,r2} @ sp points at argument block
+#if __ARM_MAX_ARCH__>=7
+ tst ip,#7
+ bne .Lialu
+ adr r0,.Lbn_mul_mont
+ ldr r2,.LOPENSSL_armcap
+ ldr r0,[r0,r2]
+#ifdef __APPLE__
+ ldr r0,[r0]
+#endif
+ tst r0,#ARMV7_NEON @ NEON available?
+ ldmia sp, {r0,r2}
+ beq .Lialu
+ add sp,sp,#8
+ b bn_mul8x_mont_neon
+.align 4
+.Lialu:
+#endif
+ cmp ip,#2
+ mov $num,ip @ load num
+#ifdef __thumb2__
+ ittt lt
+#endif
+ movlt r0,#0
+ addlt sp,sp,#2*4
+ blt .Labrt
+
+ stmdb sp!,{r4-r12,lr} @ save 10 registers
+
+ mov $num,$num,lsl#2 @ rescale $num for byte count
+ sub sp,sp,$num @ alloca(4*num)
+ sub sp,sp,#4 @ +extra dword
+ sub $num,$num,#4 @ "num=num-1"
+ add $tp,$bp,$num @ &bp[num-1]
+
+ add $num,sp,$num @ $num to point at &tp[num-1]
+ ldr $n0,[$_n0] @ &n0
+ ldr $bi,[$bp] @ bp[0]
+ ldr $aj,[$ap],#4 @ ap[0],ap++
+ ldr $nj,[$np],#4 @ np[0],np++
+ ldr $n0,[$n0] @ *n0
+ str $tp,[$_bpend] @ save &bp[num]
+
+ umull $alo,$ahi,$aj,$bi @ ap[0]*bp[0]
+ str $n0,[$_n0] @ save n0 value
+ mul $n0,$alo,$n0 @ "tp[0]"*n0
+ mov $nlo,#0
+ umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]"
+ mov $tp,sp
+
+.L1st:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ mov $alo,$ahi
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0]
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .L1st
+
+ adds $nlo,$nlo,$ahi
+ ldr $tp,[$_bp] @ restore bp
+ mov $nhi,#0
+ ldr $n0,[$_n0] @ restore n0
+ adc $nhi,$nhi,#0
+ str $nlo,[$num] @ tp[num-1]=
+ mov $tj,sp
+ str $nhi,[$num,#4] @ tp[num]=
+
+.Louter:
+ sub $tj,$num,$tj @ "original" $num-1 value
+ sub $ap,$ap,$tj @ "rewind" ap to &ap[1]
+ ldr $bi,[$tp,#4]! @ *(++bp)
+ sub $np,$np,$tj @ "rewind" np to &np[1]
+ ldr $aj,[$ap,#-4] @ ap[0]
+ ldr $alo,[sp] @ tp[0]
+ ldr $nj,[$np,#-4] @ np[0]
+ ldr $tj,[sp,#4] @ tp[1]
+
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[0]*bp[i]+tp[0]
+ str $tp,[$_bp] @ save bp
+ mul $n0,$alo,$n0
+ mov $nlo,#0
+ umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]"
+ mov $tp,sp
+
+.Linner:
+ ldr $aj,[$ap],#4 @ ap[j],ap++
+ adds $alo,$ahi,$tj @ +=tp[j]
+ ldr $nj,[$np],#4 @ np[j],np++
+ mov $ahi,#0
+ umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i]
+ mov $nhi,#0
+ umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
+ adc $ahi,$ahi,#0
+ ldr $tj,[$tp,#8] @ tp[j+1]
+ adds $nlo,$nlo,$alo
+ str $nlo,[$tp],#4 @ tp[j-1]=,tp++
+ adc $nlo,$nhi,#0
+ cmp $tp,$num
+ bne .Linner
+
+ adds $nlo,$nlo,$ahi
+ mov $nhi,#0
+ ldr $tp,[$_bp] @ restore bp
+ adc $nhi,$nhi,#0
+ ldr $n0,[$_n0] @ restore n0
+ adds $nlo,$nlo,$tj
+ ldr $tj,[$_bpend] @ restore &bp[num]
+ adc $nhi,$nhi,#0
+ str $nlo,[$num] @ tp[num-1]=
+ str $nhi,[$num,#4] @ tp[num]=
+
+ cmp $tp,$tj
+#ifdef __thumb2__
+ itt ne
+#endif
+ movne $tj,sp
+ bne .Louter
+
+ ldr $rp,[$_rp] @ pull rp
+ mov $aj,sp
+ add $num,$num,#4 @ $num to point at &tp[num]
+ sub $aj,$num,$aj @ "original" num value
+ mov $tp,sp @ "rewind" $tp
+ mov $ap,$tp @ "borrow" $ap
+ sub $np,$np,$aj @ "rewind" $np to &np[0]
+
+ subs $tj,$tj,$tj @ "clear" carry flag
+.Lsub: ldr $tj,[$tp],#4
+ ldr $nj,[$np],#4
+ sbcs $tj,$tj,$nj @ tp[j]-np[j]
+ str $tj,[$rp],#4 @ rp[j]=
+ teq $tp,$num @ preserve carry
+ bne .Lsub
+ sbcs $nhi,$nhi,#0 @ upmost carry
+ mov $tp,sp @ "rewind" $tp
+ sub $rp,$rp,$aj @ "rewind" $rp
+
+ and $ap,$tp,$nhi
+ bic $np,$rp,$nhi
+ orr $ap,$ap,$np @ ap=borrow?tp:rp
+
+.Lcopy: ldr $tj,[$ap],#4 @ copy or in-place refresh
+ str sp,[$tp],#4 @ zap tp
+ str $tj,[$rp],#4
+ cmp $tp,$num
+ bne .Lcopy
+
+ mov sp,$num
+ add sp,sp,#4 @ skip over tp[num+1]
+ ldmia sp!,{r4-r12,lr} @ restore registers
+ add sp,sp,#2*4 @ skip over {r0,r2}
+ mov r0,#1
+.Labrt:
+#if __ARM_ARCH__>=5
+ ret @ bx lr
+#else
+ tst lr,#1
+ moveq pc,lr @ be binary compatible with V4, yet
+ bx lr @ interoperable with Thumb ISA:-)
+#endif
+.size bn_mul_mont,.-bn_mul_mont
+___
+{
+my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
+my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
+my ($Z,$Temp)=("q4","q5");
+my @ACC=map("q$_",(6..13));
+my ($Bi,$Ni,$M0)=map("d$_",(28..31));
+my $zero="$Z#lo";
+my $temp="$Temp#lo";
+
+my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
+my ($tinptr,$toutptr,$inner,$outer,$bnptr)=map("r$_",(6..11));
+
+$code.=<<___;
+#if __ARM_MAX_ARCH__>=7
+.arch armv7-a
+.fpu neon
+
+.type bn_mul8x_mont_neon,%function
+.align 5
+bn_mul8x_mont_neon:
+ mov ip,sp
+ stmdb sp!,{r4-r11}
+ vstmdb sp!,{d8-d15} @ ABI specification says so
+ ldmia ip,{r4-r5} @ load rest of parameter block
+ mov ip,sp
+
+ cmp $num,#8
+ bhi .LNEON_8n
+
+ @ special case for $num==8, everything is in register bank...
+
+ vld1.32 {${Bi}[0]}, [$bptr,:32]!
+ veor $zero,$zero,$zero
+ sub $toutptr,sp,$num,lsl#4
+ vld1.32 {$A0-$A3}, [$aptr]! @ can't specify :32 :-(
+ and $toutptr,$toutptr,#-64
+ vld1.32 {${M0}[0]}, [$n0,:32]
+ mov sp,$toutptr @ alloca
+ vzip.16 $Bi,$zero
+
+ vmull.u32 @ACC[0],$Bi,${A0}[0]
+ vmull.u32 @ACC[1],$Bi,${A0}[1]
+ vmull.u32 @ACC[2],$Bi,${A1}[0]
+ vshl.i64 $Ni,@ACC[0]#hi,#16
+ vmull.u32 @ACC[3],$Bi,${A1}[1]
+
+ vadd.u64 $Ni,$Ni,@ACC[0]#lo
+ veor $zero,$zero,$zero
+ vmul.u32 $Ni,$Ni,$M0
+
+ vmull.u32 @ACC[4],$Bi,${A2}[0]
+ vld1.32 {$N0-$N3}, [$nptr]!
+ vmull.u32 @ACC[5],$Bi,${A2}[1]
+ vmull.u32 @ACC[6],$Bi,${A3}[0]
+ vzip.16 $Ni,$zero
+ vmull.u32 @ACC[7],$Bi,${A3}[1]
+
+ vmlal.u32 @ACC[0],$Ni,${N0}[0]
+ sub $outer,$num,#1
+ vmlal.u32 @ACC[1],$Ni,${N0}[1]
+ vmlal.u32 @ACC[2],$Ni,${N1}[0]
+ vmlal.u32 @ACC[3],$Ni,${N1}[1]
+
+ vmlal.u32 @ACC[4],$Ni,${N2}[0]
+ vmov $Temp,@ACC[0]
+ vmlal.u32 @ACC[5],$Ni,${N2}[1]
+ vmov @ACC[0],@ACC[1]
+ vmlal.u32 @ACC[6],$Ni,${N3}[0]
+ vmov @ACC[1],@ACC[2]
+ vmlal.u32 @ACC[7],$Ni,${N3}[1]
+ vmov @ACC[2],@ACC[3]
+ vmov @ACC[3],@ACC[4]
+ vshr.u64 $temp,$temp,#16
+ vmov @ACC[4],@ACC[5]
+ vmov @ACC[5],@ACC[6]
+ vadd.u64 $temp,$temp,$Temp#hi
+ vmov @ACC[6],@ACC[7]
+ veor @ACC[7],@ACC[7]
+ vshr.u64 $temp,$temp,#16
+
+ b .LNEON_outer8
+
+.align 4
+.LNEON_outer8:
+ vld1.32 {${Bi}[0]}, [$bptr,:32]!
+ veor $zero,$zero,$zero
+ vzip.16 $Bi,$zero
+ vadd.u64 @ACC[0]#lo,@ACC[0]#lo,$temp
+
+ vmlal.u32 @ACC[0],$Bi,${A0}[0]
+ vmlal.u32 @ACC[1],$Bi,${A0}[1]
+ vmlal.u32 @ACC[2],$Bi,${A1}[0]
+ vshl.i64 $Ni,@ACC[0]#hi,#16
+ vmlal.u32 @ACC[3],$Bi,${A1}[1]
+
+ vadd.u64 $Ni,$Ni,@ACC[0]#lo
+ veor $zero,$zero,$zero
+ subs $outer,$outer,#1
+ vmul.u32 $Ni,$Ni,$M0
+
+ vmlal.u32 @ACC[4],$Bi,${A2}[0]
+ vmlal.u32 @ACC[5],$Bi,${A2}[1]
+ vmlal.u32 @ACC[6],$Bi,${A3}[0]
+ vzip.16 $Ni,$zero
+ vmlal.u32 @ACC[7],$Bi,${A3}[1]
+
+ vmlal.u32 @ACC[0],$Ni,${N0}[0]
+ vmlal.u32 @ACC[1],$Ni,${N0}[1]
+ vmlal.u32 @ACC[2],$Ni,${N1}[0]
+ vmlal.u32 @ACC[3],$Ni,${N1}[1]
+
+ vmlal.u32 @ACC[4],$Ni,${N2}[0]
+ vmov $Temp,@ACC[0]
+ vmlal.u32 @ACC[5],$Ni,${N2}[1]
+ vmov @ACC[0],@ACC[1]
+ vmlal.u32 @ACC[6],$Ni,${N3}[0]
+ vmov @ACC[1],@ACC[2]
+ vmlal.u32 @ACC[7],$Ni,${N3}[1]
+ vmov @ACC[2],@ACC[3]
+ vmov @ACC[3],@ACC[4]
+ vshr.u64 $temp,$temp,#16
+ vmov @ACC[4],@ACC[5]
+ vmov @ACC[5],@ACC[6]
+ vadd.u64 $temp,$temp,$Temp#hi
+ vmov @ACC[6],@ACC[7]
+ veor @ACC[7],@ACC[7]
+ vshr.u64 $temp,$temp,#16
+
+ bne .LNEON_outer8
+
+ vadd.u64 @ACC[0]#lo,@ACC[0]#lo,$temp
+ mov $toutptr,sp
+ vshr.u64 $temp,@ACC[0]#lo,#16
+ mov $inner,$num
+ vadd.u64 @ACC[0]#hi,@ACC[0]#hi,$temp
+ add $tinptr,sp,#96
+ vshr.u64 $temp,@ACC[0]#hi,#16
+ vzip.16 @ACC[0]#lo,@ACC[0]#hi
+
+ b .LNEON_tail_entry
+
+.align 4
+.LNEON_8n:
+ veor @ACC[0],@ACC[0],@ACC[0]
+ sub $toutptr,sp,#128
+ veor @ACC[1],@ACC[1],@ACC[1]
+ sub $toutptr,$toutptr,$num,lsl#4
+ veor @ACC[2],@ACC[2],@ACC[2]
+ and $toutptr,$toutptr,#-64
+ veor @ACC[3],@ACC[3],@ACC[3]
+ mov sp,$toutptr @ alloca
+ veor @ACC[4],@ACC[4],@ACC[4]
+ add $toutptr,$toutptr,#256
+ veor @ACC[5],@ACC[5],@ACC[5]
+ sub $inner,$num,#8
+ veor @ACC[6],@ACC[6],@ACC[6]
+ veor @ACC[7],@ACC[7],@ACC[7]
+
+.LNEON_8n_init:
+ vst1.64 {@ACC[0]-@ACC[1]},[$toutptr,:256]!
+ subs $inner,$inner,#8
+ vst1.64 {@ACC[2]-@ACC[3]},[$toutptr,:256]!
+ vst1.64 {@ACC[4]-@ACC[5]},[$toutptr,:256]!
+ vst1.64 {@ACC[6]-@ACC[7]},[$toutptr,:256]!
+ bne .LNEON_8n_init
+
+ add $tinptr,sp,#256
+ vld1.32 {$A0-$A3},[$aptr]!
+ add $bnptr,sp,#8
+ vld1.32 {${M0}[0]},[$n0,:32]
+ mov $outer,$num
+ b .LNEON_8n_outer
+
+.align 4
+.LNEON_8n_outer:
+ vld1.32 {${Bi}[0]},[$bptr,:32]! @ *b++
+ veor $zero,$zero,$zero
+ vzip.16 $Bi,$zero
+ add $toutptr,sp,#128
+ vld1.32 {$N0-$N3},[$nptr]!
+
+ vmlal.u32 @ACC[0],$Bi,${A0}[0]
+ vmlal.u32 @ACC[1],$Bi,${A0}[1]
+ veor $zero,$zero,$zero
+ vmlal.u32 @ACC[2],$Bi,${A1}[0]
+ vshl.i64 $Ni,@ACC[0]#hi,#16
+ vmlal.u32 @ACC[3],$Bi,${A1}[1]
+ vadd.u64 $Ni,$Ni,@ACC[0]#lo
+ vmlal.u32 @ACC[4],$Bi,${A2}[0]
+ vmul.u32 $Ni,$Ni,$M0
+ vmlal.u32 @ACC[5],$Bi,${A2}[1]
+ vst1.32 {$Bi},[sp,:64] @ put aside smashed b[8*i+0]
+ vmlal.u32 @ACC[6],$Bi,${A3}[0]
+ vzip.16 $Ni,$zero
+ vmlal.u32 @ACC[7],$Bi,${A3}[1]
+___
+for ($i=0; $i<7;) {
+$code.=<<___;
+ vld1.32 {${Bi}[0]},[$bptr,:32]! @ *b++
+ vmlal.u32 @ACC[0],$Ni,${N0}[0]
+ veor $temp,$temp,$temp
+ vmlal.u32 @ACC[1],$Ni,${N0}[1]
+ vzip.16 $Bi,$temp
+ vmlal.u32 @ACC[2],$Ni,${N1}[0]
+ vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
+ vmlal.u32 @ACC[3],$Ni,${N1}[1]
+ vmlal.u32 @ACC[4],$Ni,${N2}[0]
+ vadd.u64 @ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
+ vmlal.u32 @ACC[5],$Ni,${N2}[1]
+ vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
+ vmlal.u32 @ACC[6],$Ni,${N3}[0]
+ vmlal.u32 @ACC[7],$Ni,${N3}[1]
+ vadd.u64 @ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
+ vst1.32 {$Ni},[$bnptr,:64]! @ put aside smashed m[8*i+$i]
+___
+ push(@ACC,shift(@ACC)); $i++;
+$code.=<<___;
+ vmlal.u32 @ACC[0],$Bi,${A0}[0]
+ vld1.64 {@ACC[7]},[$tinptr,:128]!
+ vmlal.u32 @ACC[1],$Bi,${A0}[1]
+ veor $zero,$zero,$zero
+ vmlal.u32 @ACC[2],$Bi,${A1}[0]
+ vshl.i64 $Ni,@ACC[0]#hi,#16
+ vmlal.u32 @ACC[3],$Bi,${A1}[1]
+ vadd.u64 $Ni,$Ni,@ACC[0]#lo
+ vmlal.u32 @ACC[4],$Bi,${A2}[0]
+ vmul.u32 $Ni,$Ni,$M0
+ vmlal.u32 @ACC[5],$Bi,${A2}[1]
+ vst1.32 {$Bi},[$bnptr,:64]! @ put aside smashed b[8*i+$i]
+ vmlal.u32 @ACC[6],$Bi,${A3}[0]
+ vzip.16 $Ni,$zero
+ vmlal.u32 @ACC[7],$Bi,${A3}[1]
+___
+}
+$code.=<<___;
+ vld1.32 {$Bi},[sp,:64] @ pull smashed b[8*i+0]
+ vmlal.u32 @ACC[0],$Ni,${N0}[0]
+ vld1.32 {$A0-$A3},[$aptr]!
+ vmlal.u32 @ACC[1],$Ni,${N0}[1]
+ vmlal.u32 @ACC[2],$Ni,${N1}[0]
+ vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
+ vmlal.u32 @ACC[3],$Ni,${N1}[1]
+ vmlal.u32 @ACC[4],$Ni,${N2}[0]
+ vadd.u64 @ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
+ vmlal.u32 @ACC[5],$Ni,${N2}[1]
+ vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
+ vmlal.u32 @ACC[6],$Ni,${N3}[0]
+ vmlal.u32 @ACC[7],$Ni,${N3}[1]
+ vadd.u64 @ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
+ vst1.32 {$Ni},[$bnptr,:64] @ put aside smashed m[8*i+$i]
+ add $bnptr,sp,#8 @ rewind
+___
+ push(@ACC,shift(@ACC));
+$code.=<<___;
+ sub $inner,$num,#8
+ b .LNEON_8n_inner
+
+.align 4
+.LNEON_8n_inner:
+ subs $inner,$inner,#8
+ vmlal.u32 @ACC[0],$Bi,${A0}[0]
+ vld1.64 {@ACC[7]},[$tinptr,:128]
+ vmlal.u32 @ACC[1],$Bi,${A0}[1]
+ vld1.32 {$Ni},[$bnptr,:64]! @ pull smashed m[8*i+0]
+ vmlal.u32 @ACC[2],$Bi,${A1}[0]
+ vld1.32 {$N0-$N3},[$nptr]!
+ vmlal.u32 @ACC[3],$Bi,${A1}[1]
+ it ne
+ addne $tinptr,$tinptr,#16 @ don't advance in last iteration
+ vmlal.u32 @ACC[4],$Bi,${A2}[0]
+ vmlal.u32 @ACC[5],$Bi,${A2}[1]
+ vmlal.u32 @ACC[6],$Bi,${A3}[0]
+ vmlal.u32 @ACC[7],$Bi,${A3}[1]
+___
+for ($i=1; $i<8; $i++) {
+$code.=<<___;
+ vld1.32 {$Bi},[$bnptr,:64]! @ pull smashed b[8*i+$i]
+ vmlal.u32 @ACC[0],$Ni,${N0}[0]
+ vmlal.u32 @ACC[1],$Ni,${N0}[1]
+ vmlal.u32 @ACC[2],$Ni,${N1}[0]
+ vmlal.u32 @ACC[3],$Ni,${N1}[1]
+ vmlal.u32 @ACC[4],$Ni,${N2}[0]
+ vmlal.u32 @ACC[5],$Ni,${N2}[1]
+ vmlal.u32 @ACC[6],$Ni,${N3}[0]
+ vmlal.u32 @ACC[7],$Ni,${N3}[1]
+ vst1.64 {@ACC[0]},[$toutptr,:128]!
+___
+ push(@ACC,shift(@ACC));
+$code.=<<___;
+ vmlal.u32 @ACC[0],$Bi,${A0}[0]
+ vld1.64 {@ACC[7]},[$tinptr,:128]
+ vmlal.u32 @ACC[1],$Bi,${A0}[1]
+ vld1.32 {$Ni},[$bnptr,:64]! @ pull smashed m[8*i+$i]
+ vmlal.u32 @ACC[2],$Bi,${A1}[0]
+ it ne
+ addne $tinptr,$tinptr,#16 @ don't advance in last iteration
+ vmlal.u32 @ACC[3],$Bi,${A1}[1]
+ vmlal.u32 @ACC[4],$Bi,${A2}[0]
+ vmlal.u32 @ACC[5],$Bi,${A2}[1]
+ vmlal.u32 @ACC[6],$Bi,${A3}[0]
+ vmlal.u32 @ACC[7],$Bi,${A3}[1]
+___
+}
+$code.=<<___;
+ it eq
+ subeq $aptr,$aptr,$num,lsl#2 @ rewind
+ vmlal.u32 @ACC[0],$Ni,${N0}[0]
+ vld1.32 {$Bi},[sp,:64] @ pull smashed b[8*i+0]
+ vmlal.u32 @ACC[1],$Ni,${N0}[1]
+ vld1.32 {$A0-$A3},[$aptr]!
+ vmlal.u32 @ACC[2],$Ni,${N1}[0]
+ add $bnptr,sp,#8 @ rewind
+ vmlal.u32 @ACC[3],$Ni,${N1}[1]
+ vmlal.u32 @ACC[4],$Ni,${N2}[0]
+ vmlal.u32 @ACC[5],$Ni,${N2}[1]
+ vmlal.u32 @ACC[6],$Ni,${N3}[0]
+ vst1.64 {@ACC[0]},[$toutptr,:128]!
+ vmlal.u32 @ACC[7],$Ni,${N3}[1]
+
+ bne .LNEON_8n_inner
+___
+ push(@ACC,shift(@ACC));
+$code.=<<___;
+ add $tinptr,sp,#128
+ vst1.64 {@ACC[0]-@ACC[1]},[$toutptr,:256]!
+ veor q2,q2,q2 @ $N0-$N1
+ vst1.64 {@ACC[2]-@ACC[3]},[$toutptr,:256]!
+ veor q3,q3,q3 @ $N2-$N3
+ vst1.64 {@ACC[4]-@ACC[5]},[$toutptr,:256]!
+ vst1.64 {@ACC[6]},[$toutptr,:128]
+
+ subs $outer,$outer,#8
+ vld1.64 {@ACC[0]-@ACC[1]},[$tinptr,:256]!
+ vld1.64 {@ACC[2]-@ACC[3]},[$tinptr,:256]!
+ vld1.64 {@ACC[4]-@ACC[5]},[$tinptr,:256]!
+ vld1.64 {@ACC[6]-@ACC[7]},[$tinptr,:256]!
+
+ itt ne
+ subne $nptr,$nptr,$num,lsl#2 @ rewind
+ bne .LNEON_8n_outer
+
+ add $toutptr,sp,#128
+ vst1.64 {q2-q3}, [sp,:256]! @ start wiping stack frame
+ vshr.u64 $temp,@ACC[0]#lo,#16
+ vst1.64 {q2-q3},[sp,:256]!
+ vadd.u64 @ACC[0]#hi,@ACC[0]#hi,$temp
+ vst1.64 {q2-q3}, [sp,:256]!
+ vshr.u64 $temp,@ACC[0]#hi,#16
+ vst1.64 {q2-q3}, [sp,:256]!
+ vzip.16 @ACC[0]#lo,@ACC[0]#hi
+
+ mov $inner,$num
+ b .LNEON_tail_entry
+
+.align 4
+.LNEON_tail:
+ vadd.u64 @ACC[0]#lo,@ACC[0]#lo,$temp
+ vshr.u64 $temp,@ACC[0]#lo,#16
+ vld1.64 {@ACC[2]-@ACC[3]}, [$tinptr, :256]!
+ vadd.u64 @ACC[0]#hi,@ACC[0]#hi,$temp
+ vld1.64 {@ACC[4]-@ACC[5]}, [$tinptr, :256]!
+ vshr.u64 $temp,@ACC[0]#hi,#16
+ vld1.64 {@ACC[6]-@ACC[7]}, [$tinptr, :256]!
+ vzip.16 @ACC[0]#lo,@ACC[0]#hi
+
+.LNEON_tail_entry:
+___
+for ($i=1; $i<8; $i++) {
+$code.=<<___;
+ vadd.u64 @ACC[1]#lo,@ACC[1]#lo,$temp
+ vst1.32 {@ACC[0]#lo[0]}, [$toutptr, :32]!
+ vshr.u64 $temp,@ACC[1]#lo,#16
+ vadd.u64 @ACC[1]#hi,@ACC[1]#hi,$temp
+ vshr.u64 $temp,@ACC[1]#hi,#16
+ vzip.16 @ACC[1]#lo,@ACC[1]#hi
+___
+ push(@ACC,shift(@ACC));
+}
+ push(@ACC,shift(@ACC));
+$code.=<<___;
+ vld1.64 {@ACC[0]-@ACC[1]}, [$tinptr, :256]!
+ subs $inner,$inner,#8
+ vst1.32 {@ACC[7]#lo[0]}, [$toutptr, :32]!
+ bne .LNEON_tail
+
+ vst1.32 {${temp}[0]}, [$toutptr, :32] @ top-most bit
+ sub $nptr,$nptr,$num,lsl#2 @ rewind $nptr
+ subs $aptr,sp,#0 @ clear carry flag
+ add $bptr,sp,$num,lsl#2
+
+.LNEON_sub:
+ ldmia $aptr!, {r4-r7}
+ ldmia $nptr!, {r8-r11}
+ sbcs r8, r4,r8
+ sbcs r9, r5,r9
+ sbcs r10,r6,r10
+ sbcs r11,r7,r11
+ teq $aptr,$bptr @ preserves carry
+ stmia $rptr!, {r8-r11}
+ bne .LNEON_sub
+
+ ldr r10, [$aptr] @ load top-most bit
+ mov r11,sp
+ veor q0,q0,q0
+ sub r11,$bptr,r11 @ this is num*4
+ veor q1,q1,q1
+ mov $aptr,sp
+ sub $rptr,$rptr,r11 @ rewind $rptr
+ mov $nptr,$bptr @ second 3/4th of frame
+ sbcs r10,r10,#0 @ result is carry flag
+
+.LNEON_copy_n_zap:
+ ldmia $aptr!, {r4-r7}
+ ldmia $rptr, {r8-r11}
+ it cc
+ movcc r8, r4
+ vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
+ itt cc
+ movcc r9, r5
+ movcc r10,r6
+ vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
+ it cc
+ movcc r11,r7
+ ldmia $aptr, {r4-r7}
+ stmia $rptr!, {r8-r11}
+ sub $aptr,$aptr,#16
+ ldmia $rptr, {r8-r11}
+ it cc
+ movcc r8, r4
+ vst1.64 {q0-q1}, [$aptr,:256]! @ wipe
+ itt cc
+ movcc r9, r5
+ movcc r10,r6
+ vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
+ it cc
+ movcc r11,r7
+ teq $aptr,$bptr @ preserves carry
+ stmia $rptr!, {r8-r11}
+ bne .LNEON_copy_n_zap
+
+ mov sp,ip
+ vldmia sp!,{d8-d15}
+ ldmia sp!,{r4-r11}
+ ret @ bx lr
+.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
+#endif
+___
+}
+$code.=<<___;
+.asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 2
+#if __ARM_MAX_ARCH__>=7
+.comm OPENSSL_armcap_P,4,4
+#endif
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/ge or
+ s/\bret\b/bx lr/g or
+ s/\bbx\s+lr\b/.word\t0xe12fff1e/g; # make it possible to compile with -march=armv4
+
+ print $_,"\n";
+}
+
+close STDOUT;
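
Both the integer and NEON paths above follow the usual word-serial Montgomery loop: for each word of bp, accumulate ap[j]*bp[i] into a temporary, choose m so that adding np*m cancels the low word, drop that word, and finish with the subtract-and-select seen at .Lsub/.Lcopy. A minimal Perl model of that flow on 32-bit limbs (not the patch's code; mont_mul_words is an illustrative name, a perl with 64-bit integers is assumed, and both inputs are taken to be already reduced mod np):

    use strict;

    # Word-serial Montgomery multiplication on 32-bit limbs (least
    # significant limb first), modelled on the .L1st/.Linner and
    # .Lsub/.Lcopy flow above. $n0 is -np^-1 mod 2^32, which is what the
    # assembly loads through its n0 pointer.
    sub mont_mul_words {
        my ($ap, $bp, $np, $n0, $num) = @_;      # three array refs, two scalars
        my @tp = (0) x ($num + 1);
        for my $i (0 .. $num - 1) {
            my $c = 0;                           # tp += ap * bp[i]
            for my $j (0 .. $num - 1) {
                my $t = $tp[$j] + $ap->[$j] * $bp->[$i] + $c;
                $tp[$j] = $t & 0xffffffff;
                $c      = $t >> 32;
            }
            $tp[$num] += $c;
            my $m = ($tp[0] * $n0) & 0xffffffff; # makes the low limb cancel
            $c = ($tp[0] + $np->[0] * $m) >> 32; # tp = (tp + np*m) / 2^32
            for my $j (1 .. $num - 1) {
                my $t = $tp[$j] + $np->[$j] * $m + $c;
                $tp[$j - 1] = $t & 0xffffffff;
                $c          = $t >> 32;
            }
            my $t = $tp[$num] + $c;
            $tp[$num - 1] = $t & 0xffffffff;
            $tp[$num]     = $t >> 32;            # the "upmost" carry word
        }
        my @rp;                                  # rp = tp - np, tracking the borrow
        my $borrow = 0;
        for my $j (0 .. $num - 1) {
            my $d = $tp[$j] - $np->[$j] - $borrow;
            $borrow = $d < 0 ? 1 : 0;
            push @rp, $d & 0xffffffff;
        }
        # keep the difference unless the subtraction borrowed past the top word
        return $tp[$num] >= $borrow ? \@rp : [ @tp[0 .. $num - 1] ];
    }

The and/bic/orr sequence before .Lcopy is the constant-time counterpart of the ternary in the sketch's last line: it selects tp when the subtraction borrowed and rp otherwise, without branching on the borrow.
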
diff --git a/openssl-1.1.0h/crypto/bn/asm/armv8-mont.pl b/openssl-1.1.0h/crypto/bn/asm/armv8-mont.pl
new file mode 100755
index 0000000..5d5af1b
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/armv8-mont.pl
@@ -0,0 +1,1510 @@
+#! /usr/bin/env perl
+# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# March 2015
+#
+# "Teaser" Montgomery multiplication module for ARMv8. Needs more
+# work. While it does improve RSA sign performance by 20-30% (less for
+# longer keys) on most processors, for some reason RSA2048 is not
+# faster and RSA4096 goes 15-20% slower on Cortex-A57. Multiplication
+# instruction issue rate is limited on processor in question, meaning
+# that dedicated squaring procedure is a must. Well, actually all
+# contemporary AArch64 processors seem to have limited multiplication
+# issue rate, i.e. they can't issue multiplication every cycle, which
+# explains moderate improvement coefficients in comparison to
+# compiler-generated code. Recall that compiler is instructed to use
+# umulh and therefore uses same amount of multiplication instructions
+# to do the job. Assembly's edge is to minimize number of "collateral"
+# instructions and of course instruction scheduling.
+#
+# April 2015
+#
+# Squaring procedure that handles lengths divisible by 8 improves
+# RSA/DSA performance by 25-40-60% depending on processor and key
+# length. Overall improvement coefficients are always positive in
+# comparison to compiler-generated code. On Cortex-A57 improvement
+# is still modest on longest key lengths, while others exhibit e.g.
+# 50-70% improvement for RSA4096 sign. RSA2048 sign is ~25% faster
+# on Cortex-A57 and ~60-100% faster on others.
+
+$flavour = shift;
+$output = shift;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
+die "can't locate arm-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+($lo0,$hi0,$aj,$m0,$alo,$ahi,
+ $lo1,$hi1,$nj,$m1,$nlo,$nhi,
+ $ovf, $i,$j,$tp,$tj) = map("x$_",6..17,19..24);
+
+# int bn_mul_mont(
+$rp="x0"; # BN_ULONG *rp,
+$ap="x1"; # const BN_ULONG *ap,
+$bp="x2"; # const BN_ULONG *bp,
+$np="x3"; # const BN_ULONG *np,
+$n0="x4"; # const BN_ULONG *n0,
+$num="x5"; # int num);
+
+$code.=<<___;
+.text
+
+.globl bn_mul_mont
+.type bn_mul_mont,%function
+.align 5
+bn_mul_mont:
+ tst $num,#7
+ b.eq __bn_sqr8x_mont
+ tst $num,#3
+ b.eq __bn_mul4x_mont
+.Lmul_mont:
+ stp x29,x30,[sp,#-64]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+
+ ldr $m0,[$bp],#8 // bp[0]
+ sub $tp,sp,$num,lsl#3
+ ldp $hi0,$aj,[$ap],#16 // ap[0..1]
+ lsl $num,$num,#3
+ ldr $n0,[$n0] // *n0
+ and $tp,$tp,#-16 // ABI says so
+ ldp $hi1,$nj,[$np],#16 // np[0..1]
+
+ mul $lo0,$hi0,$m0 // ap[0]*bp[0]
+ sub $j,$num,#16 // j=num-2
+ umulh $hi0,$hi0,$m0
+ mul $alo,$aj,$m0 // ap[1]*bp[0]
+ umulh $ahi,$aj,$m0
+
+ mul $m1,$lo0,$n0 // "tp[0]"*n0
+ mov sp,$tp // alloca
+
+ // (*) mul $lo1,$hi1,$m1 // np[0]*m1
+ umulh $hi1,$hi1,$m1
+ mul $nlo,$nj,$m1 // np[1]*m1
+ // (*) adds $lo1,$lo1,$lo0 // discarded
+ // (*) As for removal of first multiplication and addition
+ // instructions. The outcome of first addition is
+ // guaranteed to be zero, which leaves two computationally
+ // significant outcomes: it either carries or not. Then
+ // question is when does it carry? Is there alternative
+ // way to deduce it? If you follow operations, you can
+ // observe that condition for carry is quite simple:
+ // $lo0 being non-zero. So that carry can be calculated
+ // by adding -1 to $lo0. That's what next instruction does.
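+ // Concretely: "subs xzr,$lo0,#1" borrows only when $lo0 is zero, so the
+ // carry flag ends up set exactly when $lo0 is non-zero, and the adc two
+ // instructions below folds that carry into $hi1.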
+ subs xzr,$lo0,#1 // (*)
+ umulh $nhi,$nj,$m1
+ adc $hi1,$hi1,xzr
+ cbz $j,.L1st_skip
+
+.L1st:
+ ldr $aj,[$ap],#8
+ adds $lo0,$alo,$hi0
+ sub $j,$j,#8 // j--
+ adc $hi0,$ahi,xzr
+
+ ldr $nj,[$np],#8
+ adds $lo1,$nlo,$hi1
+ mul $alo,$aj,$m0 // ap[j]*bp[0]
+ adc $hi1,$nhi,xzr
+ umulh $ahi,$aj,$m0
+
+ adds $lo1,$lo1,$lo0
+ mul $nlo,$nj,$m1 // np[j]*m1
+ adc $hi1,$hi1,xzr
+ umulh $nhi,$nj,$m1
+ str $lo1,[$tp],#8 // tp[j-1]
+ cbnz $j,.L1st
+
+.L1st_skip:
+ adds $lo0,$alo,$hi0
+ sub $ap,$ap,$num // rewind $ap
+ adc $hi0,$ahi,xzr
+
+ adds $lo1,$nlo,$hi1
+ sub $np,$np,$num // rewind $np
+ adc $hi1,$nhi,xzr
+
+ adds $lo1,$lo1,$lo0
+ sub $i,$num,#8 // i=num-1
+ adcs $hi1,$hi1,$hi0
+
+ adc $ovf,xzr,xzr // upmost overflow bit
+ stp $lo1,$hi1,[$tp]
+
+.Louter:
+ ldr $m0,[$bp],#8 // bp[i]
+ ldp $hi0,$aj,[$ap],#16
+ ldr $tj,[sp] // tp[0]
+ add $tp,sp,#8
+
+ mul $lo0,$hi0,$m0 // ap[0]*bp[i]
+ sub $j,$num,#16 // j=num-2
+ umulh $hi0,$hi0,$m0
+ ldp $hi1,$nj,[$np],#16
+ mul $alo,$aj,$m0 // ap[1]*bp[i]
+ adds $lo0,$lo0,$tj
+ umulh $ahi,$aj,$m0
+ adc $hi0,$hi0,xzr
+
+ mul $m1,$lo0,$n0
+ sub $i,$i,#8 // i--
+
+ // (*) mul $lo1,$hi1,$m1 // np[0]*m1
+ umulh $hi1,$hi1,$m1
+ mul $nlo,$nj,$m1 // np[1]*m1
+ // (*) adds $lo1,$lo1,$lo0
+ subs xzr,$lo0,#1 // (*)
+ umulh $nhi,$nj,$m1
+ cbz $j,.Linner_skip
+
+.Linner:
+ ldr $aj,[$ap],#8
+ adc $hi1,$hi1,xzr
+ ldr $tj,[$tp],#8 // tp[j]
+ adds $lo0,$alo,$hi0
+ sub $j,$j,#8 // j--
+ adc $hi0,$ahi,xzr
+
+ adds $lo1,$nlo,$hi1
+ ldr $nj,[$np],#8
+ adc $hi1,$nhi,xzr
+
+ mul $alo,$aj,$m0 // ap[j]*bp[i]
+ adds $lo0,$lo0,$tj
+ umulh $ahi,$aj,$m0
+ adc $hi0,$hi0,xzr
+
+ mul $nlo,$nj,$m1 // np[j]*m1
+ adds $lo1,$lo1,$lo0
+ umulh $nhi,$nj,$m1
+ str $lo1,[$tp,#-16] // tp[j-1]
+ cbnz $j,.Linner
+
+.Linner_skip:
+ ldr $tj,[$tp],#8 // tp[j]
+ adc $hi1,$hi1,xzr
+ adds $lo0,$alo,$hi0
+ sub $ap,$ap,$num // rewind $ap
+ adc $hi0,$ahi,xzr
+
+ adds $lo1,$nlo,$hi1
+ sub $np,$np,$num // rewind $np
+ adcs $hi1,$nhi,$ovf
+ adc $ovf,xzr,xzr
+
+ adds $lo0,$lo0,$tj
+ adc $hi0,$hi0,xzr
+
+ adds $lo1,$lo1,$lo0
+ adcs $hi1,$hi1,$hi0
+ adc $ovf,$ovf,xzr // upmost overflow bit
+ stp $lo1,$hi1,[$tp,#-16]
+
+ cbnz $i,.Louter
+
+ // Final step. We see if result is larger than modulus, and
+ // if it is, subtract the modulus. But comparison implies
+ // subtraction. So we subtract modulus, see if it borrowed,
+ // and conditionally copy original value.
+ ldr $tj,[sp] // tp[0]
+ add $tp,sp,#8
+ ldr $nj,[$np],#8 // np[0]
+ subs $j,$num,#8 // j=num-1 and clear borrow
+ mov $ap,$rp
+.Lsub:
+ sbcs $aj,$tj,$nj // tp[j]-np[j]
+ ldr $tj,[$tp],#8
+ sub $j,$j,#8 // j--
+ ldr $nj,[$np],#8
+ str $aj,[$ap],#8 // rp[j]=tp[j]-np[j]
+ cbnz $j,.Lsub
+
+ sbcs $aj,$tj,$nj
+ sbcs $ovf,$ovf,xzr // did it borrow?
+ str $aj,[$ap],#8 // rp[num-1]
+
+ ldr $tj,[sp] // tp[0]
+ add $tp,sp,#8
+ ldr $aj,[$rp],#8 // rp[0]
+ sub $num,$num,#8 // num--
+ nop
+.Lcond_copy:
+ sub $num,$num,#8 // num--
+ csel $nj,$tj,$aj,lo // did it borrow?
+ ldr $tj,[$tp],#8
+ ldr $aj,[$rp],#8
+ str xzr,[$tp,#-16] // wipe tp
+ str $nj,[$rp,#-16]
+ cbnz $num,.Lcond_copy
+
+ csel $nj,$tj,$aj,lo
+ str xzr,[$tp,#-8] // wipe tp
+ str $nj,[$rp,#-8]
+
+ ldp x19,x20,[x29,#16]
+ mov sp,x29
+ ldp x21,x22,[x29,#32]
+ mov x0,#1
+ ldp x23,x24,[x29,#48]
+ ldr x29,[sp],#64
+ ret
+.size bn_mul_mont,.-bn_mul_mont
+___
+{
+########################################################################
+# Following is ARMv8 adaptation of sqrx8x_mont from x86_64-mont5 module.
+
+my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("x$_",(6..13));
+my ($t0,$t1,$t2,$t3)=map("x$_",(14..17));
+my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("x$_",(19..26));
+my ($cnt,$carry,$topmost)=("x27","x28","x30");
+my ($tp,$ap_end,$na0)=($bp,$np,$carry);
+
+$code.=<<___;
+.type __bn_sqr8x_mont,%function
+.align 5
+__bn_sqr8x_mont:
+ cmp $ap,$bp
+ b.ne __bn_mul4x_mont
+.Lsqr8x_mont:
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+ stp $rp,$np,[sp,#96] // offload rp and np
+
+ ldp $a0,$a1,[$ap,#8*0]
+ ldp $a2,$a3,[$ap,#8*2]
+ ldp $a4,$a5,[$ap,#8*4]
+ ldp $a6,$a7,[$ap,#8*6]
+
+ sub $tp,sp,$num,lsl#4
+ lsl $num,$num,#3
+ ldr $n0,[$n0] // *n0
+ mov sp,$tp // alloca
+ sub $cnt,$num,#8*8
+ b .Lsqr8x_zero_start
+
+.Lsqr8x_zero:
+ sub $cnt,$cnt,#8*8
+ stp xzr,xzr,[$tp,#8*0]
+ stp xzr,xzr,[$tp,#8*2]
+ stp xzr,xzr,[$tp,#8*4]
+ stp xzr,xzr,[$tp,#8*6]
+.Lsqr8x_zero_start:
+ stp xzr,xzr,[$tp,#8*8]
+ stp xzr,xzr,[$tp,#8*10]
+ stp xzr,xzr,[$tp,#8*12]
+ stp xzr,xzr,[$tp,#8*14]
+ add $tp,$tp,#8*16
+ cbnz $cnt,.Lsqr8x_zero
+
+ add $ap_end,$ap,$num
+ add $ap,$ap,#8*8
+ mov $acc0,xzr
+ mov $acc1,xzr
+ mov $acc2,xzr
+ mov $acc3,xzr
+ mov $acc4,xzr
+ mov $acc5,xzr
+ mov $acc6,xzr
+ mov $acc7,xzr
+ mov $tp,sp
+ str $n0,[x29,#112] // offload n0
+
+ // Multiply everything but a[i]*a[i]
+.align 4
+.Lsqr8x_outer_loop:
+ // a[1]a[0] (i)
+ // a[2]a[0]
+ // a[3]a[0]
+ // a[4]a[0]
+ // a[5]a[0]
+ // a[6]a[0]
+ // a[7]a[0]
+ // a[2]a[1] (ii)
+ // a[3]a[1]
+ // a[4]a[1]
+ // a[5]a[1]
+ // a[6]a[1]
+ // a[7]a[1]
+ // a[3]a[2] (iii)
+ // a[4]a[2]
+ // a[5]a[2]
+ // a[6]a[2]
+ // a[7]a[2]
+ // a[4]a[3] (iv)
+ // a[5]a[3]
+ // a[6]a[3]
+ // a[7]a[3]
+ // a[5]a[4] (v)
+ // a[6]a[4]
+ // a[7]a[4]
+ // a[6]a[5] (vi)
+ // a[7]a[5]
+ // a[7]a[6] (vii)
+
+ mul $t0,$a1,$a0 // lo(a[1..7]*a[0]) (i)
+ mul $t1,$a2,$a0
+ mul $t2,$a3,$a0
+ mul $t3,$a4,$a0
+ adds $acc1,$acc1,$t0 // t[1]+lo(a[1]*a[0])
+ mul $t0,$a5,$a0
+ adcs $acc2,$acc2,$t1
+ mul $t1,$a6,$a0
+ adcs $acc3,$acc3,$t2
+ mul $t2,$a7,$a0
+ adcs $acc4,$acc4,$t3
+ umulh $t3,$a1,$a0 // hi(a[1..7]*a[0])
+ adcs $acc5,$acc5,$t0
+ umulh $t0,$a2,$a0
+ adcs $acc6,$acc6,$t1
+ umulh $t1,$a3,$a0
+ adcs $acc7,$acc7,$t2
+ umulh $t2,$a4,$a0
+ stp $acc0,$acc1,[$tp],#8*2 // t[0..1]
+ adc $acc0,xzr,xzr // t[8]
+ adds $acc2,$acc2,$t3 // t[2]+lo(a[1]*a[0])
+ umulh $t3,$a5,$a0
+ adcs $acc3,$acc3,$t0
+ umulh $t0,$a6,$a0
+ adcs $acc4,$acc4,$t1
+ umulh $t1,$a7,$a0
+ adcs $acc5,$acc5,$t2
+ mul $t2,$a2,$a1 // lo(a[2..7]*a[1]) (ii)
+ adcs $acc6,$acc6,$t3
+ mul $t3,$a3,$a1
+ adcs $acc7,$acc7,$t0
+ mul $t0,$a4,$a1
+ adc $acc0,$acc0,$t1
+
+ mul $t1,$a5,$a1
+ adds $acc3,$acc3,$t2
+ mul $t2,$a6,$a1
+ adcs $acc4,$acc4,$t3
+ mul $t3,$a7,$a1
+ adcs $acc5,$acc5,$t0
+ umulh $t0,$a2,$a1 // hi(a[2..7]*a[1])
+ adcs $acc6,$acc6,$t1
+ umulh $t1,$a3,$a1
+ adcs $acc7,$acc7,$t2
+ umulh $t2,$a4,$a1
+ adcs $acc0,$acc0,$t3
+ umulh $t3,$a5,$a1
+ stp $acc2,$acc3,[$tp],#8*2 // t[2..3]
+ adc $acc1,xzr,xzr // t[9]
+ adds $acc4,$acc4,$t0
+ umulh $t0,$a6,$a1
+ adcs $acc5,$acc5,$t1
+ umulh $t1,$a7,$a1
+ adcs $acc6,$acc6,$t2
+ mul $t2,$a3,$a2 // lo(a[3..7]*a[2]) (iii)
+ adcs $acc7,$acc7,$t3
+ mul $t3,$a4,$a2
+ adcs $acc0,$acc0,$t0
+ mul $t0,$a5,$a2
+ adc $acc1,$acc1,$t1
+
+ mul $t1,$a6,$a2
+ adds $acc5,$acc5,$t2
+ mul $t2,$a7,$a2
+ adcs $acc6,$acc6,$t3
+ umulh $t3,$a3,$a2 // hi(a[3..7]*a[2])
+ adcs $acc7,$acc7,$t0
+ umulh $t0,$a4,$a2
+ adcs $acc0,$acc0,$t1
+ umulh $t1,$a5,$a2
+ adcs $acc1,$acc1,$t2
+ umulh $t2,$a6,$a2
+ stp $acc4,$acc5,[$tp],#8*2 // t[4..5]
+ adc $acc2,xzr,xzr // t[10]
+ adds $acc6,$acc6,$t3
+ umulh $t3,$a7,$a2
+ adcs $acc7,$acc7,$t0
+ mul $t0,$a4,$a3 // lo(a[4..7]*a[3]) (iv)
+ adcs $acc0,$acc0,$t1
+ mul $t1,$a5,$a3
+ adcs $acc1,$acc1,$t2
+ mul $t2,$a6,$a3
+ adc $acc2,$acc2,$t3
+
+ mul $t3,$a7,$a3
+ adds $acc7,$acc7,$t0
+ umulh $t0,$a4,$a3 // hi(a[4..7]*a[3])
+ adcs $acc0,$acc0,$t1
+ umulh $t1,$a5,$a3
+ adcs $acc1,$acc1,$t2
+ umulh $t2,$a6,$a3
+ adcs $acc2,$acc2,$t3
+ umulh $t3,$a7,$a3
+ stp $acc6,$acc7,[$tp],#8*2 // t[6..7]
+ adc $acc3,xzr,xzr // t[11]
+ adds $acc0,$acc0,$t0
+ mul $t0,$a5,$a4 // lo(a[5..7]*a[4]) (v)
+ adcs $acc1,$acc1,$t1
+ mul $t1,$a6,$a4
+ adcs $acc2,$acc2,$t2
+ mul $t2,$a7,$a4
+ adc $acc3,$acc3,$t3
+
+ umulh $t3,$a5,$a4 // hi(a[5..7]*a[4])
+ adds $acc1,$acc1,$t0
+ umulh $t0,$a6,$a4
+ adcs $acc2,$acc2,$t1
+ umulh $t1,$a7,$a4
+ adcs $acc3,$acc3,$t2
+ mul $t2,$a6,$a5 // lo(a[6..7]*a[5]) (vi)
+ adc $acc4,xzr,xzr // t[12]
+ adds $acc2,$acc2,$t3
+ mul $t3,$a7,$a5
+ adcs $acc3,$acc3,$t0
+ umulh $t0,$a6,$a5 // hi(a[6..7]*a[5])
+ adc $acc4,$acc4,$t1
+
+ umulh $t1,$a7,$a5
+ adds $acc3,$acc3,$t2
+ mul $t2,$a7,$a6 // lo(a[7]*a[6]) (vii)
+ adcs $acc4,$acc4,$t3
+ umulh $t3,$a7,$a6 // hi(a[7]*a[6])
+ adc $acc5,xzr,xzr // t[13]
+ adds $acc4,$acc4,$t0
+ sub $cnt,$ap_end,$ap // done yet?
+ adc $acc5,$acc5,$t1
+
+ adds $acc5,$acc5,$t2
+ sub $t0,$ap_end,$num // rewinded ap
+ adc $acc6,xzr,xzr // t[14]
+ add $acc6,$acc6,$t3
+
+ cbz $cnt,.Lsqr8x_outer_break
+
+ mov $n0,$a0
+ ldp $a0,$a1,[$tp,#8*0]
+ ldp $a2,$a3,[$tp,#8*2]
+ ldp $a4,$a5,[$tp,#8*4]
+ ldp $a6,$a7,[$tp,#8*6]
+ adds $acc0,$acc0,$a0
+ adcs $acc1,$acc1,$a1
+ ldp $a0,$a1,[$ap,#8*0]
+ adcs $acc2,$acc2,$a2
+ adcs $acc3,$acc3,$a3
+ ldp $a2,$a3,[$ap,#8*2]
+ adcs $acc4,$acc4,$a4
+ adcs $acc5,$acc5,$a5
+ ldp $a4,$a5,[$ap,#8*4]
+ adcs $acc6,$acc6,$a6
+ mov $rp,$ap
+ adcs $acc7,xzr,$a7
+ ldp $a6,$a7,[$ap,#8*6]
+ add $ap,$ap,#8*8
+ //adc $carry,xzr,xzr // moved below
+ mov $cnt,#-8*8
+
+ // a[8]a[0]
+ // a[9]a[0]
+ // a[a]a[0]
+ // a[b]a[0]
+ // a[c]a[0]
+ // a[d]a[0]
+ // a[e]a[0]
+ // a[f]a[0]
+ // a[8]a[1]
+ // a[f]a[1]........................
+ // a[8]a[2]
+ // a[f]a[2]........................
+ // a[8]a[3]
+ // a[f]a[3]........................
+ // a[8]a[4]
+ // a[f]a[4]........................
+ // a[8]a[5]
+ // a[f]a[5]........................
+ // a[8]a[6]
+ // a[f]a[6]........................
+ // a[8]a[7]
+ // a[f]a[7]........................
+.Lsqr8x_mul:
+ mul $t0,$a0,$n0
+ adc $carry,xzr,xzr // carry bit, modulo-scheduled
+ mul $t1,$a1,$n0
+ add $cnt,$cnt,#8
+ mul $t2,$a2,$n0
+ mul $t3,$a3,$n0
+ adds $acc0,$acc0,$t0
+ mul $t0,$a4,$n0
+ adcs $acc1,$acc1,$t1
+ mul $t1,$a5,$n0
+ adcs $acc2,$acc2,$t2
+ mul $t2,$a6,$n0
+ adcs $acc3,$acc3,$t3
+ mul $t3,$a7,$n0
+ adcs $acc4,$acc4,$t0
+ umulh $t0,$a0,$n0
+ adcs $acc5,$acc5,$t1
+ umulh $t1,$a1,$n0
+ adcs $acc6,$acc6,$t2
+ umulh $t2,$a2,$n0
+ adcs $acc7,$acc7,$t3
+ umulh $t3,$a3,$n0
+ adc $carry,$carry,xzr
+ str $acc0,[$tp],#8
+ adds $acc0,$acc1,$t0
+ umulh $t0,$a4,$n0
+ adcs $acc1,$acc2,$t1
+ umulh $t1,$a5,$n0
+ adcs $acc2,$acc3,$t2
+ umulh $t2,$a6,$n0
+ adcs $acc3,$acc4,$t3
+ umulh $t3,$a7,$n0
+ ldr $n0,[$rp,$cnt]
+ adcs $acc4,$acc5,$t0
+ adcs $acc5,$acc6,$t1
+ adcs $acc6,$acc7,$t2
+ adcs $acc7,$carry,$t3
+ //adc $carry,xzr,xzr // moved above
+ cbnz $cnt,.Lsqr8x_mul
+ // note that carry flag is guaranteed
+ // to be zero at this point
+ cmp $ap,$ap_end // done yet?
+ b.eq .Lsqr8x_break
+
+ ldp $a0,$a1,[$tp,#8*0]
+ ldp $a2,$a3,[$tp,#8*2]
+ ldp $a4,$a5,[$tp,#8*4]
+ ldp $a6,$a7,[$tp,#8*6]
+ adds $acc0,$acc0,$a0
+ ldr $n0,[$rp,#-8*8]
+ adcs $acc1,$acc1,$a1
+ ldp $a0,$a1,[$ap,#8*0]
+ adcs $acc2,$acc2,$a2
+ adcs $acc3,$acc3,$a3
+ ldp $a2,$a3,[$ap,#8*2]
+ adcs $acc4,$acc4,$a4
+ adcs $acc5,$acc5,$a5
+ ldp $a4,$a5,[$ap,#8*4]
+ adcs $acc6,$acc6,$a6
+ mov $cnt,#-8*8
+ adcs $acc7,$acc7,$a7
+ ldp $a6,$a7,[$ap,#8*6]
+ add $ap,$ap,#8*8
+ //adc $carry,xzr,xzr // moved above
+ b .Lsqr8x_mul
+
+.align 4
+.Lsqr8x_break:
+ ldp $a0,$a1,[$rp,#8*0]
+ add $ap,$rp,#8*8
+ ldp $a2,$a3,[$rp,#8*2]
+ sub $t0,$ap_end,$ap // is it last iteration?
+ ldp $a4,$a5,[$rp,#8*4]
+ sub $t1,$tp,$t0
+ ldp $a6,$a7,[$rp,#8*6]
+ cbz $t0,.Lsqr8x_outer_loop
+
+ stp $acc0,$acc1,[$tp,#8*0]
+ ldp $acc0,$acc1,[$t1,#8*0]
+ stp $acc2,$acc3,[$tp,#8*2]
+ ldp $acc2,$acc3,[$t1,#8*2]
+ stp $acc4,$acc5,[$tp,#8*4]
+ ldp $acc4,$acc5,[$t1,#8*4]
+ stp $acc6,$acc7,[$tp,#8*6]
+ mov $tp,$t1
+ ldp $acc6,$acc7,[$t1,#8*6]
+ b .Lsqr8x_outer_loop
+
+.align 4
+.Lsqr8x_outer_break:
+ // Now multiply above result by 2 and add a[n-1]*a[n-1]|...|a[0]*a[0]
+ ldp $a1,$a3,[$t0,#8*0] // recall that $t0 is &a[0]
+ ldp $t1,$t2,[sp,#8*1]
+ ldp $a5,$a7,[$t0,#8*2]
+ add $ap,$t0,#8*4
+ ldp $t3,$t0,[sp,#8*3]
+
+ stp $acc0,$acc1,[$tp,#8*0]
+ mul $acc0,$a1,$a1
+ stp $acc2,$acc3,[$tp,#8*2]
+ umulh $a1,$a1,$a1
+ stp $acc4,$acc5,[$tp,#8*4]
+ mul $a2,$a3,$a3
+ stp $acc6,$acc7,[$tp,#8*6]
+ mov $tp,sp
+ umulh $a3,$a3,$a3
+ adds $acc1,$a1,$t1,lsl#1
+ extr $t1,$t2,$t1,#63
+ sub $cnt,$num,#8*4
+
+.Lsqr4x_shift_n_add:
+ adcs $acc2,$a2,$t1
+ extr $t2,$t3,$t2,#63
+ sub $cnt,$cnt,#8*4
+ adcs $acc3,$a3,$t2
+ ldp $t1,$t2,[$tp,#8*5]
+ mul $a4,$a5,$a5
+ ldp $a1,$a3,[$ap],#8*2
+ umulh $a5,$a5,$a5
+ mul $a6,$a7,$a7
+ umulh $a7,$a7,$a7
+ extr $t3,$t0,$t3,#63
+ stp $acc0,$acc1,[$tp,#8*0]
+ adcs $acc4,$a4,$t3
+ extr $t0,$t1,$t0,#63
+ stp $acc2,$acc3,[$tp,#8*2]
+ adcs $acc5,$a5,$t0
+ ldp $t3,$t0,[$tp,#8*7]
+ extr $t1,$t2,$t1,#63
+ adcs $acc6,$a6,$t1
+ extr $t2,$t3,$t2,#63
+ adcs $acc7,$a7,$t2
+ ldp $t1,$t2,[$tp,#8*9]
+ mul $a0,$a1,$a1
+ ldp $a5,$a7,[$ap],#8*2
+ umulh $a1,$a1,$a1
+ mul $a2,$a3,$a3
+ umulh $a3,$a3,$a3
+ stp $acc4,$acc5,[$tp,#8*4]
+ extr $t3,$t0,$t3,#63
+ stp $acc6,$acc7,[$tp,#8*6]
+ add $tp,$tp,#8*8
+ adcs $acc0,$a0,$t3
+ extr $t0,$t1,$t0,#63
+ adcs $acc1,$a1,$t0
+ ldp $t3,$t0,[$tp,#8*3]
+ extr $t1,$t2,$t1,#63
+ cbnz $cnt,.Lsqr4x_shift_n_add
+___
+my ($np,$np_end)=($ap,$ap_end);
+$code.=<<___;
+ ldp $np,$n0,[x29,#104] // pull np and n0
+
+ adcs $acc2,$a2,$t1
+ extr $t2,$t3,$t2,#63
+ adcs $acc3,$a3,$t2
+ ldp $t1,$t2,[$tp,#8*5]
+ mul $a4,$a5,$a5
+ umulh $a5,$a5,$a5
+ stp $acc0,$acc1,[$tp,#8*0]
+ mul $a6,$a7,$a7
+ umulh $a7,$a7,$a7
+ stp $acc2,$acc3,[$tp,#8*2]
+ extr $t3,$t0,$t3,#63
+ adcs $acc4,$a4,$t3
+ extr $t0,$t1,$t0,#63
+ ldp $acc0,$acc1,[sp,#8*0]
+ adcs $acc5,$a5,$t0
+ extr $t1,$t2,$t1,#63
+ ldp $a0,$a1,[$np,#8*0]
+ adcs $acc6,$a6,$t1
+ extr $t2,xzr,$t2,#63
+ ldp $a2,$a3,[$np,#8*2]
+ adc $acc7,$a7,$t2
+ ldp $a4,$a5,[$np,#8*4]
+
+ // Reduce by 512 bits per iteration
+ mul $na0,$n0,$acc0 // t[0]*n0
+ ldp $a6,$a7,[$np,#8*6]
+ add $np_end,$np,$num
+ ldp $acc2,$acc3,[sp,#8*2]
+ stp $acc4,$acc5,[$tp,#8*4]
+ ldp $acc4,$acc5,[sp,#8*4]
+ stp $acc6,$acc7,[$tp,#8*6]
+ ldp $acc6,$acc7,[sp,#8*6]
+ add $np,$np,#8*8
+ mov $topmost,xzr // initial top-most carry
+ mov $tp,sp
+ mov $cnt,#8
+
+.Lsqr8x_reduction:
+ // (*) mul $t0,$a0,$na0 // lo(n[0-7])*lo(t[0]*n0)
+ mul $t1,$a1,$na0
+ sub $cnt,$cnt,#1
+ mul $t2,$a2,$na0
+ str $na0,[$tp],#8 // put aside t[0]*n0 for tail processing
+ mul $t3,$a3,$na0
+ // (*) adds xzr,$acc0,$t0
+ subs xzr,$acc0,#1 // (*)
+ mul $t0,$a4,$na0
+ adcs $acc0,$acc1,$t1
+ mul $t1,$a5,$na0
+ adcs $acc1,$acc2,$t2
+ mul $t2,$a6,$na0
+ adcs $acc2,$acc3,$t3
+ mul $t3,$a7,$na0
+ adcs $acc3,$acc4,$t0
+ umulh $t0,$a0,$na0 // hi(n[0-7])*lo(t[0]*n0)
+ adcs $acc4,$acc5,$t1
+ umulh $t1,$a1,$na0
+ adcs $acc5,$acc6,$t2
+ umulh $t2,$a2,$na0
+ adcs $acc6,$acc7,$t3
+ umulh $t3,$a3,$na0
+ adc $acc7,xzr,xzr
+ adds $acc0,$acc0,$t0
+ umulh $t0,$a4,$na0
+ adcs $acc1,$acc1,$t1
+ umulh $t1,$a5,$na0
+ adcs $acc2,$acc2,$t2
+ umulh $t2,$a6,$na0
+ adcs $acc3,$acc3,$t3
+ umulh $t3,$a7,$na0
+ mul $na0,$n0,$acc0 // next t[0]*n0
+ adcs $acc4,$acc4,$t0
+ adcs $acc5,$acc5,$t1
+ adcs $acc6,$acc6,$t2
+ adc $acc7,$acc7,$t3
+ cbnz $cnt,.Lsqr8x_reduction
+
+ ldp $t0,$t1,[$tp,#8*0]
+ ldp $t2,$t3,[$tp,#8*2]
+ mov $rp,$tp
+ sub $cnt,$np_end,$np // done yet?
+ adds $acc0,$acc0,$t0
+ adcs $acc1,$acc1,$t1
+ ldp $t0,$t1,[$tp,#8*4]
+ adcs $acc2,$acc2,$t2
+ adcs $acc3,$acc3,$t3
+ ldp $t2,$t3,[$tp,#8*6]
+ adcs $acc4,$acc4,$t0
+ adcs $acc5,$acc5,$t1
+ adcs $acc6,$acc6,$t2
+ adcs $acc7,$acc7,$t3
+ //adc $carry,xzr,xzr // moved below
+ cbz $cnt,.Lsqr8x8_post_condition
+
+ ldr $n0,[$tp,#-8*8]
+ ldp $a0,$a1,[$np,#8*0]
+ ldp $a2,$a3,[$np,#8*2]
+ ldp $a4,$a5,[$np,#8*4]
+ mov $cnt,#-8*8
+ ldp $a6,$a7,[$np,#8*6]
+ add $np,$np,#8*8
+
+.Lsqr8x_tail:
+ mul $t0,$a0,$n0
+ adc $carry,xzr,xzr // carry bit, modulo-scheduled
+ mul $t1,$a1,$n0
+ add $cnt,$cnt,#8
+ mul $t2,$a2,$n0
+ mul $t3,$a3,$n0
+ adds $acc0,$acc0,$t0
+ mul $t0,$a4,$n0
+ adcs $acc1,$acc1,$t1
+ mul $t1,$a5,$n0
+ adcs $acc2,$acc2,$t2
+ mul $t2,$a6,$n0
+ adcs $acc3,$acc3,$t3
+ mul $t3,$a7,$n0
+ adcs $acc4,$acc4,$t0
+ umulh $t0,$a0,$n0
+ adcs $acc5,$acc5,$t1
+ umulh $t1,$a1,$n0
+ adcs $acc6,$acc6,$t2
+ umulh $t2,$a2,$n0
+ adcs $acc7,$acc7,$t3
+ umulh $t3,$a3,$n0
+ adc $carry,$carry,xzr
+ str $acc0,[$tp],#8
+ adds $acc0,$acc1,$t0
+ umulh $t0,$a4,$n0
+ adcs $acc1,$acc2,$t1
+ umulh $t1,$a5,$n0
+ adcs $acc2,$acc3,$t2
+ umulh $t2,$a6,$n0
+ adcs $acc3,$acc4,$t3
+ umulh $t3,$a7,$n0
+ ldr $n0,[$rp,$cnt]
+ adcs $acc4,$acc5,$t0
+ adcs $acc5,$acc6,$t1
+ adcs $acc6,$acc7,$t2
+ adcs $acc7,$carry,$t3
+ //adc $carry,xzr,xzr // moved above
+ cbnz $cnt,.Lsqr8x_tail
+ // note that carry flag is guaranteed
+ // to be zero at this point
+ ldp $a0,$a1,[$tp,#8*0]
+ sub $cnt,$np_end,$np // done yet?
+	sub	$t2,$np_end,$num	// rewound np
+ ldp $a2,$a3,[$tp,#8*2]
+ ldp $a4,$a5,[$tp,#8*4]
+ ldp $a6,$a7,[$tp,#8*6]
+ cbz $cnt,.Lsqr8x_tail_break
+
+ ldr $n0,[$rp,#-8*8]
+ adds $acc0,$acc0,$a0
+ adcs $acc1,$acc1,$a1
+ ldp $a0,$a1,[$np,#8*0]
+ adcs $acc2,$acc2,$a2
+ adcs $acc3,$acc3,$a3
+ ldp $a2,$a3,[$np,#8*2]
+ adcs $acc4,$acc4,$a4
+ adcs $acc5,$acc5,$a5
+ ldp $a4,$a5,[$np,#8*4]
+ adcs $acc6,$acc6,$a6
+ mov $cnt,#-8*8
+ adcs $acc7,$acc7,$a7
+ ldp $a6,$a7,[$np,#8*6]
+ add $np,$np,#8*8
+ //adc $carry,xzr,xzr // moved above
+ b .Lsqr8x_tail
+
+.align 4
+.Lsqr8x_tail_break:
+ ldr $n0,[x29,#112] // pull n0
+ add $cnt,$tp,#8*8 // end of current t[num] window
+
+ subs xzr,$topmost,#1 // "move" top-most carry to carry bit
+ adcs $t0,$acc0,$a0
+ adcs $t1,$acc1,$a1
+ ldp $acc0,$acc1,[$rp,#8*0]
+ adcs $acc2,$acc2,$a2
+ ldp $a0,$a1,[$t2,#8*0] // recall that $t2 is &n[0]
+ adcs $acc3,$acc3,$a3
+ ldp $a2,$a3,[$t2,#8*2]
+ adcs $acc4,$acc4,$a4
+ adcs $acc5,$acc5,$a5
+ ldp $a4,$a5,[$t2,#8*4]
+ adcs $acc6,$acc6,$a6
+ adcs $acc7,$acc7,$a7
+ ldp $a6,$a7,[$t2,#8*6]
+ add $np,$t2,#8*8
+ adc $topmost,xzr,xzr // top-most carry
+ mul $na0,$n0,$acc0
+ stp $t0,$t1,[$tp,#8*0]
+ stp $acc2,$acc3,[$tp,#8*2]
+ ldp $acc2,$acc3,[$rp,#8*2]
+ stp $acc4,$acc5,[$tp,#8*4]
+ ldp $acc4,$acc5,[$rp,#8*4]
+ cmp $cnt,x29 // did we hit the bottom?
+ stp $acc6,$acc7,[$tp,#8*6]
+ mov $tp,$rp // slide the window
+ ldp $acc6,$acc7,[$rp,#8*6]
+ mov $cnt,#8
+ b.ne .Lsqr8x_reduction
+
+	// Final step. We check whether the result is larger than the
+	// modulus, and if it is, subtract the modulus. But comparison
+	// implies subtraction, so we subtract the modulus, see if it
+	// borrowed, and conditionally copy the original value.
+ ldr $rp,[x29,#96] // pull rp
+ add $tp,$tp,#8*8
+ subs $t0,$acc0,$a0
+ sbcs $t1,$acc1,$a1
+ sub $cnt,$num,#8*8
+ mov $ap_end,$rp // $rp copy
+
+.Lsqr8x_sub:
+ sbcs $t2,$acc2,$a2
+ ldp $a0,$a1,[$np,#8*0]
+ sbcs $t3,$acc3,$a3
+ stp $t0,$t1,[$rp,#8*0]
+ sbcs $t0,$acc4,$a4
+ ldp $a2,$a3,[$np,#8*2]
+ sbcs $t1,$acc5,$a5
+ stp $t2,$t3,[$rp,#8*2]
+ sbcs $t2,$acc6,$a6
+ ldp $a4,$a5,[$np,#8*4]
+ sbcs $t3,$acc7,$a7
+ ldp $a6,$a7,[$np,#8*6]
+ add $np,$np,#8*8
+ ldp $acc0,$acc1,[$tp,#8*0]
+ sub $cnt,$cnt,#8*8
+ ldp $acc2,$acc3,[$tp,#8*2]
+ ldp $acc4,$acc5,[$tp,#8*4]
+ ldp $acc6,$acc7,[$tp,#8*6]
+ add $tp,$tp,#8*8
+ stp $t0,$t1,[$rp,#8*4]
+ sbcs $t0,$acc0,$a0
+ stp $t2,$t3,[$rp,#8*6]
+ add $rp,$rp,#8*8
+ sbcs $t1,$acc1,$a1
+ cbnz $cnt,.Lsqr8x_sub
+
+ sbcs $t2,$acc2,$a2
+ mov $tp,sp
+ add $ap,sp,$num
+ ldp $a0,$a1,[$ap_end,#8*0]
+ sbcs $t3,$acc3,$a3
+ stp $t0,$t1,[$rp,#8*0]
+ sbcs $t0,$acc4,$a4
+ ldp $a2,$a3,[$ap_end,#8*2]
+ sbcs $t1,$acc5,$a5
+ stp $t2,$t3,[$rp,#8*2]
+ sbcs $t2,$acc6,$a6
+ ldp $acc0,$acc1,[$ap,#8*0]
+ sbcs $t3,$acc7,$a7
+ ldp $acc2,$acc3,[$ap,#8*2]
+ sbcs xzr,$topmost,xzr // did it borrow?
+ ldr x30,[x29,#8] // pull return address
+ stp $t0,$t1,[$rp,#8*4]
+ stp $t2,$t3,[$rp,#8*6]
+
+ sub $cnt,$num,#8*4
+.Lsqr4x_cond_copy:
+ sub $cnt,$cnt,#8*4
+ csel $t0,$acc0,$a0,lo
+ stp xzr,xzr,[$tp,#8*0]
+ csel $t1,$acc1,$a1,lo
+ ldp $a0,$a1,[$ap_end,#8*4]
+ ldp $acc0,$acc1,[$ap,#8*4]
+ csel $t2,$acc2,$a2,lo
+ stp xzr,xzr,[$tp,#8*2]
+ add $tp,$tp,#8*4
+ csel $t3,$acc3,$a3,lo
+ ldp $a2,$a3,[$ap_end,#8*6]
+ ldp $acc2,$acc3,[$ap,#8*6]
+ add $ap,$ap,#8*4
+ stp $t0,$t1,[$ap_end,#8*0]
+ stp $t2,$t3,[$ap_end,#8*2]
+ add $ap_end,$ap_end,#8*4
+ stp xzr,xzr,[$ap,#8*0]
+ stp xzr,xzr,[$ap,#8*2]
+ cbnz $cnt,.Lsqr4x_cond_copy
+
+ csel $t0,$acc0,$a0,lo
+ stp xzr,xzr,[$tp,#8*0]
+ csel $t1,$acc1,$a1,lo
+ stp xzr,xzr,[$tp,#8*2]
+ csel $t2,$acc2,$a2,lo
+ csel $t3,$acc3,$a3,lo
+ stp $t0,$t1,[$ap_end,#8*0]
+ stp $t2,$t3,[$ap_end,#8*2]
+
+ b .Lsqr8x_done
+
+.align 4
+.Lsqr8x8_post_condition:
+ adc $carry,xzr,xzr
+ ldr x30,[x29,#8] // pull return address
+ // $acc0-7,$carry hold result, $a0-7 hold modulus
+ subs $a0,$acc0,$a0
+ ldr $ap,[x29,#96] // pull rp
+ sbcs $a1,$acc1,$a1
+ stp xzr,xzr,[sp,#8*0]
+ sbcs $a2,$acc2,$a2
+ stp xzr,xzr,[sp,#8*2]
+ sbcs $a3,$acc3,$a3
+ stp xzr,xzr,[sp,#8*4]
+ sbcs $a4,$acc4,$a4
+ stp xzr,xzr,[sp,#8*6]
+ sbcs $a5,$acc5,$a5
+ stp xzr,xzr,[sp,#8*8]
+ sbcs $a6,$acc6,$a6
+ stp xzr,xzr,[sp,#8*10]
+ sbcs $a7,$acc7,$a7
+ stp xzr,xzr,[sp,#8*12]
+ sbcs $carry,$carry,xzr // did it borrow?
+ stp xzr,xzr,[sp,#8*14]
+
+ // $a0-7 hold result-modulus
+ csel $a0,$acc0,$a0,lo
+ csel $a1,$acc1,$a1,lo
+ csel $a2,$acc2,$a2,lo
+ csel $a3,$acc3,$a3,lo
+ stp $a0,$a1,[$ap,#8*0]
+ csel $a4,$acc4,$a4,lo
+ csel $a5,$acc5,$a5,lo
+ stp $a2,$a3,[$ap,#8*2]
+ csel $a6,$acc6,$a6,lo
+ csel $a7,$acc7,$a7,lo
+ stp $a4,$a5,[$ap,#8*4]
+ stp $a6,$a7,[$ap,#8*6]
+
+.Lsqr8x_done:
+ ldp x19,x20,[x29,#16]
+ mov sp,x29
+ ldp x21,x22,[x29,#32]
+ mov x0,#1
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldr x29,[sp],#128
+ ret
+.size __bn_sqr8x_mont,.-__bn_sqr8x_mont
+___
+}
+
+{
+########################################################################
+# Even though this might look like an ARMv8 adaptation of mulx4x_mont
+# from the x86_64-mont5 module, it's different in the sense that it
+# performs reduction 256 bits at a time.
+
+my ($a0,$a1,$a2,$a3,
+ $t0,$t1,$t2,$t3,
+ $m0,$m1,$m2,$m3,
+ $acc0,$acc1,$acc2,$acc3,$acc4,
+ $bi,$mi,$tp,$ap_end,$cnt) = map("x$_",(6..17,19..28));
+my $bp_end=$rp;
+my ($carry,$topmost) = ($rp,"x30");
+
+$code.=<<___;
+.type __bn_mul4x_mont,%function
+.align 5
+__bn_mul4x_mont:
+ stp x29,x30,[sp,#-128]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+ stp x25,x26,[sp,#64]
+ stp x27,x28,[sp,#80]
+
+ sub $tp,sp,$num,lsl#3
+ lsl $num,$num,#3
+ ldr $n0,[$n0] // *n0
+ sub sp,$tp,#8*4 // alloca
+
+ add $t0,$bp,$num
+ add $ap_end,$ap,$num
+ stp $rp,$t0,[x29,#96] // offload rp and &b[num]
+
+ ldr $bi,[$bp,#8*0] // b[0]
+ ldp $a0,$a1,[$ap,#8*0] // a[0..3]
+ ldp $a2,$a3,[$ap,#8*2]
+ add $ap,$ap,#8*4
+ mov $acc0,xzr
+ mov $acc1,xzr
+ mov $acc2,xzr
+ mov $acc3,xzr
+ ldp $m0,$m1,[$np,#8*0] // n[0..3]
+ ldp $m2,$m3,[$np,#8*2]
+ adds $np,$np,#8*4 // clear carry bit
+ mov $carry,xzr
+ mov $cnt,#0
+ mov $tp,sp
+
+.Loop_mul4x_1st_reduction:
+ mul $t0,$a0,$bi // lo(a[0..3]*b[0])
+ adc $carry,$carry,xzr // modulo-scheduled
+ mul $t1,$a1,$bi
+ add $cnt,$cnt,#8
+ mul $t2,$a2,$bi
+ and $cnt,$cnt,#31
+ mul $t3,$a3,$bi
+ adds $acc0,$acc0,$t0
+ umulh $t0,$a0,$bi // hi(a[0..3]*b[0])
+ adcs $acc1,$acc1,$t1
+ mul $mi,$acc0,$n0 // t[0]*n0
+ adcs $acc2,$acc2,$t2
+ umulh $t1,$a1,$bi
+ adcs $acc3,$acc3,$t3
+ umulh $t2,$a2,$bi
+ adc $acc4,xzr,xzr
+ umulh $t3,$a3,$bi
+ ldr $bi,[$bp,$cnt] // next b[i] (or b[0])
+ adds $acc1,$acc1,$t0
+ // (*) mul $t0,$m0,$mi // lo(n[0..3]*t[0]*n0)
+ str $mi,[$tp],#8 // put aside t[0]*n0 for tail processing
+ adcs $acc2,$acc2,$t1
+ mul $t1,$m1,$mi
+ adcs $acc3,$acc3,$t2
+ mul $t2,$m2,$mi
+ adc $acc4,$acc4,$t3 // can't overflow
+ mul $t3,$m3,$mi
+ // (*) adds xzr,$acc0,$t0
+ subs xzr,$acc0,#1 // (*)
+ umulh $t0,$m0,$mi // hi(n[0..3]*t[0]*n0)
+ adcs $acc0,$acc1,$t1
+ umulh $t1,$m1,$mi
+ adcs $acc1,$acc2,$t2
+ umulh $t2,$m2,$mi
+ adcs $acc2,$acc3,$t3
+ umulh $t3,$m3,$mi
+ adcs $acc3,$acc4,$carry
+ adc $carry,xzr,xzr
+ adds $acc0,$acc0,$t0
+ sub $t0,$ap_end,$ap
+ adcs $acc1,$acc1,$t1
+ adcs $acc2,$acc2,$t2
+ adcs $acc3,$acc3,$t3
+ //adc $carry,$carry,xzr
+ cbnz $cnt,.Loop_mul4x_1st_reduction
+
+ cbz $t0,.Lmul4x4_post_condition
+
+ ldp $a0,$a1,[$ap,#8*0] // a[4..7]
+ ldp $a2,$a3,[$ap,#8*2]
+ add $ap,$ap,#8*4
+ ldr $mi,[sp] // a[0]*n0
+ ldp $m0,$m1,[$np,#8*0] // n[4..7]
+ ldp $m2,$m3,[$np,#8*2]
+ add $np,$np,#8*4
+
+.Loop_mul4x_1st_tail:
+ mul $t0,$a0,$bi // lo(a[4..7]*b[i])
+ adc $carry,$carry,xzr // modulo-scheduled
+ mul $t1,$a1,$bi
+ add $cnt,$cnt,#8
+ mul $t2,$a2,$bi
+ and $cnt,$cnt,#31
+ mul $t3,$a3,$bi
+ adds $acc0,$acc0,$t0
+ umulh $t0,$a0,$bi // hi(a[4..7]*b[i])
+ adcs $acc1,$acc1,$t1
+ umulh $t1,$a1,$bi
+ adcs $acc2,$acc2,$t2
+ umulh $t2,$a2,$bi
+ adcs $acc3,$acc3,$t3
+ umulh $t3,$a3,$bi
+ adc $acc4,xzr,xzr
+ ldr $bi,[$bp,$cnt] // next b[i] (or b[0])
+ adds $acc1,$acc1,$t0
+ mul $t0,$m0,$mi // lo(n[4..7]*a[0]*n0)
+ adcs $acc2,$acc2,$t1
+ mul $t1,$m1,$mi
+ adcs $acc3,$acc3,$t2
+ mul $t2,$m2,$mi
+ adc $acc4,$acc4,$t3 // can't overflow
+ mul $t3,$m3,$mi
+ adds $acc0,$acc0,$t0
+ umulh $t0,$m0,$mi // hi(n[4..7]*a[0]*n0)
+ adcs $acc1,$acc1,$t1
+ umulh $t1,$m1,$mi
+ adcs $acc2,$acc2,$t2
+ umulh $t2,$m2,$mi
+ adcs $acc3,$acc3,$t3
+ adcs $acc4,$acc4,$carry
+ umulh $t3,$m3,$mi
+ adc $carry,xzr,xzr
+ ldr $mi,[sp,$cnt] // next t[0]*n0
+ str $acc0,[$tp],#8 // result!!!
+ adds $acc0,$acc1,$t0
+ sub $t0,$ap_end,$ap // done yet?
+ adcs $acc1,$acc2,$t1
+ adcs $acc2,$acc3,$t2
+ adcs $acc3,$acc4,$t3
+ //adc $carry,$carry,xzr
+ cbnz $cnt,.Loop_mul4x_1st_tail
+
+	sub	$t1,$ap_end,$num	// rewound $ap
+ cbz $t0,.Lmul4x_proceed
+
+ ldp $a0,$a1,[$ap,#8*0]
+ ldp $a2,$a3,[$ap,#8*2]
+ add $ap,$ap,#8*4
+ ldp $m0,$m1,[$np,#8*0]
+ ldp $m2,$m3,[$np,#8*2]
+ add $np,$np,#8*4
+ b .Loop_mul4x_1st_tail
+
+.align 5
+.Lmul4x_proceed:
+ ldr $bi,[$bp,#8*4]! // *++b
+ adc $topmost,$carry,xzr
+ ldp $a0,$a1,[$t1,#8*0] // a[0..3]
+ sub $np,$np,$num // rewind np
+ ldp $a2,$a3,[$t1,#8*2]
+ add $ap,$t1,#8*4
+
+ stp $acc0,$acc1,[$tp,#8*0] // result!!!
+ ldp $acc0,$acc1,[sp,#8*4] // t[0..3]
+ stp $acc2,$acc3,[$tp,#8*2] // result!!!
+ ldp $acc2,$acc3,[sp,#8*6]
+
+ ldp $m0,$m1,[$np,#8*0] // n[0..3]
+ mov $tp,sp
+ ldp $m2,$m3,[$np,#8*2]
+ adds $np,$np,#8*4 // clear carry bit
+ mov $carry,xzr
+
+.align 4
+.Loop_mul4x_reduction:
+ mul $t0,$a0,$bi // lo(a[0..3]*b[4])
+ adc $carry,$carry,xzr // modulo-scheduled
+ mul $t1,$a1,$bi
+ add $cnt,$cnt,#8
+ mul $t2,$a2,$bi
+ and $cnt,$cnt,#31
+ mul $t3,$a3,$bi
+ adds $acc0,$acc0,$t0
+ umulh $t0,$a0,$bi // hi(a[0..3]*b[4])
+ adcs $acc1,$acc1,$t1
+ mul $mi,$acc0,$n0 // t[0]*n0
+ adcs $acc2,$acc2,$t2
+ umulh $t1,$a1,$bi
+ adcs $acc3,$acc3,$t3
+ umulh $t2,$a2,$bi
+ adc $acc4,xzr,xzr
+ umulh $t3,$a3,$bi
+ ldr $bi,[$bp,$cnt] // next b[i]
+ adds $acc1,$acc1,$t0
+ // (*) mul $t0,$m0,$mi
+ str $mi,[$tp],#8 // put aside t[0]*n0 for tail processing
+ adcs $acc2,$acc2,$t1
+	mul	$t1,$m1,$mi		// lo(n[0..3]*t[0]*n0)
+ adcs $acc3,$acc3,$t2
+ mul $t2,$m2,$mi
+ adc $acc4,$acc4,$t3 // can't overflow
+ mul $t3,$m3,$mi
+ // (*) adds xzr,$acc0,$t0
+ subs xzr,$acc0,#1 // (*)
+	umulh	$t0,$m0,$mi		// hi(n[0..3]*t[0]*n0)
+ adcs $acc0,$acc1,$t1
+ umulh $t1,$m1,$mi
+ adcs $acc1,$acc2,$t2
+ umulh $t2,$m2,$mi
+ adcs $acc2,$acc3,$t3
+ umulh $t3,$m3,$mi
+ adcs $acc3,$acc4,$carry
+ adc $carry,xzr,xzr
+ adds $acc0,$acc0,$t0
+ adcs $acc1,$acc1,$t1
+ adcs $acc2,$acc2,$t2
+ adcs $acc3,$acc3,$t3
+ //adc $carry,$carry,xzr
+ cbnz $cnt,.Loop_mul4x_reduction
+
+ adc $carry,$carry,xzr
+ ldp $t0,$t1,[$tp,#8*4] // t[4..7]
+ ldp $t2,$t3,[$tp,#8*6]
+ ldp $a0,$a1,[$ap,#8*0] // a[4..7]
+ ldp $a2,$a3,[$ap,#8*2]
+ add $ap,$ap,#8*4
+ adds $acc0,$acc0,$t0
+ adcs $acc1,$acc1,$t1
+ adcs $acc2,$acc2,$t2
+ adcs $acc3,$acc3,$t3
+ //adc $carry,$carry,xzr
+
+ ldr $mi,[sp] // t[0]*n0
+ ldp $m0,$m1,[$np,#8*0] // n[4..7]
+ ldp $m2,$m3,[$np,#8*2]
+ add $np,$np,#8*4
+
+.align 4
+.Loop_mul4x_tail:
+ mul $t0,$a0,$bi // lo(a[4..7]*b[4])
+ adc $carry,$carry,xzr // modulo-scheduled
+ mul $t1,$a1,$bi
+ add $cnt,$cnt,#8
+ mul $t2,$a2,$bi
+ and $cnt,$cnt,#31
+ mul $t3,$a3,$bi
+ adds $acc0,$acc0,$t0
+ umulh $t0,$a0,$bi // hi(a[4..7]*b[4])
+ adcs $acc1,$acc1,$t1
+ umulh $t1,$a1,$bi
+ adcs $acc2,$acc2,$t2
+ umulh $t2,$a2,$bi
+ adcs $acc3,$acc3,$t3
+ umulh $t3,$a3,$bi
+ adc $acc4,xzr,xzr
+ ldr $bi,[$bp,$cnt] // next b[i]
+ adds $acc1,$acc1,$t0
+ mul $t0,$m0,$mi // lo(n[4..7]*t[0]*n0)
+ adcs $acc2,$acc2,$t1
+ mul $t1,$m1,$mi
+ adcs $acc3,$acc3,$t2
+ mul $t2,$m2,$mi
+ adc $acc4,$acc4,$t3 // can't overflow
+ mul $t3,$m3,$mi
+ adds $acc0,$acc0,$t0
+ umulh $t0,$m0,$mi // hi(n[4..7]*t[0]*n0)
+ adcs $acc1,$acc1,$t1
+ umulh $t1,$m1,$mi
+ adcs $acc2,$acc2,$t2
+ umulh $t2,$m2,$mi
+ adcs $acc3,$acc3,$t3
+ umulh $t3,$m3,$mi
+ adcs $acc4,$acc4,$carry
+ ldr $mi,[sp,$cnt] // next a[0]*n0
+ adc $carry,xzr,xzr
+ str $acc0,[$tp],#8 // result!!!
+ adds $acc0,$acc1,$t0
+ sub $t0,$ap_end,$ap // done yet?
+ adcs $acc1,$acc2,$t1
+ adcs $acc2,$acc3,$t2
+ adcs $acc3,$acc4,$t3
+ //adc $carry,$carry,xzr
+ cbnz $cnt,.Loop_mul4x_tail
+
+	sub	$t1,$np,$num		// rewound np?
+ adc $carry,$carry,xzr
+ cbz $t0,.Loop_mul4x_break
+
+ ldp $t0,$t1,[$tp,#8*4]
+ ldp $t2,$t3,[$tp,#8*6]
+ ldp $a0,$a1,[$ap,#8*0]
+ ldp $a2,$a3,[$ap,#8*2]
+ add $ap,$ap,#8*4
+ adds $acc0,$acc0,$t0
+ adcs $acc1,$acc1,$t1
+ adcs $acc2,$acc2,$t2
+ adcs $acc3,$acc3,$t3
+ //adc $carry,$carry,xzr
+ ldp $m0,$m1,[$np,#8*0]
+ ldp $m2,$m3,[$np,#8*2]
+ add $np,$np,#8*4
+ b .Loop_mul4x_tail
+
+.align 4
+.Loop_mul4x_break:
+ ldp $t2,$t3,[x29,#96] // pull rp and &b[num]
+ adds $acc0,$acc0,$topmost
+ add $bp,$bp,#8*4 // bp++
+ adcs $acc1,$acc1,xzr
+ sub $ap,$ap,$num // rewind ap
+ adcs $acc2,$acc2,xzr
+ stp $acc0,$acc1,[$tp,#8*0] // result!!!
+ adcs $acc3,$acc3,xzr
+ ldp $acc0,$acc1,[sp,#8*4] // t[0..3]
+ adc $topmost,$carry,xzr
+ stp $acc2,$acc3,[$tp,#8*2] // result!!!
+ cmp $bp,$t3 // done yet?
+ ldp $acc2,$acc3,[sp,#8*6]
+ ldp $m0,$m1,[$t1,#8*0] // n[0..3]
+ ldp $m2,$m3,[$t1,#8*2]
+ add $np,$t1,#8*4
+ b.eq .Lmul4x_post
+
+ ldr $bi,[$bp]
+ ldp $a0,$a1,[$ap,#8*0] // a[0..3]
+ ldp $a2,$a3,[$ap,#8*2]
+ adds $ap,$ap,#8*4 // clear carry bit
+ mov $carry,xzr
+ mov $tp,sp
+ b .Loop_mul4x_reduction
+
+.align 4
+.Lmul4x_post:
+	// Final step. We check whether the result is larger than the
+	// modulus, and if it is, subtract the modulus. But comparison
+	// implies subtraction, so we subtract the modulus, see if it
+	// borrowed, and conditionally copy the original value.
+ mov $rp,$t2
+ mov $ap_end,$t2 // $rp copy
+ subs $t0,$acc0,$m0
+ add $tp,sp,#8*8
+ sbcs $t1,$acc1,$m1
+ sub $cnt,$num,#8*4
+
+.Lmul4x_sub:
+ sbcs $t2,$acc2,$m2
+ ldp $m0,$m1,[$np,#8*0]
+ sub $cnt,$cnt,#8*4
+ ldp $acc0,$acc1,[$tp,#8*0]
+ sbcs $t3,$acc3,$m3
+ ldp $m2,$m3,[$np,#8*2]
+ add $np,$np,#8*4
+ ldp $acc2,$acc3,[$tp,#8*2]
+ add $tp,$tp,#8*4
+ stp $t0,$t1,[$rp,#8*0]
+ sbcs $t0,$acc0,$m0
+ stp $t2,$t3,[$rp,#8*2]
+ add $rp,$rp,#8*4
+ sbcs $t1,$acc1,$m1
+ cbnz $cnt,.Lmul4x_sub
+
+ sbcs $t2,$acc2,$m2
+ mov $tp,sp
+ add $ap,sp,#8*4
+ ldp $a0,$a1,[$ap_end,#8*0]
+ sbcs $t3,$acc3,$m3
+ stp $t0,$t1,[$rp,#8*0]
+ ldp $a2,$a3,[$ap_end,#8*2]
+ stp $t2,$t3,[$rp,#8*2]
+ ldp $acc0,$acc1,[$ap,#8*0]
+ ldp $acc2,$acc3,[$ap,#8*2]
+ sbcs xzr,$topmost,xzr // did it borrow?
+ ldr x30,[x29,#8] // pull return address
+
+ sub $cnt,$num,#8*4
+.Lmul4x_cond_copy:
+ sub $cnt,$cnt,#8*4
+ csel $t0,$acc0,$a0,lo
+ stp xzr,xzr,[$tp,#8*0]
+ csel $t1,$acc1,$a1,lo
+ ldp $a0,$a1,[$ap_end,#8*4]
+ ldp $acc0,$acc1,[$ap,#8*4]
+ csel $t2,$acc2,$a2,lo
+ stp xzr,xzr,[$tp,#8*2]
+ add $tp,$tp,#8*4
+ csel $t3,$acc3,$a3,lo
+ ldp $a2,$a3,[$ap_end,#8*6]
+ ldp $acc2,$acc3,[$ap,#8*6]
+ add $ap,$ap,#8*4
+ stp $t0,$t1,[$ap_end,#8*0]
+ stp $t2,$t3,[$ap_end,#8*2]
+ add $ap_end,$ap_end,#8*4
+ cbnz $cnt,.Lmul4x_cond_copy
+
+ csel $t0,$acc0,$a0,lo
+ stp xzr,xzr,[$tp,#8*0]
+ csel $t1,$acc1,$a1,lo
+ stp xzr,xzr,[$tp,#8*2]
+ csel $t2,$acc2,$a2,lo
+ stp xzr,xzr,[$tp,#8*3]
+ csel $t3,$acc3,$a3,lo
+ stp xzr,xzr,[$tp,#8*4]
+ stp $t0,$t1,[$ap_end,#8*0]
+ stp $t2,$t3,[$ap_end,#8*2]
+
+ b .Lmul4x_done
+
+.align 4
+.Lmul4x4_post_condition:
+ adc $carry,$carry,xzr
+ ldr $ap,[x29,#96] // pull rp
+	// $acc0-3,$carry hold result, $m0-3 hold modulus
+ subs $a0,$acc0,$m0
+ ldr x30,[x29,#8] // pull return address
+ sbcs $a1,$acc1,$m1
+ stp xzr,xzr,[sp,#8*0]
+ sbcs $a2,$acc2,$m2
+ stp xzr,xzr,[sp,#8*2]
+ sbcs $a3,$acc3,$m3
+ stp xzr,xzr,[sp,#8*4]
+ sbcs xzr,$carry,xzr // did it borrow?
+ stp xzr,xzr,[sp,#8*6]
+
+ // $a0-3 hold result-modulus
+ csel $a0,$acc0,$a0,lo
+ csel $a1,$acc1,$a1,lo
+ csel $a2,$acc2,$a2,lo
+ csel $a3,$acc3,$a3,lo
+ stp $a0,$a1,[$ap,#8*0]
+ stp $a2,$a3,[$ap,#8*2]
+
+.Lmul4x_done:
+ ldp x19,x20,[x29,#16]
+ mov sp,x29
+ ldp x21,x22,[x29,#32]
+ mov x0,#1
+ ldp x23,x24,[x29,#48]
+ ldp x25,x26,[x29,#64]
+ ldp x27,x28,[x29,#80]
+ ldr x29,[sp],#128
+ ret
+.size __bn_mul4x_mont,.-__bn_mul4x_mont
+___
+}
+$code.=<<___;
+.asciz "Montgomery Multiplication for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
+.align 4
+___
+
+print $code;
+
+close STDOUT;
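The "final step" commented in the ARMv8 code above (subtract the modulus, check the
borrow, conditionally copy the original value) is the standard branch-free Montgomery
tail. The following is a minimal C sketch of that step only, assuming 64-bit limbs;
the names cond_sub and bn_word are illustrative and not part of the OpenSSL API.

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t bn_word;               /* assumed 64-bit limb, as on ARMv8 */

    /* rp <- (topmost:tp) - np if that does not go negative, else tp.
     * The selection is done with masks instead of a branch, mirroring
     * the sbcs/csel sequence in the assembly above. */
    static void cond_sub(bn_word *rp, const bn_word *tp, const bn_word *np,
                         size_t num, bn_word topmost)
    {
        bn_word borrow = 0;
        size_t  i;

        for (i = 0; i < num; i++) {
            bn_word ti = tp[i], ni = np[i];
            rp[i]  = ti - ni - borrow;
            borrow = (ti < ni) | ((ti == ni) & borrow);
        }

        /* keep the difference only if the full (topmost:tp) value did not borrow */
        {
            bn_word keep_diff = (bn_word)0 - (bn_word)(topmost >= borrow);
            for (i = 0; i < num; i++)
                rp[i] = (rp[i] & keep_diff) | (tp[i] & ~keep_diff);
        }
    }

As in the assembly, the point of the mask-based copy is that the same memory traffic
happens whether or not the subtraction is kept, so the tail leaks no data-dependent
branch.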
diff --git a/openssl-1.1.0h/crypto/bn/asm/bn-586.pl b/openssl-1.1.0h/crypto/bn/asm/bn-586.pl
new file mode 100644
index 0000000..1ca1bbf
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/bn-586.pl
@@ -0,0 +1,785 @@
+#! /usr/bin/env perl
+# Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],$0);
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+&bn_mul_add_words("bn_mul_add_words");
+&bn_mul_words("bn_mul_words");
+&bn_sqr_words("bn_sqr_words");
+&bn_div_words("bn_div_words");
+&bn_add_words("bn_add_words");
+&bn_sub_words("bn_sub_words");
+&bn_sub_part_words("bn_sub_part_words");
+
+&asm_finish();
+
+close STDOUT;
+
+sub bn_mul_add_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("maw_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+ &movd("mm0",&wparam(3)); # mm0 = w
+ &pxor("mm1","mm1"); # mm1 = carry_in
+ &jmp(&label("maw_sse2_entry"));
+
+ &set_label("maw_sse2_unrolled",16);
+ &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0]
+ &paddq("mm1","mm3"); # mm1 = carry_in + r[0]
+ &movd("mm2",&DWP(0,$a,"",0)); # mm2 = a[0]
+ &pmuludq("mm2","mm0"); # mm2 = w*a[0]
+ &movd("mm4",&DWP(4,$a,"",0)); # mm4 = a[1]
+ &pmuludq("mm4","mm0"); # mm4 = w*a[1]
+ &movd("mm6",&DWP(8,$a,"",0)); # mm6 = a[2]
+ &pmuludq("mm6","mm0"); # mm6 = w*a[2]
+ &movd("mm7",&DWP(12,$a,"",0)); # mm7 = a[3]
+ &pmuludq("mm7","mm0"); # mm7 = w*a[3]
+ &paddq("mm1","mm2"); # mm1 = carry_in + r[0] + w*a[0]
+ &movd("mm3",&DWP(4,$r,"",0)); # mm3 = r[1]
+ &paddq("mm3","mm4"); # mm3 = r[1] + w*a[1]
+ &movd("mm5",&DWP(8,$r,"",0)); # mm5 = r[2]
+ &paddq("mm5","mm6"); # mm5 = r[2] + w*a[2]
+ &movd("mm4",&DWP(12,$r,"",0)); # mm4 = r[3]
+ &paddq("mm7","mm4"); # mm7 = r[3] + w*a[3]
+ &movd(&DWP(0,$r,"",0),"mm1");
+ &movd("mm2",&DWP(16,$a,"",0)); # mm2 = a[4]
+ &pmuludq("mm2","mm0"); # mm2 = w*a[4]
+ &psrlq("mm1",32); # mm1 = carry0
+ &movd("mm4",&DWP(20,$a,"",0)); # mm4 = a[5]
+ &pmuludq("mm4","mm0"); # mm4 = w*a[5]
+ &paddq("mm1","mm3"); # mm1 = carry0 + r[1] + w*a[1]
+ &movd("mm6",&DWP(24,$a,"",0)); # mm6 = a[6]
+ &pmuludq("mm6","mm0"); # mm6 = w*a[6]
+ &movd(&DWP(4,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry1
+ &movd("mm3",&DWP(28,$a,"",0)); # mm3 = a[7]
+ &add($a,32);
+ &pmuludq("mm3","mm0"); # mm3 = w*a[7]
+ &paddq("mm1","mm5"); # mm1 = carry1 + r[2] + w*a[2]
+ &movd("mm5",&DWP(16,$r,"",0)); # mm5 = r[4]
+ &paddq("mm2","mm5"); # mm2 = r[4] + w*a[4]
+ &movd(&DWP(8,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry2
+ &paddq("mm1","mm7"); # mm1 = carry2 + r[3] + w*a[3]
+ &movd("mm5",&DWP(20,$r,"",0)); # mm5 = r[5]
+ &paddq("mm4","mm5"); # mm4 = r[5] + w*a[5]
+ &movd(&DWP(12,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry3
+ &paddq("mm1","mm2"); # mm1 = carry3 + r[4] + w*a[4]
+ &movd("mm5",&DWP(24,$r,"",0)); # mm5 = r[6]
+ &paddq("mm6","mm5"); # mm6 = r[6] + w*a[6]
+ &movd(&DWP(16,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry4
+ &paddq("mm1","mm4"); # mm1 = carry4 + r[5] + w*a[5]
+ &movd("mm5",&DWP(28,$r,"",0)); # mm5 = r[7]
+ &paddq("mm3","mm5"); # mm3 = r[7] + w*a[7]
+ &movd(&DWP(20,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry5
+ &paddq("mm1","mm6"); # mm1 = carry5 + r[6] + w*a[6]
+ &movd(&DWP(24,$r,"",0),"mm1");
+ &psrlq("mm1",32); # mm1 = carry6
+ &paddq("mm1","mm3"); # mm1 = carry6 + r[7] + w*a[7]
+ &movd(&DWP(28,$r,"",0),"mm1");
+ &lea($r,&DWP(32,$r));
+ &psrlq("mm1",32); # mm1 = carry_out
+
+ &sub($c,8);
+ &jz(&label("maw_sse2_exit"));
+ &set_label("maw_sse2_entry");
+ &test($c,0xfffffff8);
+ &jnz(&label("maw_sse2_unrolled"));
+
+ &set_label("maw_sse2_loop",4);
+ &movd("mm2",&DWP(0,$a)); # mm2 = a[i]
+ &movd("mm3",&DWP(0,$r)); # mm3 = r[i]
+ &pmuludq("mm2","mm0"); # a[i] *= w
+ &lea($a,&DWP(4,$a));
+ &paddq("mm1","mm3"); # carry += r[i]
+ &paddq("mm1","mm2"); # carry += a[i]*w
+ &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low
+ &sub($c,1);
+ &psrlq("mm1",32); # carry = carry_high
+ &lea($r,&DWP(4,$r));
+ &jnz(&label("maw_sse2_loop"));
+ &set_label("maw_sse2_exit");
+ &movd("eax","mm1"); # c = carry_out
+ &emms();
+ &ret();
+
+ &set_label("maw_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ebp";
+ $r="edi";
+ $c="esi";
+
+ &xor($c,$c); # clear carry
+ &mov($r,&wparam(0)); #
+
+ &mov("ecx",&wparam(2)); #
+ &mov($a,&wparam(1)); #
+
+ &and("ecx",0xfffffff8); # num / 8
+ &mov($w,&wparam(3)); #
+
+	&push("ecx");			# make room on the stack for a tmp variable
+
+ &jz(&label("maw_finish"));
+
+ &set_label("maw_loop",16);
+
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+= c
+ &adc("edx",0); # H(t)+=carry
+ &add("eax",&DWP($i,$r)); # L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &sub("ecx",8);
+ &lea($a,&DWP(32,$a));
+ &lea($r,&DWP(32,$r));
+ &jnz(&label("maw_loop"));
+
+ &set_label("maw_finish",0);
+ &mov("ecx",&wparam(2)); # get num
+ &and("ecx",7);
+ &jnz(&label("maw_finish2")); # helps branch prediction
+ &jmp(&label("maw_end"));
+
+ &set_label("maw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ &adc("edx",0); # H(t)+=carry
+ &add("eax",&DWP($i*4,$r)); # L(t)+= *r
+ &adc("edx",0); # H(t)+=carry
+ &dec("ecx") if ($i != 7-1);
+ &mov(&DWP($i*4,$r),"eax"); # *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &jz(&label("maw_end")) if ($i != 7-1);
+ }
+ &set_label("maw_end",0);
+ &mov("eax",$c);
+
+	&pop("ecx");			# remove the tmp variable from the stack
+
+ &function_end($name);
+ }
+
+sub bn_mul_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("mw_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+ &movd("mm0",&wparam(3)); # mm0 = w
+ &pxor("mm1","mm1"); # mm1 = carry = 0
+
+ &set_label("mw_sse2_loop",16);
+ &movd("mm2",&DWP(0,$a)); # mm2 = a[i]
+ &pmuludq("mm2","mm0"); # a[i] *= w
+ &lea($a,&DWP(4,$a));
+ &paddq("mm1","mm2"); # carry += a[i]*w
+ &movd(&DWP(0,$r),"mm1"); # r[i] = carry_low
+ &sub($c,1);
+ &psrlq("mm1",32); # carry = carry_high
+ &lea($r,&DWP(4,$r));
+ &jnz(&label("mw_sse2_loop"));
+
+ &movd("eax","mm1"); # return carry
+ &emms();
+ &ret();
+ &set_label("mw_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
+
+ &comment("");
+ $Low="eax";
+ $High="edx";
+ $a="ebx";
+ $w="ecx";
+ $r="edi";
+ $c="esi";
+ $num="ebp";
+
+ &xor($c,$c); # clear carry
+ &mov($r,&wparam(0)); #
+ &mov($a,&wparam(1)); #
+ &mov($num,&wparam(2)); #
+ &mov($w,&wparam(3)); #
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("mw_finish"));
+
+ &set_label("mw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i,$r,"",0),"eax"); # *r= L(t);
+
+ &mov($c,"edx"); # c= H(t);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jz(&label("mw_finish"));
+ &jmp(&label("mw_loop"));
+
+ &set_label("mw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jnz(&label("mw_finish2"));
+ &jmp(&label("mw_end"));
+
+ &set_label("mw_finish2",1);
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0));# *a
+ &mul($w); # *a * w
+ &add("eax",$c); # L(t)+=c
+ # XXX
+ &adc("edx",0); # H(t)+=carry
+ &mov(&DWP($i*4,$r,"",0),"eax");# *r= L(t);
+ &mov($c,"edx"); # c= H(t);
+ &dec($num) if ($i != 7-1);
+ &jz(&label("mw_end")) if ($i != 7-1);
+ }
+ &set_label("mw_end",0);
+ &mov("eax",$c);
+
+ &function_end($name);
+ }
+
+sub bn_sqr_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,$sse2?"EXTRN\t_OPENSSL_ia32cap_P:DWORD":"");
+
+ $r="eax";
+ $a="edx";
+ $c="ecx";
+
+ if ($sse2) {
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt(&DWP(0,"eax"),26);
+ &jnc(&label("sqr_non_sse2"));
+
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &mov($c,&wparam(2));
+
+ &set_label("sqr_sse2_loop",16);
+ &movd("mm0",&DWP(0,$a)); # mm0 = a[i]
+ &pmuludq("mm0","mm0"); # a[i] *= a[i]
+ &lea($a,&DWP(4,$a)); # a++
+ &movq(&QWP(0,$r),"mm0"); # r[i] = a[i]*a[i]
+ &sub($c,1);
+ &lea($r,&DWP(8,$r)); # r += 2
+ &jnz(&label("sqr_sse2_loop"));
+
+ &emms();
+ &ret();
+ &set_label("sqr_non_sse2",16);
+ }
+
+ # function_begin prologue
+ &push("ebp");
+ &push("ebx");
+ &push("esi");
+ &push("edi");
+
+ &comment("");
+ $r="esi";
+ $a="edi";
+ $num="ebx";
+
+ &mov($r,&wparam(0)); #
+ &mov($a,&wparam(1)); #
+ &mov($num,&wparam(2)); #
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("sw_finish"));
+
+ &set_label("sw_loop",0);
+ for ($i=0; $i<32; $i+=4)
+ {
+ &comment("Round $i");
+ &mov("eax",&DWP($i,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*2,$r,"",0),"eax"); #
+ &mov(&DWP($i*2+4,$r,"",0),"edx");#
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,64);
+ &sub($num,8);
+ &jnz(&label("sw_loop"));
+
+ &set_label("sw_finish",0);
+ &mov($num,&wparam(2)); # get num
+ &and($num,7);
+ &jz(&label("sw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov("eax",&DWP($i*4,$a,"",0)); # *a
+ # XXX
+ &mul("eax"); # *a * *a
+ &mov(&DWP($i*8,$r,"",0),"eax"); #
+ &dec($num) if ($i != 7-1);
+ &mov(&DWP($i*8+4,$r,"",0),"edx");
+ &jz(&label("sw_end")) if ($i != 7-1);
+ }
+ &set_label("sw_end",0);
+
+ &function_end($name);
+ }
+
+sub bn_div_words
+ {
+ local($name)=@_;
+
+ &function_begin_B($name,"");
+ &mov("edx",&wparam(0)); #
+ &mov("eax",&wparam(1)); #
+ &mov("ecx",&wparam(2)); #
+ &div("ecx");
+ &ret();
+ &function_end_B($name);
+ }
+
+sub bn_add_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &add($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &add($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+sub bn_sub_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
+
+sub bn_sub_part_words
+ {
+ local($name)=@_;
+
+ &function_begin($name,"");
+
+ &comment("");
+ $a="esi";
+ $b="edi";
+ $c="eax";
+ $r="ebx";
+ $tmp1="ecx";
+ $tmp2="edx";
+ $num="ebp";
+
+ &mov($r,&wparam(0)); # get r
+ &mov($a,&wparam(1)); # get a
+ &mov($b,&wparam(2)); # get b
+ &mov($num,&wparam(3)); # get num
+ &xor($c,$c); # clear carry
+ &and($num,0xfffffff8); # num / 8
+
+ &jz(&label("aw_finish"));
+
+ &set_label("aw_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("aw_loop"));
+
+ &set_label("aw_finish",0);
+ &mov($num,&wparam(3)); # get num
+ &and($num,7);
+ &jz(&label("aw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("Tail Round $i");
+ &mov($tmp1,&DWP(0,$a,"",0)); # *a
+ &mov($tmp2,&DWP(0,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP(0,$r,"",0),$tmp1); # *r
+ &add($a, 4);
+ &add($b, 4);
+ &add($r, 4);
+ &dec($num) if ($i != 6);
+ &jz(&label("aw_end")) if ($i != 6);
+ }
+ &set_label("aw_end",0);
+
+ &cmp(&wparam(4),0);
+ &je(&label("pw_end"));
+
+ &mov($num,&wparam(4)); # get dl
+ &cmp($num,0);
+ &je(&label("pw_end"));
+ &jge(&label("pw_pos"));
+
+ &comment("pw_neg");
+ &mov($tmp2,0);
+ &sub($tmp2,$num);
+ &mov($num,$tmp2);
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("pw_neg_finish"));
+
+ &set_label("pw_neg_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("dl<0 Round $i");
+
+ &mov($tmp1,0);
+ &mov($tmp2,&DWP($i*4,$b,"",0)); # *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ }
+
+ &comment("");
+ &add($b,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_neg_loop"));
+
+ &set_label("pw_neg_finish",0);
+ &mov($tmp2,&wparam(4)); # get dl
+ &mov($num,0);
+ &sub($num,$tmp2);
+ &and($num,7);
+ &jz(&label("pw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("dl<0 Tail Round $i");
+ &mov($tmp1,0);
+ &mov($tmp2,&DWP($i*4,$b,"",0));# *b
+ &sub($tmp1,$c);
+ &mov($c,0);
+ &adc($c,$c);
+ &sub($tmp1,$tmp2);
+ &adc($c,0);
+ &dec($num) if ($i != 6);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jz(&label("pw_end")) if ($i != 6);
+ }
+
+ &jmp(&label("pw_end"));
+
+ &set_label("pw_pos",0);
+
+ &and($num,0xfffffff8); # num / 8
+ &jz(&label("pw_pos_finish"));
+
+ &set_label("pw_pos_loop",0);
+
+ for ($i=0; $i<8; $i++)
+ {
+ &comment("dl>0 Round $i");
+
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &sub($tmp1,$c);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jnc(&label("pw_nc".$i));
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_pos_loop"));
+
+ &set_label("pw_pos_finish",0);
+ &mov($num,&wparam(4)); # get dl
+ &and($num,7);
+ &jz(&label("pw_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &comment("dl>0 Tail Round $i");
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &sub($tmp1,$c);
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &jnc(&label("pw_tail_nc".$i));
+ &dec($num) if ($i != 6);
+ &jz(&label("pw_end")) if ($i != 6);
+ }
+ &mov($c,1);
+ &jmp(&label("pw_end"));
+
+ &set_label("pw_nc_loop",0);
+ for ($i=0; $i<8; $i++)
+ {
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &set_label("pw_nc".$i,0);
+ }
+
+ &comment("");
+ &add($a,32);
+ &add($r,32);
+ &sub($num,8);
+ &jnz(&label("pw_nc_loop"));
+
+ &mov($num,&wparam(4)); # get dl
+ &and($num,7);
+ &jz(&label("pw_nc_end"));
+
+ for ($i=0; $i<7; $i++)
+ {
+ &mov($tmp1,&DWP($i*4,$a,"",0)); # *a
+ &mov(&DWP($i*4,$r,"",0),$tmp1); # *r
+ &set_label("pw_tail_nc".$i,0);
+ &dec($num) if ($i != 6);
+ &jz(&label("pw_nc_end")) if ($i != 6);
+ }
+
+ &set_label("pw_nc_end",0);
+ &mov($c,0);
+
+ &set_label("pw_end",0);
+
+# &mov("eax",$c); # $c is "eax"
+
+ &function_end($name);
+ }
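Both the SSE2 path and the integer path generated above compute the same word-level
multiply-accumulate with carry propagation. A reference model in C may make the
unrolled "Round" blocks easier to follow; this is a sketch with a hypothetical name
(mul_add_words_ref) and 32-bit limbs assumed, not the OpenSSL C fallback itself.

    #include <stddef.h>
    #include <stdint.h>

    /* r[i] = low32(r[i] + a[i]*w + carry), carry = high32(...),
     * returning the final carry -- exactly what each round above does with
     * mul/add/adc, and what the SSE2 path accumulates in mm1. */
    uint32_t mul_add_words_ref(uint32_t *r, const uint32_t *a,
                               size_t num, uint32_t w)
    {
        uint64_t carry = 0;
        size_t   i;

        for (i = 0; i < num; i++) {
            uint64_t t = (uint64_t)a[i] * w + r[i] + carry;
            r[i]  = (uint32_t)t;            /* L(t) */
            carry = t >> 32;                /* H(t) */
        }
        return (uint32_t)carry;
    }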
diff --git a/openssl-1.1.0h/crypto/bn/asm/bn-c64xplus.asm b/openssl-1.1.0h/crypto/bn/asm/bn-c64xplus.asm
new file mode 100644
index 0000000..de6d377
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/bn-c64xplus.asm
@@ -0,0 +1,382 @@
+;; Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
+;;
+;; Licensed under the OpenSSL license (the "License"). You may not use
+;; this file except in compliance with the License. You can obtain a copy
+;; in the file LICENSE in the source distribution or at
+;; https://www.openssl.org/source/license.html
+;;
+;;====================================================================
+;; Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+;; project.
+;;
+;; Rights for redistribution and usage in source and binary forms are
+;; granted according to the OpenSSL license. Warranty of any kind is
+;; disclaimed.
+;;====================================================================
+;; A compiler-generated multiply-and-add SPLOOP runs at 12*n cycles, n
+;; being the number of 32-bit words, and addition at 8*n. The corresponding
+;; 4x unrolled SPLOOP-free loops run at ~8*n and ~5*n. The assembler
+;; SPLOOPs below spin at ... 2*n cycles [plus epilogue].
+;;====================================================================
+ .text
+
+ .if .ASSEMBLER_VERSION<7000000
+ .asg 0,__TI_EABI__
+ .endif
+ .if __TI_EABI__
+ .asg bn_mul_add_words,_bn_mul_add_words
+ .asg bn_mul_words,_bn_mul_words
+ .asg bn_sqr_words,_bn_sqr_words
+ .asg bn_add_words,_bn_add_words
+ .asg bn_sub_words,_bn_sub_words
+ .asg bn_div_words,_bn_div_words
+ .asg bn_sqr_comba8,_bn_sqr_comba8
+ .asg bn_mul_comba8,_bn_mul_comba8
+ .asg bn_sqr_comba4,_bn_sqr_comba4
+ .asg bn_mul_comba4,_bn_mul_comba4
+ .endif
+
+ .asg B3,RA
+ .asg A4,ARG0
+ .asg B4,ARG1
+ .asg A6,ARG2
+ .asg B6,ARG3
+ .asg A8,ARG4
+ .asg B8,ARG5
+ .asg A4,RET
+ .asg A15,FP
+ .asg B14,DP
+ .asg B15,SP
+
+ .global _bn_mul_add_words
+_bn_mul_add_words:
+ .asmfunc
+ MV ARG2,B0
+ [!B0] BNOP RA
+||[!B0] MVK 0,RET
+ [B0] MVC B0,ILC
+ [B0] ZERO A19 ; high part of accumulator
+|| [B0] MV ARG0,A2
+|| [B0] MV ARG3,A3
+ NOP 3
+
+ SPLOOP 2 ; 2*n+10
+;;====================================================================
+ LDW *ARG1++,B7 ; ap[i]
+ NOP 3
+ LDW *ARG0++,A7 ; rp[i]
+ MPY32U B7,A3,A17:A16
+ NOP 3 ; [2,0] in epilogue
+ ADDU A16,A7,A21:A20
+ ADDU A19,A21:A20,A19:A18
+|| MV.S A17,A23
+ SPKERNEL 2,1 ; leave slot for "return value"
+|| STW A18,*A2++ ; rp[i]
+|| ADD A19,A23,A19
+;;====================================================================
+ BNOP RA,4
+ MV A19,RET ; return value
+ .endasmfunc
+
+ .global _bn_mul_words
+_bn_mul_words:
+ .asmfunc
+ MV ARG2,B0
+ [!B0] BNOP RA
+||[!B0] MVK 0,RET
+ [B0] MVC B0,ILC
+ [B0] ZERO A19 ; high part of accumulator
+ NOP 3
+
+ SPLOOP 2 ; 2*n+10
+;;====================================================================
+ LDW *ARG1++,A7 ; ap[i]
+ NOP 4
+ MPY32U A7,ARG3,A17:A16
+	NOP	4		; [2,0] in epilogue
+ ADDU A19,A16,A19:A18
+|| MV.S A17,A21
+ SPKERNEL 2,1 ; leave slot for "return value"
+|| STW A18,*ARG0++ ; rp[i]
+|| ADD.L A19,A21,A19
+;;====================================================================
+ BNOP RA,4
+ MV A19,RET ; return value
+ .endasmfunc
+
+ .global _bn_sqr_words
+_bn_sqr_words:
+ .asmfunc
+ MV ARG2,B0
+ [!B0] BNOP RA
+||[!B0] MVK 0,RET
+ [B0] MVC B0,ILC
+ [B0] MV ARG0,B2
+|| [B0] ADD 4,ARG0,ARG0
+ NOP 3
+
+ SPLOOP 2 ; 2*n+10
+;;====================================================================
+ LDW *ARG1++,B7 ; ap[i]
+ NOP 4
+ MPY32U B7,B7,B1:B0
+ NOP 3 ; [2,0] in epilogue
+ STW B0,*B2++(8) ; rp[2*i]
+ MV B1,A1
+ SPKERNEL 2,0 ; fully overlap BNOP RA,5
+|| STW A1,*ARG0++(8) ; rp[2*i+1]
+;;====================================================================
+ BNOP RA,5
+ .endasmfunc
+
+ .global _bn_add_words
+_bn_add_words:
+ .asmfunc
+ MV ARG3,B0
+ [!B0] BNOP RA
+||[!B0] MVK 0,RET
+ [B0] MVC B0,ILC
+ [B0] ZERO A1 ; carry flag
+|| [B0] MV ARG0,A3
+ NOP 3
+
+ SPLOOP 2 ; 2*n+6
+;;====================================================================
+ LDW *ARG2++,A7 ; bp[i]
+|| LDW *ARG1++,B7 ; ap[i]
+ NOP 4
+ ADDU A7,B7,A9:A8
+ ADDU A1,A9:A8,A1:A0
+ SPKERNEL 0,0 ; fully overlap BNOP RA,5
+|| STW A0,*A3++ ; write result
+|| MV A1,RET ; keep carry flag in RET
+;;====================================================================
+ BNOP RA,5
+ .endasmfunc
+
+ .global _bn_sub_words
+_bn_sub_words:
+ .asmfunc
+ MV ARG3,B0
+ [!B0] BNOP RA
+||[!B0] MVK 0,RET
+ [B0] MVC B0,ILC
+ [B0] ZERO A2 ; borrow flag
+|| [B0] MV ARG0,A3
+ NOP 3
+
+ SPLOOP 2 ; 2*n+6
+;;====================================================================
+ LDW *ARG2++,A7 ; bp[i]
+|| LDW *ARG1++,B7 ; ap[i]
+ NOP 4
+ SUBU B7,A7,A1:A0
+ [A2] SUB A1:A0,1,A1:A0
+ SPKERNEL 0,1 ; leave slot for "return borrow flag"
+|| STW A0,*A3++ ; write result
+|| AND 1,A1,A2 ; pass on borrow flag
+;;====================================================================
+ BNOP RA,4
+ AND 1,A1,RET ; return borrow flag
+ .endasmfunc
+
+ .global _bn_div_words
+_bn_div_words:
+ .asmfunc
+ LMBD 1,A6,A0 ; leading zero bits in dv
+ LMBD 1,A4,A1 ; leading zero bits in hi
+|| MVK 32,B0
+ CMPLTU A1,A0,A2
+|| ADD A0,B0,B0
+ [ A2] BNOP RA
+||[ A2] MVK -1,A4 ; return overflow
+||[!A2] MV A4,A3 ; reassign hi
+ [!A2] MV B4,A4 ; reassign lo, will be quotient
+||[!A2] MVC B0,ILC
+ [!A2] SHL A6,A0,A6 ; normalize dv
+|| MVK 1,A1
+
+ [!A2] CMPLTU A3,A6,A1 ; hi<dv?
+||[!A2] SHL A4,1,A5:A4 ; lo<<1
+ [!A1] SUB A3,A6,A3 ; hi-=dv
+||[!A1] OR 1,A4,A4
+ [!A2] SHRU A3,31,A1 ; upper bit
+||[!A2] ADDAH A5,A3,A3 ; hi<<1|lo>>31
+
+ SPLOOP 3
+ [!A1] CMPLTU A3,A6,A1 ; hi<dv?
+||[ A1] ZERO A1
+|| SHL A4,1,A5:A4 ; lo<<1
+ [!A1] SUB A3,A6,A3 ; hi-=dv
+||[!A1] OR 1,A4,A4 ; quotient
+ SHRU A3,31,A1 ; upper bit
+|| ADDAH A5,A3,A3 ; hi<<1|lo>>31
+ SPKERNEL
+
+ BNOP RA,5
+ .endasmfunc
+
+;;====================================================================
+;; Not really the Comba algorithm, just a straightforward NxM multiply...
+;; Dedicated fully unrolled real Comba implementations are asymptotically
+;; 2x faster, but a naturally larger undertaking. The purpose of this
+;; exercise was rather to learn to master nested SPLOOPs...
+;;====================================================================
+ .global _bn_sqr_comba8
+ .global _bn_mul_comba8
+_bn_sqr_comba8:
+ MV ARG1,ARG2
+_bn_mul_comba8:
+ .asmfunc
+ MVK 8,B0 ; N, RILC
+|| MVK 8,A0 ; M, outer loop counter
+|| MV ARG1,A5 ; copy ap
+|| MV ARG0,B4 ; copy rp
+|| ZERO B19 ; high part of accumulator
+ MVC B0,RILC
+|| SUB B0,2,B1 ; N-2, initial ILC
+|| SUB B0,1,B2 ; const B2=N-1
+|| LDW *A5++,B6 ; ap[0]
+|| MV A0,A3 ; const A3=M
+sploopNxM?: ; for best performance arrange M<=N
+ [A0] SPLOOPD 2 ; 2*n+10
+|| MVC B1,ILC
+|| ADDAW B4,B0,B5
+|| ZERO B7
+|| LDW *A5++,A9 ; pre-fetch ap[1]
+|| ZERO A1
+|| SUB A0,1,A0
+;;====================================================================
+;; SPLOOP from bn_mul_add_words, but with flipped A<>B register files.
+;; This is because of Advisory 15 from TI publication SPRZ247I.
+ LDW *ARG2++,A7 ; bp[i]
+ NOP 3
+ [A1] LDW *B5++,B7 ; rp[i]
+ MPY32U A7,B6,B17:B16
+ NOP 3
+ ADDU B16,B7,B21:B20
+ ADDU B19,B21:B20,B19:B18
+|| MV.S B17,B23
+ SPKERNEL
+|| STW B18,*B4++ ; rp[i]
+|| ADD.S B19,B23,B19
+;;====================================================================
+outer?: ; m*2*(n+1)+10
+ SUBAW ARG2,A3,ARG2 ; rewind bp to bp[0]
+ SPMASKR
+|| CMPGT A0,1,A2 ; done pre-fetching ap[i+1]?
+ MVD A9,B6 ; move through .M unit(*)
+ [A2] LDW *A5++,A9 ; pre-fetch ap[i+1]
+ SUBAW B5,B2,B5 ; rewind rp to rp[1]
+ MVK 1,A1
+ [A0] BNOP.S1 outer?,4
+|| [A0] SUB.L A0,1,A0
+	STW	B19,*B4--[B2]	; rewind rp to rp[1]
+|| ZERO.S B19 ; high part of accumulator
+;; end of outer?
+ BNOP RA,5 ; return
+ .endasmfunc
+;; (*) It should be noted that B6 is used as input to MPY32U in
+;; chronologically next cycle in *preceding* SPLOOP iteration.
+;; Normally such arrangement would require DINT, but at this
+;; point SPLOOP is draining and interrupts are disabled
+;; implicitly.
+
+ .global _bn_sqr_comba4
+ .global _bn_mul_comba4
+_bn_sqr_comba4:
+ MV ARG1,ARG2
+_bn_mul_comba4:
+ .asmfunc
+ .if 0
+ BNOP sploopNxM?,3
+	;; The above mentioned m*2*(n+1)+10 does not apply in the n=m=4
+	;; case, because of the low-counter effect, when the prologue phase
+	;; finishes before the SPKERNEL instruction is reached. As a result
+	;; it's 25% slower than expected...
+ MVK 4,B0 ; N, RILC
+|| MVK 4,A0 ; M, outer loop counter
+|| MV ARG1,A5 ; copy ap
+|| MV ARG0,B4 ; copy rp
+|| ZERO B19 ; high part of accumulator
+ MVC B0,RILC
+|| SUB B0,2,B1 ; first ILC
+|| SUB B0,1,B2 ; const B2=N-1
+|| LDW *A5++,B6 ; ap[0]
+|| MV A0,A3 ; const A3=M
+ .else
+ ;; This alternative is an exercise in fully unrolled Comba
+ ;; algorithm implementation that operates at n*(n+1)+12, or
+ ;; as little as 32 cycles...
+ LDW *ARG1[0],B16 ; a[0]
+|| LDW *ARG2[0],A16 ; b[0]
+ LDW *ARG1[1],B17 ; a[1]
+|| LDW *ARG2[1],A17 ; b[1]
+ LDW *ARG1[2],B18 ; a[2]
+|| LDW *ARG2[2],A18 ; b[2]
+ LDW *ARG1[3],B19 ; a[3]
+|| LDW *ARG2[3],A19 ; b[3]
+ NOP
+ MPY32U A16,B16,A1:A0 ; a[0]*b[0]
+ MPY32U A17,B16,A23:A22 ; a[0]*b[1]
+ MPY32U A16,B17,A25:A24 ; a[1]*b[0]
+ MPY32U A16,B18,A27:A26 ; a[2]*b[0]
+ STW A0,*ARG0[0]
+|| MPY32U A17,B17,A29:A28 ; a[1]*b[1]
+ MPY32U A18,B16,A31:A30 ; a[0]*b[2]
+|| ADDU A22,A1,A1:A0
+ MV A23,B0
+|| MPY32U A19,B16,A21:A20 ; a[3]*b[0]
+|| ADDU A24,A1:A0,A1:A0
+ ADDU A25,B0,B1:B0
+|| STW A0,*ARG0[1]
+|| MPY32U A18,B17,A23:A22 ; a[2]*b[1]
+|| ADDU A26,A1,A9:A8
+ ADDU A27,B1,B9:B8
+|| MPY32U A17,B18,A25:A24 ; a[1]*b[2]
+|| ADDU A28,A9:A8,A9:A8
+ ADDU A29,B9:B8,B9:B8
+|| MPY32U A16,B19,A27:A26 ; a[0]*b[3]
+|| ADDU A30,A9:A8,A9:A8
+ ADDU A31,B9:B8,B9:B8
+|| ADDU B0,A9:A8,A9:A8
+ STW A8,*ARG0[2]
+|| ADDU A20,A9,A1:A0
+ ADDU A21,B9,B1:B0
+|| MPY32U A19,B17,A21:A20 ; a[3]*b[1]
+|| ADDU A22,A1:A0,A1:A0
+ ADDU A23,B1:B0,B1:B0
+|| MPY32U A18,B18,A23:A22 ; a[2]*b[2]
+|| ADDU A24,A1:A0,A1:A0
+ ADDU A25,B1:B0,B1:B0
+|| MPY32U A17,B19,A25:A24 ; a[1]*b[3]
+|| ADDU A26,A1:A0,A1:A0
+ ADDU A27,B1:B0,B1:B0
+|| ADDU B8,A1:A0,A1:A0
+ STW A0,*ARG0[3]
+|| MPY32U A19,B18,A27:A26 ; a[3]*b[2]
+|| ADDU A20,A1,A9:A8
+ ADDU A21,B1,B9:B8
+|| MPY32U A18,B19,A29:A28 ; a[2]*b[3]
+|| ADDU A22,A9:A8,A9:A8
+ ADDU A23,B9:B8,B9:B8
+|| MPY32U A19,B19,A31:A30 ; a[3]*b[3]
+|| ADDU A24,A9:A8,A9:A8
+ ADDU A25,B9:B8,B9:B8
+|| ADDU B0,A9:A8,A9:A8
+ STW A8,*ARG0[4]
+|| ADDU A26,A9,A1:A0
+ ADDU A27,B9,B1:B0
+|| ADDU A28,A1:A0,A1:A0
+ ADDU A29,B1:B0,B1:B0
+|| BNOP RA
+|| ADDU B8,A1:A0,A1:A0
+ STW A0,*ARG0[5]
+|| ADDU A30,A1,A9:A8
+ ADD A31,B1,B8
+ ADDU B0,A9:A8,A9:A8 ; removed || to avoid cross-path stall below
+ ADD B8,A9,A9
+|| STW A8,*ARG0[6]
+ STW A9,*ARG0[7]
+ .endif
+ .endasmfunc
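The _bn_div_words SPLOOP above is a normalized shift-and-subtract divider that
produces one quotient bit per iteration. Below is a simplified C model of the value
it computes: restoring division of the double word h:l by d, under the assumption
h < d (otherwise the quotient does not fit and the assembly flags overflow). The
name div_words_ref is illustrative; this does not reproduce the C64x+ normalization.

    #include <stdint.h>

    uint32_t div_words_ref(uint32_t h, uint32_t l, uint32_t d)
    {
        uint32_t q = 0;
        int      i;

        if (h >= d)
            return (uint32_t)-1;            /* quotient would overflow */

        for (i = 0; i < 32; i++) {
            /* shift the next dividend bit into the partial remainder */
            uint64_t rem = ((uint64_t)h << 1) | (l >> 31);
            l <<= 1;
            q <<= 1;
            if (rem >= d) {                 /* subtract divisor, set quotient bit */
                rem -= d;
                q |= 1;
            }
            h = (uint32_t)rem;
        }
        return q;
    }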
diff --git a/openssl-1.1.0h/crypto/bn/asm/c64xplus-gf2m.pl b/openssl-1.1.0h/crypto/bn/asm/c64xplus-gf2m.pl
new file mode 100644
index 0000000..c0e5400
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/c64xplus-gf2m.pl
@@ -0,0 +1,160 @@
+#! /usr/bin/env perl
+# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# February 2012
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication
+# used in bn_gf2m.c. It's a kind of low-hanging mechanical port from
+# C for the time being... The subroutine runs in 37 cycles, which is
+# 4.5x faster than compiler-generated code. The comparison is totally
+# unfair, though, because this module utilizes the Galois Field Multiply
+# instruction.
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+($rp,$a1,$a0,$b1,$b0)=("A4","B4","A6","B6","A8"); # argument vector
+
+($Alo,$Alox0,$Alox1,$Alox2,$Alox3)=map("A$_",(16..20));
+($Ahi,$Ahix0,$Ahix1,$Ahix2,$Ahix3)=map("B$_",(16..20));
+($B_0,$B_1,$B_2,$B_3)=("B5","A5","A7","B7");
+($A,$B)=($Alo,$B_1);
+$xFF="B1";
+
+sub mul_1x1_upper {
+my ($A,$B)=@_;
+$code.=<<___;
+ EXTU $B,8,24,$B_2 ; smash $B to 4 bytes
+|| AND $B,$xFF,$B_0
+|| SHRU $B,24,$B_3
+ SHRU $A,16, $Ahi ; smash $A to two halfwords
+|| EXTU $A,16,16,$Alo
+
+	XORMPY	$Alo,$B_2,$Alox2	; 16x8-bit multiplication
+|| XORMPY $Ahi,$B_2,$Ahix2
+|| EXTU $B,16,24,$B_1
+ XORMPY $Alo,$B_0,$Alox0
+|| XORMPY $Ahi,$B_0,$Ahix0
+ XORMPY $Alo,$B_3,$Alox3
+|| XORMPY $Ahi,$B_3,$Ahix3
+ XORMPY $Alo,$B_1,$Alox1
+|| XORMPY $Ahi,$B_1,$Ahix1
+___
+}
+sub mul_1x1_merged {
+my ($OUTlo,$OUThi,$A,$B)=@_;
+$code.=<<___;
+ EXTU $B,8,24,$B_2 ; smash $B to 4 bytes
+|| AND $B,$xFF,$B_0
+|| SHRU $B,24,$B_3
+ SHRU $A,16, $Ahi ; smash $A to two halfwords
+|| EXTU $A,16,16,$Alo
+
+ XOR $Ahix0,$Alox2,$Ahix0
+|| MV $Ahix2,$OUThi
+|| XORMPY $Alo,$B_2,$Alox2
+ XORMPY $Ahi,$B_2,$Ahix2
+|| EXTU $B,16,24,$B_1
+|| XORMPY $Alo,$B_0,A1 ; $Alox0
+ XOR $Ahix1,$Alox3,$Ahix1
+|| SHL $Ahix0,16,$OUTlo
+|| SHRU $Ahix0,16,$Ahix0
+ XOR $Alox0,$OUTlo,$OUTlo
+|| XOR $Ahix0,$OUThi,$OUThi
+|| XORMPY $Ahi,$B_0,$Ahix0
+|| XORMPY $Alo,$B_3,$Alox3
+|| SHL $Alox1,8,$Alox1
+|| SHL $Ahix3,8,$Ahix3
+ XOR $Alox1,$OUTlo,$OUTlo
+|| XOR $Ahix3,$OUThi,$OUThi
+|| XORMPY $Ahi,$B_3,$Ahix3
+|| SHL $Ahix1,24,$Alox1
+|| SHRU $Ahix1,8, $Ahix1
+ XOR $Alox1,$OUTlo,$OUTlo
+|| XOR $Ahix1,$OUThi,$OUThi
+|| XORMPY $Alo,$B_1,$Alox1
+|| XORMPY $Ahi,$B_1,$Ahix1
+|| MV A1,$Alox0
+___
+}
+sub mul_1x1_lower {
+my ($OUTlo,$OUThi)=@_;
+$code.=<<___;
+ ;NOP
+ XOR $Ahix0,$Alox2,$Ahix0
+|| MV $Ahix2,$OUThi
+ NOP
+ XOR $Ahix1,$Alox3,$Ahix1
+|| SHL $Ahix0,16,$OUTlo
+|| SHRU $Ahix0,16,$Ahix0
+ XOR $Alox0,$OUTlo,$OUTlo
+|| XOR $Ahix0,$OUThi,$OUThi
+|| SHL $Alox1,8,$Alox1
+|| SHL $Ahix3,8,$Ahix3
+ XOR $Alox1,$OUTlo,$OUTlo
+|| XOR $Ahix3,$OUThi,$OUThi
+|| SHL $Ahix1,24,$Alox1
+|| SHRU $Ahix1,8, $Ahix1
+ XOR $Alox1,$OUTlo,$OUTlo
+|| XOR $Ahix1,$OUThi,$OUThi
+___
+}
+$code.=<<___;
+ .text
+
+ .if .ASSEMBLER_VERSION<7000000
+ .asg 0,__TI_EABI__
+ .endif
+ .if __TI_EABI__
+ .asg bn_GF2m_mul_2x2,_bn_GF2m_mul_2x2
+ .endif
+
+ .global _bn_GF2m_mul_2x2
+_bn_GF2m_mul_2x2:
+ .asmfunc
+ MVK 0xFF,$xFF
+___
+ &mul_1x1_upper($a0,$b0); # a0·b0
+$code.=<<___;
+|| MV $b1,$B
+ MV $a1,$A
+___
+ &mul_1x1_merged("A28","B28",$A,$B); # a0·b0/a1·b1
+$code.=<<___;
+|| XOR $b0,$b1,$B
+ XOR $a0,$a1,$A
+___
+ &mul_1x1_merged("A31","B31",$A,$B); # a1·b1/(a0+a1)·(b0+b1)
+$code.=<<___;
+ XOR A28,A31,A29
+|| XOR B28,B31,B29 ; a0·b0+a1·b1
+___
+ &mul_1x1_lower("A30","B30"); # (a0+a1)·(b0+b1)
+$code.=<<___;
+|| BNOP B3
+ XOR A29,A30,A30
+|| XOR B29,B30,B30 ; (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ XOR B28,A30,A30
+|| STW A28,*${rp}[0]
+ XOR B30,A31,A31
+|| STW A30,*${rp}[1]
+ STW A31,*${rp}[2]
+ STW B31,*${rp}[3]
+ .endasmfunc
+___
+
+print $code;
+close STDOUT;
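The generated code computes bn_GF2m_mul_2x2 with the Karatsuba-style identity noted
in its comments: a0·b0, a1·b1 and (a0+a1)·(b0+b1), where addition and subtraction are
XOR. The following plain C sketch expresses the same identity, with a bit-by-bit
carry-less multiply standing in for the XORMPY hardware instruction; the function
names are illustrative, and 32-bit limbs are assumed as on C64x+.

    #include <stdint.h>

    /* carry-less (GF(2)[x]) 32x32 -> 64 multiply, shift-and-xor */
    static uint64_t clmul32(uint32_t a, uint32_t b)
    {
        uint64_t r = 0;
        int      i;
        for (i = 0; i < 32; i++)
            if ((b >> i) & 1)
                r ^= (uint64_t)a << i;
        return r;
    }

    /* r[0..3] = (a1*x^32 + a0) * (b1*x^32 + b0) over GF(2)[x],
     * least significant word first, same argument order as the asm entry */
    void gf2m_mul_2x2_ref(uint32_t r[4], uint32_t a1, uint32_t a0,
                          uint32_t b1, uint32_t b0)
    {
        uint64_t lo  = clmul32(a0, b0);
        uint64_t hi  = clmul32(a1, b1);
        uint64_t mid = clmul32(a0 ^ a1, b0 ^ b1) ^ lo ^ hi;

        r[0] = (uint32_t)lo;
        r[1] = (uint32_t)(lo >> 32) ^ (uint32_t)mid;
        r[2] = (uint32_t)hi ^ (uint32_t)(mid >> 32);
        r[3] = (uint32_t)(hi >> 32);
    }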
diff --git a/openssl-1.1.0h/crypto/bn/asm/co-586.pl b/openssl-1.1.0h/crypto/bn/asm/co-586.pl
new file mode 100644
index 0000000..60d0363
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/co-586.pl
@@ -0,0 +1,298 @@
+#! /usr/bin/env perl
+# Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],$0);
+
+&bn_mul_comba("bn_mul_comba8",8);
+&bn_mul_comba("bn_mul_comba4",4);
+&bn_sqr_comba("bn_sqr_comba8",8);
+&bn_sqr_comba("bn_sqr_comba4",4);
+
+&asm_finish();
+
+close STDOUT;
+
+sub mul_add_c
+ {
+ local($a,$ai,$b,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+ # pos == -1 if eax and edx are pre-loaded, 0 to load from next
+	# words, and 1 to load the return value
+
+ &comment("mul a[$ai]*b[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ &mul("edx");
+ &add($c0,"eax");
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0;	# load next a
+ &mov("eax",&wparam(0)) if $pos > 0; # load r[]
+ ###
+ &adc($c1,"edx");
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 0;	# load next b
+	&mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1;	# load next b
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[];
+	&mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1;	# load next a
+ }
+
+sub sqr_add_c
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+ # pos == -1 if eax and edx are pre-loaded, 0 to load from next
+	# words, and 1 to load the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$b,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add($c0,"eax");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ ###
+ &adc($c1,"edx");
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb);
+ ###
+ &adc($c2,0);
+	# if pos > 1, it means it is the last loop
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ }
+
+sub sqr_add_c2
+ {
+ local($r,$a,$ai,$bi,$c0,$c1,$c2,$pos,$i,$na,$nb)=@_;
+
+ # pos == -1 if eax and edx are pre-loaded, 0 to load from next
+	# words, and 1 to load the return value
+
+ &comment("sqr a[$ai]*a[$bi]");
+
+ # "eax" and "edx" will always be pre-loaded.
+ # &mov("eax",&DWP($ai*4,$a,"",0)) ;
+ # &mov("edx",&DWP($bi*4,$a,"",0));
+
+ if ($ai == $bi)
+ { &mul("eax");}
+ else
+ { &mul("edx");}
+ &add("eax","eax");
+ ###
+ &adc("edx","edx");
+ ###
+ &adc($c2,0);
+ &add($c0,"eax");
+ &adc($c1,"edx");
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 0; # load next a
+ &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b
+ &adc($c2,0);
+ &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[];
+ &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos <= 1) && ($na != $nb);
+ ###
+ }
+
+sub bn_mul_comba
+ {
+ local($name,$num)=@_;
+ local($a,$b,$c0,$c1,$c2);
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($tot,$end);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $b="edi";
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ &push("esi");
+ &mov($a,&wparam(1));
+ &push("edi");
+ &mov($b,&wparam(2));
+ &push("ebp");
+ &push("ebx");
+
+ &xor($c0,$c0);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+ &xor($c1,$c1);
+	&mov("edx",&DWP(0,$b,"",0));	# load the first word of b
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("################## Calculate word $i");
+
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($j+1) == $end)
+ {
+ $v=1;
+ $v=2 if (($i+1) == $tot);
+ }
+ else
+ { $v=0; }
+ if (($j+1) != $end)
+ {
+ $na=($ai-1);
+ $nb=($bi+1);
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+#printf STDERR "[$ai,$bi] -> [$na,$nb]\n";
+ &mul_add_c($a,$ai,$b,$bi,$c0,$c1,$c2,$v,$i,$na,$nb);
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ # &mov("eax",&wparam(0));
+ # &mov(&DWP($i*4,"eax","",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &comment("save r[$i]");
+ # &mov("eax",&wparam(0));
+ &mov(&DWP($i*4,"eax","",0),$c0);
+
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
+
+sub bn_sqr_comba
+ {
+ local($name,$num)=@_;
+ local($r,$a,$c0,$c1,$c2)=@_;
+ local($i,$as,$ae,$bs,$be,$ai,$bi);
+ local($b,$tot,$end,$half);
+
+ &function_begin_B($name,"");
+
+ $c0="ebx";
+ $c1="ecx";
+ $c2="ebp";
+ $a="esi";
+ $r="edi";
+
+ &push("esi");
+ &push("edi");
+ &push("ebp");
+ &push("ebx");
+ &mov($r,&wparam(0));
+ &mov($a,&wparam(1));
+ &xor($c0,$c0);
+ &xor($c1,$c1);
+ &mov("eax",&DWP(0,$a,"",0)); # load the first word
+
+ $as=0;
+ $ae=0;
+ $bs=0;
+ $be=0;
+ $tot=$num+$num-1;
+
+ for ($i=0; $i<$tot; $i++)
+ {
+ $ai=$as;
+ $bi=$bs;
+ $end=$be+1;
+
+ &comment("############### Calculate word $i");
+ for ($j=$bs; $j<$end; $j++)
+ {
+ &xor($c2,$c2) if ($j == $bs);
+ if (($ai-1) < ($bi+1))
+ {
+ $v=1;
+ $v=2 if ($i+1) == $tot;
+ }
+ else
+ { $v=0; }
+ if (!$v)
+ {
+ $na=$ai-1;
+ $nb=$bi+1;
+ }
+ else
+ {
+ $na=$as+($i < ($num-1));
+ $nb=$bs+($i >= ($num-1));
+ }
+ if ($ai == $bi)
+ {
+ &sqr_add_c($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ else
+ {
+ &sqr_add_c2($r,$a,$ai,$bi,
+ $c0,$c1,$c2,$v,$i,$na,$nb);
+ }
+ if ($v)
+ {
+ &comment("saved r[$i]");
+ #&mov(&DWP($i*4,$r,"",0),$c0);
+ ($c0,$c1,$c2)=($c1,$c2,$c0);
+ last;
+ }
+ $ai--;
+ $bi++;
+ }
+ $as++ if ($i < ($num-1));
+ $ae++ if ($i >= ($num-1));
+
+ $bs++ if ($i >= ($num-1));
+ $be++ if ($i < ($num-1));
+ }
+ &mov(&DWP($i*4,$r,"",0),$c0);
+ &pop("ebx");
+ &pop("ebp");
+ &pop("edi");
+ &pop("esi");
+ &ret();
+ &function_end_B($name);
+ }
diff --git a/openssl-1.1.0h/crypto/bn/asm/ia64-mont.pl b/openssl-1.1.0h/crypto/bn/asm/ia64-mont.pl
new file mode 100644
index 0000000..5cc5c59
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/ia64-mont.pl
@@ -0,0 +1,860 @@
+#! /usr/bin/env perl
+# Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# January 2010
+#
+# "Teaser" Montgomery multiplication module for IA-64. There are
+# several possibilities for improvement:
+#
+# - modulo-scheduling outer loop would eliminate quite a number of
+# stalls after ldf8, xma and getf.sig outside inner loop and
+# improve shorter key performance;
+# - shorter vector support [with input vectors being fetched only
+# once] should be added;
+# - 2x unroll with help of n0[1] would make the code scalable on
+# "wider" IA-64, "wider" than Itanium 2 that is, which is not of
+# acute interest, because upcoming Tukwila's individual cores are
+# reportedly based on Itanium 2 design;
+# - dedicated squaring procedure(?);
+#
+# January 2010
+#
+# Shorter vector support is implemented by zero-padding ap and np
+# vectors up to 8 elements, or 512 bits. This means that 256-bit
+# inputs will be processed only 2 times faster than 512-bit inputs,
+# not 4 [as one would expect, because algorithm complexity is n^2].
+# The reason for padding is that inputs shorter than 512 bits won't
+# be processed faster anyway, because the minimal critical path of the
+# core loop happens to match 512-bit timing. Either way, it resulted
+# in a >100% improvement of the 512-bit RSA sign benchmark and 50% of
+# the 1024-bit one [in comparison to the original version of *this* module].
+#
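+# A minimal C sketch of the padding idea (an illustration only; in the code
+# below the padding happens on the fly via the predicated ldf8/fcvt.fxu
+# pairs in bn_mul_mont_8, and pad8 is a name invented for this sketch):
+#
+#	typedef unsigned long BN_ULONG;		/* 64-bit limb */
+#
+#	static void pad8(BN_ULONG dst[8], const BN_ULONG *src, int num)
+#	{
+#		int i;
+#		for (i = 0; i < num; i++)	/* copy the real limbs */
+#			dst[i] = src[i];
+#		for (; i < 8; i++)		/* zero-pad up to 512 bits */
+#			dst[i] = 0;
+#	}
+#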
+# So far 'openssl speed rsa dsa' output on 900MHz Itanium 2 *with*
+# this module is:
+# sign verify sign/s verify/s
+# rsa 512 bits 0.000290s 0.000024s 3452.8 42031.4
+# rsa 1024 bits 0.000793s 0.000058s 1261.7 17172.0
+# rsa 2048 bits 0.005908s 0.000148s 169.3 6754.0
+# rsa 4096 bits 0.033456s 0.000469s 29.9 2133.6
+# dsa 512 bits 0.000253s 0.000198s 3949.9 5057.0
+# dsa 1024 bits 0.000585s 0.000607s 1708.4 1647.4
+# dsa 2048 bits 0.001453s 0.001703s 688.1 587.4
+#
+# ... and *without* (but still with ia64.S):
+#
+# rsa 512 bits 0.000670s 0.000041s 1491.8 24145.5
+# rsa 1024 bits 0.001988s 0.000080s 502.9 12499.3
+# rsa 2048 bits 0.008702s 0.000189s 114.9 5293.9
+# rsa 4096 bits 0.043860s 0.000533s 22.8 1875.9
+# dsa 512 bits 0.000441s 0.000427s 2265.3 2340.6
+# dsa 1024 bits 0.000823s 0.000867s 1215.6 1153.2
+# dsa 2048 bits 0.001894s 0.002179s 528.1 458.9
+#
+# As can be seen, RSA sign performance improves by 130-30% [the smaller
+# gains being for the longer keys], while verify improves by 74-13%.
+# DSA performance improves by 115-30%.
+
+$output=pop;
+
+if ($^O eq "hpux") {
+ $ADDP="addp4";
+ for (@ARGV) { $ADDP="add" if (/[\+DD|\-mlp]64/); }
+} else { $ADDP="add"; }
+
+$code=<<___;
+.explicit
+.text
+
+// int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap,
+// const BN_ULONG *bp,const BN_ULONG *np,
+// const BN_ULONG *n0p,int num);
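+//
+// For orientation, a plain-C sketch of what both code paths below compute
+// (an illustration only, not OpenSSL's actual C fallback; bn_mul_mont_ref,
+// MAXN and the gcc-style 128-bit type are assumptions of this sketch, and
+// the usual preconditions apply: np is odd, ap and bp are less than np):
+//
+//	typedef unsigned long BN_ULONG;		/* 64-bit limb */
+//	typedef unsigned __int128 BN_ULLONG;	/* assumed gcc-style type */
+//	#define MAXN 64				/* sketch-only bound on num */
+//
+//	static int bn_mul_mont_ref(BN_ULONG *rp, const BN_ULONG *ap,
+//				   const BN_ULONG *bp, const BN_ULONG *np,
+//				   const BN_ULONG *n0p, int num)
+//	{
+//		BN_ULONG tp[MAXN + 2] = { 0 };	/* accumulator, as on the stack */
+//		BN_ULONG d[MAXN], m, borrow;
+//		BN_ULLONG c;
+//		int i, j;
+//
+//		if (num < 2 || num > MAXN) return 0;	/* 0 = "not handled" */
+//
+//		for (i = 0; i < num; i++) {
+//			c = 0;				/* tp += ap * bp[i] */
+//			for (j = 0; j < num; j++) {
+//				c += (BN_ULLONG)ap[j] * bp[i] + tp[j];
+//				tp[j] = (BN_ULONG)c;  c >>= 64;
+//			}
+//			c += tp[num];
+//			tp[num] = (BN_ULONG)c;  tp[num + 1] = (BN_ULONG)(c >> 64);
+//
+//			m = tp[0] * n0p[0];		/* kills the low word */
+//			c = 0;				/* tp += np * m */
+//			for (j = 0; j < num; j++) {
+//				c += (BN_ULLONG)np[j] * m + tp[j];
+//				tp[j] = (BN_ULONG)c;  c >>= 64;
+//			}
+//			c += tp[num];
+//			tp[num] = (BN_ULONG)c;  tp[num + 1] += (BN_ULONG)(c >> 64);
+//
+//			for (j = 0; j <= num; j++)	/* tp[0]==0: shift by one word */
+//				tp[j] = tp[j + 1];
+//			tp[num + 1] = 0;
+//		}
+//
+//		borrow = 0;				/* rp = tp - np unless that    */
+//		for (j = 0; j < num; j++) {		/* borrows past the carry word */
+//			d[j] = tp[j] - np[j] - borrow;
+//			borrow = tp[j] < np[j] || (borrow && tp[j] == np[j]);
+//		}
+//		for (j = 0; j < num; j++)
+//			rp[j] = (tp[num] >= borrow) ? d[j] : tp[j];
+//		return 1;				/* 1 = "handled" */
+//	}
+//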
+.align 64
+.global bn_mul_mont#
+.proc bn_mul_mont#
+bn_mul_mont:
+ .prologue
+ .body
+{ .mmi; cmp4.le p6,p7=2,r37;;
+(p6) cmp4.lt.unc p8,p9=8,r37
+ mov ret0=r0 };;
+{ .bbb;
+(p9) br.cond.dptk.many bn_mul_mont_8
+(p8) br.cond.dpnt.many bn_mul_mont_general
+(p7) br.ret.spnt.many b0 };;
+.endp bn_mul_mont#
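+
+// The predicate dance above (p6/p7: num>=2?, p8/p9: num>8?) amounts to the
+// following C dispatch (a sketch only; the helpers stand for the local
+// assembly entry points below and are declared here purely for illustration):
+//
+//	typedef unsigned long BN_ULONG;
+//
+//	extern int bn_mul_mont_8      (BN_ULONG *, const BN_ULONG *, const BN_ULONG *,
+//	                               const BN_ULONG *, const BN_ULONG *, int);
+//	extern int bn_mul_mont_general(BN_ULONG *, const BN_ULONG *, const BN_ULONG *,
+//	                               const BN_ULONG *, const BN_ULONG *, int);
+//
+//	int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+//	                const BN_ULONG *np, const BN_ULONG *n0p, int num)
+//	{
+//		if (num < 2)  return 0;		/* not handled, use C path   */
+//		if (num <= 8) return bn_mul_mont_8(rp, ap, bp, np, n0p, num);
+//		return bn_mul_mont_general(rp, ap, bp, np, n0p, num);
+//	}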
+
+prevfs=r2; prevpr=r3; prevlc=r10; prevsp=r11;
+
+rptr=r8; aptr=r9; bptr=r14; nptr=r15;
+tptr=r16; // &tp[0]
+tp_1=r17; // &tp[-1]
+num=r18; len=r19; lc=r20;
+topbit=r21; // carry bit from tmp[num]
+
+n0=f6;
+m0=f7;
+bi=f8;
+
+.align 64
+.local bn_mul_mont_general#
+.proc bn_mul_mont_general#
+bn_mul_mont_general:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ $ADDP aptr=0,in1
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; .vframe prevsp
+ mov prevsp=sp
+ $ADDP bptr=0,in2
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+ .body
+ .rotf alo[6],nlo[4],ahi[8],nhi[6]
+ .rotr a[3],n[3],t[2]
+
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 alo[4]=[aptr],16 // ap[0]
+ $ADDP r30=8,in1 };;
+{ .mmi; ldf8 alo[3]=[r30],16 // ap[1]
+ ldf8 alo[2]=[aptr],16 // ap[2]
+ $ADDP in4=0,in4 };;
+{ .mmi; ldf8 alo[1]=[r30] // ap[3]
+ ldf8 n0=[in4] // n0
+ $ADDP rptr=0,in0 }
+{ .mmi; $ADDP nptr=0,in3
+ mov r31=16
+ zxt4 num=in5 };;
+{ .mmi; ldf8 nlo[2]=[nptr],8 // np[0]
+ shladd len=num,3,r0
+ shladd r31=num,3,r31 };;
+{ .mmi; ldf8 nlo[1]=[nptr],8 // np[1]
+ add lc=-5,num
+ sub r31=sp,r31 };;
+{ .mfb; and sp=-16,r31 // alloca
+ xmpy.hu ahi[2]=alo[4],bi // ap[0]*bp[0]
+ nop.b 0 }
+{ .mfb; nop.m 0
+ xmpy.lu alo[4]=alo[4],bi
+ brp.loop.imp .L1st_ctop,.L1st_cend-16
+ };;
+{ .mfi; nop.m 0
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[0]
+ add tp_1=8,sp }
+{ .mfi; nop.m 0
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20001f<<16
+ // ------^----- (p40) at first (p23)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; nop.m 0
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[0])*n0
+ mov ar.lc=lc }
+{ .mfi; nop.m 0
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+.align 32
+.L1st_ctop:
+.pred.rel "mutex",p40,p42
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23) }
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)(p16)
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p42) cmp.leu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p23) st8 [tp_1]=n[2],8
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mmb; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ br.ctop.sptk .L1st_ctop };;
+.L1st_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ add num=-1,num };; // num--
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ sub aptr=aptr,len };; // rewind
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+ sub nptr=nptr,len };;
+{ .mmi; .pred.rel "mutex",p39,p41
+(p39) add topbit=r0,r0
+(p41) add topbit=r0,r0,1
+ nop.i 0 }
+{ .mmi; st8 [tp_1]=n[0]
+ add tptr=16,sp
+ add tp_1=8,sp };;
+
+.Louter:
+{ .mmi; ldf8 bi=[bptr],8 // (*bp++)
+ ldf8 ahi[3]=[tptr] // tp[0]
+ add r30=8,aptr };;
+{ .mmi; ldf8 alo[4]=[aptr],16 // ap[0]
+ ldf8 alo[3]=[r30],16 // ap[1]
+ add r31=8,nptr };;
+{ .mfb; ldf8 alo[2]=[aptr],16 // ap[2]
+ xma.hu ahi[2]=alo[4],bi,ahi[3] // ap[0]*bp[i]+tp[0]
+ brp.loop.imp .Linner_ctop,.Linner_cend-16
+ }
+{ .mfb; ldf8 alo[1]=[r30] // ap[3]
+ xma.lu alo[4]=alo[4],bi,ahi[3]
+ clrrrb.pr };;
+{ .mfi; ldf8 nlo[2]=[nptr],16 // np[0]
+ xma.hu ahi[1]=alo[3],bi,ahi[2] // ap[1]*bp[i]
+ nop.i 0 }
+{ .mfi; ldf8 nlo[1]=[r31] // np[1]
+ xma.lu alo[3]=alo[3],bi,ahi[2]
+ mov pr.rot=0x20101f<<16
+ // ------^----- (p40) at first (p23)
+ // --------^--- (p30) at first (p22)
+ // ----------^^ p[16:20]=1
+ };;
+{ .mfi; st8 [tptr]=r0 // tp[0] is already accounted
+ xmpy.lu m0=alo[4],n0 // (ap[0]*bp[i]+tp[0])*n0
+ mov ar.lc=lc }
+{ .mfi;
+ fcvt.fxu.s1 nhi[1]=f0
+ mov ar.ec=8 };;
+
+// This loop spins in 4*(n+7) ticks on Itanium 2 and should spin in
+// 7*(n+7) ticks on Itanium (the one codenamed Merced). The factor of 7
+// in the latter case accounts for a two-tick pipeline stall, which means
+// that its performance would be ~20% lower than the optimal one. No
+// attempt was made to address this, because the original Itanium is
+// hardly seen out in the wild...
+.align 32
+.Linner_ctop:
+.pred.rel "mutex",p40,p42
+.pred.rel "mutex",p30,p32
+{ .mfi; (p16) ldf8 alo[0]=[aptr],8 // *(aptr++)
+ (p18) xma.hu ahi[0]=alo[2],bi,ahi[1]
+ (p40) add n[2]=n[2],a[2] } // (p23)
+{ .mfi; (p16) nop.m 0
+ (p18) xma.lu alo[2]=alo[2],bi,ahi[1]
+ (p42) add n[2]=n[2],a[2],1 };; // (p23)
+{ .mfi; (p21) getf.sig a[0]=alo[5]
+ (p16) nop.f 0
+ (p40) cmp.ltu p41,p39=n[2],a[2] } // (p23)
+{ .mfi; (p21) ld8 t[0]=[tptr],8
+ (p16) nop.f 0
+ (p42) cmp.leu p41,p39=n[2],a[2] };; // (p23)
+{ .mfi; (p18) ldf8 nlo[0]=[nptr],8 // *(nptr++)
+ (p20) xma.hu nhi[0]=nlo[2],m0,nhi[1]
+ (p30) add a[1]=a[1],t[1] } // (p22)
+{ .mfi; (p16) nop.m 0
+ (p20) xma.lu nlo[2]=nlo[2],m0,nhi[1]
+ (p32) add a[1]=a[1],t[1],1 };; // (p22)
+{ .mmi; (p21) getf.sig n[0]=nlo[3]
+ (p16) nop.m 0
+ (p30) cmp.ltu p31,p29=a[1],t[1] } // (p22)
+{ .mmb; (p23) st8 [tp_1]=n[2],8
+ (p32) cmp.leu p31,p29=a[1],t[1] // (p22)
+ br.ctop.sptk .Linner_ctop };;
+.Linner_cend:
+
+{ .mmi; getf.sig a[0]=ahi[6] // (p24)
+ getf.sig n[0]=nhi[4]
+ nop.i 0 };;
+
+{ .mmi; .pred.rel "mutex",p31,p33
+(p31) add a[0]=a[0],topbit
+(p33) add a[0]=a[0],topbit,1
+ mov topbit=r0 };;
+{ .mfi; .pred.rel "mutex",p31,p33
+(p31) cmp.ltu p32,p30=a[0],topbit
+(p33) cmp.leu p32,p30=a[0],topbit
+ }
+{ .mfi; .pred.rel "mutex",p40,p42
+(p40) add n[0]=n[0],a[0]
+(p42) add n[0]=n[0],a[0],1
+ };;
+{ .mmi; .pred.rel "mutex",p44,p46
+(p40) cmp.ltu p41,p39=n[0],a[0]
+(p42) cmp.leu p41,p39=n[0],a[0]
+(p32) add topbit=r0,r0,1 }
+
+{ .mmi; st8 [tp_1]=n[0],8
+ cmp4.ne p6,p0=1,num
+ sub aptr=aptr,len };; // rewind
+{ .mmi; sub nptr=nptr,len
+(p41) add topbit=r0,r0,1
+ add tptr=16,sp }
+{ .mmb; add tp_1=8,sp
+ add num=-1,num // num--
+(p6) br.cond.sptk.many .Louter };;
+
+{ .mbb; add lc=4,lc
+ brp.loop.imp .Lsub_ctop,.Lsub_cend-16
+ clrrrb.pr };;
+{ .mii; nop.m 0
+ mov pr.rot=0x10001<<16
+ // ------^---- (p33) at first (p17)
+ mov ar.lc=lc }
+{ .mii; nop.m 0
+ mov ar.ec=3
+ nop.i 0 };;
+
+.Lsub_ctop:
+.pred.rel "mutex",p33,p35
+{ .mfi; (p16) ld8 t[0]=[tptr],8 // t=*(tp++)
+ (p16) nop.f 0
+ (p33) sub n[1]=t[1],n[1] } // (p17)
+{ .mfi; (p16) ld8 n[0]=[nptr],8 // n=*(np++)
+ (p16) nop.f 0
+ (p35) sub n[1]=t[1],n[1],1 };; // (p17)
+{ .mib; (p18) st8 [rptr]=n[2],8 // *(rp++)=r
+ (p33) cmp.gtu p34,p32=n[1],t[1] // (p17)
+ (p18) nop.b 0 }
+{ .mib; (p18) nop.m 0
+ (p35) cmp.geu p34,p32=n[1],t[1] // (p17)
+ br.ctop.sptk .Lsub_ctop };;
+.Lsub_cend:
+
+{ .mmb; .pred.rel "mutex",p34,p36
+(p34) sub topbit=topbit,r0 // (p19)
+(p36) sub topbit=topbit,r0,1
+ brp.loop.imp .Lcopy_ctop,.Lcopy_cend-16
+ }
+{ .mmb; sub rptr=rptr,len // rewind
+ sub tptr=tptr,len
+ clrrrb.pr };;
+{ .mmi; and aptr=tptr,topbit
+ andcm bptr=rptr,topbit
+ mov pr.rot=1<<16 };;
+{ .mii; or nptr=aptr,bptr
+ mov ar.lc=lc
+ mov ar.ec=3 };;
+
+.Lcopy_ctop:
+{ .mmb; (p16) ld8 n[0]=[nptr],8
+ (p18) st8 [tptr]=r0,8
+ (p16) nop.b 0 }
+{ .mmb; (p16) nop.m 0
+ (p18) st8 [rptr]=n[2],8
+ br.ctop.sptk .Lcopy_ctop };;
+.Lcopy_cend:
+
+{ .mmi; mov ret0=1 // signal "handled"
+ rum 1<<5 // clear um.mfh
+ mov ar.lc=prevlc }
+{ .mib; .restore sp
+ mov sp=prevsp
+ mov pr=prevpr,0x1ffff
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_general#
+
+a1=r16; a2=r17; a3=r18; a4=r19; a5=r20; a6=r21; a7=r22; a8=r23;
+n1=r24; n2=r25; n3=r26; n4=r27; n5=r28; n6=r29; n7=r30; n8=r31;
+t0=r15;
+
+ai0=f8; ai1=f9; ai2=f10; ai3=f11; ai4=f12; ai5=f13; ai6=f14; ai7=f15;
+ni0=f16; ni1=f17; ni2=f18; ni3=f19; ni4=f20; ni5=f21; ni6=f22; ni7=f23;
+
+.align 64
+.skip 48 // aligns loop body
+.local bn_mul_mont_8#
+.proc bn_mul_mont_8#
+bn_mul_mont_8:
+ .prologue
+{ .mmi; .save ar.pfs,prevfs
+ alloc prevfs=ar.pfs,6,2,0,8
+ .vframe prevsp
+ mov prevsp=sp
+ .save ar.lc,prevlc
+ mov prevlc=ar.lc }
+{ .mmi; add r17=-6*16,sp
+ add sp=-7*16,sp
+ .save pr,prevpr
+ mov prevpr=pr };;
+
+{ .mmi; .save.gf 0,0x10
+ stf.spill [sp]=f16,-16
+ .save.gf 0,0x20
+ stf.spill [r17]=f17,32
+ add r16=-5*16,prevsp};;
+{ .mmi; .save.gf 0,0x40
+ stf.spill [r16]=f18,32
+ .save.gf 0,0x80
+ stf.spill [r17]=f19,32
+ $ADDP aptr=0,in1 };;
+{ .mmi; .save.gf 0,0x100
+ stf.spill [r16]=f20,32
+ .save.gf 0,0x200
+ stf.spill [r17]=f21,32
+ $ADDP r29=8,in1 };;
+{ .mmi; .save.gf 0,0x400
+ stf.spill [r16]=f22
+ .save.gf 0,0x800
+ stf.spill [r17]=f23
+ $ADDP rptr=0,in0 };;
+
+ .body
+ .rotf bj[8],mj[2],tf[2],alo[10],ahi[10],nlo[10],nhi[10]
+ .rotr t[8]
+
+// load input vectors padding them to 8 elements
+{ .mmi; ldf8 ai0=[aptr],16 // ap[0]
+ ldf8 ai1=[r29],16 // ap[1]
+ $ADDP bptr=0,in2 }
+{ .mmi; $ADDP r30=8,in2
+ $ADDP nptr=0,in3
+ $ADDP r31=8,in3 };;
+{ .mmi; ldf8 bj[7]=[bptr],16 // bp[0]
+ ldf8 bj[6]=[r30],16 // bp[1]
+ cmp4.le p4,p5=3,in5 }
+{ .mmi; ldf8 ni0=[nptr],16 // np[0]
+ ldf8 ni1=[r31],16 // np[1]
+ cmp4.le p6,p7=4,in5 };;
+
+{ .mfi; (p4)ldf8 ai2=[aptr],16 // ap[2]
+ (p5)fcvt.fxu ai2=f0
+ cmp4.le p8,p9=5,in5 }
+{ .mfi; (p6)ldf8 ai3=[r29],16 // ap[3]
+ (p7)fcvt.fxu ai3=f0
+ cmp4.le p10,p11=6,in5 }
+{ .mfi; (p4)ldf8 bj[5]=[bptr],16 // bp[2]
+ (p5)fcvt.fxu bj[5]=f0
+ cmp4.le p12,p13=7,in5 }
+{ .mfi; (p6)ldf8 bj[4]=[r30],16 // bp[3]
+ (p7)fcvt.fxu bj[4]=f0
+ cmp4.le p14,p15=8,in5 }
+{ .mfi; (p4)ldf8 ni2=[nptr],16 // np[2]
+ (p5)fcvt.fxu ni2=f0
+ addp4 r28=-1,in5 }
+{ .mfi; (p6)ldf8 ni3=[r31],16 // np[3]
+ (p7)fcvt.fxu ni3=f0
+ $ADDP in4=0,in4 };;
+
+{ .mfi; ldf8 n0=[in4]
+ fcvt.fxu tf[1]=f0
+ nop.i 0 }
+
+{ .mfi; (p8)ldf8 ai4=[aptr],16 // ap[4]
+ (p9)fcvt.fxu ai4=f0
+ mov t[0]=r0 }
+{ .mfi; (p10)ldf8 ai5=[r29],16 // ap[5]
+ (p11)fcvt.fxu ai5=f0
+ mov t[1]=r0 }
+{ .mfi; (p8)ldf8 bj[3]=[bptr],16 // bp[4]
+ (p9)fcvt.fxu bj[3]=f0
+ mov t[2]=r0 }
+{ .mfi; (p10)ldf8 bj[2]=[r30],16 // bp[5]
+ (p11)fcvt.fxu bj[2]=f0
+ mov t[3]=r0 }
+{ .mfi; (p8)ldf8 ni4=[nptr],16 // np[4]
+ (p9)fcvt.fxu ni4=f0
+ mov t[4]=r0 }
+{ .mfi; (p10)ldf8 ni5=[r31],16 // np[5]
+ (p11)fcvt.fxu ni5=f0
+ mov t[5]=r0 };;
+
+{ .mfi; (p12)ldf8 ai6=[aptr],16 // ap[6]
+ (p13)fcvt.fxu ai6=f0
+ mov t[6]=r0 }
+{ .mfi; (p14)ldf8 ai7=[r29],16 // ap[7]
+ (p15)fcvt.fxu ai7=f0
+ mov t[7]=r0 }
+{ .mfi; (p12)ldf8 bj[1]=[bptr],16 // bp[6]
+ (p13)fcvt.fxu bj[1]=f0
+ mov ar.lc=r28 }
+{ .mfi; (p14)ldf8 bj[0]=[r30],16 // bp[7]
+ (p15)fcvt.fxu bj[0]=f0
+ mov ar.ec=1 }
+{ .mfi; (p12)ldf8 ni6=[nptr],16 // np[6]
+ (p13)fcvt.fxu ni6=f0
+ mov pr.rot=1<<16 }
+{ .mfb; (p14)ldf8 ni7=[r31],16 // np[7]
+ (p15)fcvt.fxu ni7=f0
+ brp.loop.imp .Louter_8_ctop,.Louter_8_cend-16
+ };;
+
+// The loop is scheduled for 32*n ticks on Itanium 2. An actual attempt
+// to measure with the help of the Interval Time Counter indicated that the
+// factor is a tad higher: 33 or 34, if not 35. Exact measurement and
+// addressing the issue are problematic, because I don't have access
+// to a platform-specific instruction-level profiler. On Itanium it
+// should run in 56*n ticks, because of the higher xma latency...
+.Louter_8_ctop:
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 0:
+ (p16) xma.hu ahi[0]=ai0,bj[7],tf[1] // ap[0]*b[i]+t[0]
+ (p40) add a3=a3,n3 } // (p17) a3+=n3
+{ .mfi; (p42) add a3=a3,n3,1
+ (p16) xma.lu alo[0]=ai0,bj[7],tf[1]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 4:
+ (p16) xma.hu ahi[1]=ai1,bj[7],ahi[0] // ap[1]*b[i]
+ (p41) add a4=a4,n4 } // (p17) a4+=n4
+{ .mfi; (p43) add a4=a4,n4,1
+ (p16) xma.lu alo[1]=ai1,bj[7],ahi[0]
+ (p16) nop.i 0 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p16) xmpy.lu mj[0]=alo[0],n0 // (ap[0]*b[i]+t[0])*n0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p16) nop.m 0 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 8:
+ (p16) xma.hu ahi[2]=ai2,bj[7],ahi[1] // ap[2]*b[i]
+ (p40) add a5=a5,n5 } // (p17) a5+=n5
+{ .mfi; (p42) add a5=a5,n5,1
+ (p16) xma.lu alo[2]=ai2,bj[7],ahi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a1=alo[1] // 9:
+	(p48)	add	t[4]=t[4],a5		// (p17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mfi; (p16) nop.m 0 // 10:
+ (p16) xma.hu nhi[0]=ni0,mj[0],alo[0] // np[0]*m0
+ (p40) cmp.ltu p43,p41=a5,n5 }
+{ .mfi; (p42) cmp.leu p43,p41=a5,n5
+ (p16) xma.lu nlo[0]=ni0,mj[0],alo[0]
+ (p16) nop.i 0 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p17) getf.sig n8=nhi[8] // 12:
+ (p16) xma.hu ahi[3]=ai3,bj[7],ahi[2] // ap[3]*b[i]
+ (p41) add a6=a6,n6 } // (p17) a6+=n6
+{ .mfi; (p43) add a6=a6,n6,1
+ (p16) xma.lu alo[3]=ai3,bj[7],ahi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a2=alo[2] // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mfi; (p16) nop.m 0 // 14:
+ (p16) xma.hu nhi[1]=ni1,mj[0],nhi[0] // np[1]*m0
+ (p41) cmp.ltu p42,p40=a6,n6 }
+{ .mfi; (p43) cmp.leu p42,p40=a6,n6
+ (p16) xma.lu nlo[1]=ni1,mj[0],nhi[0]
+ (p16) nop.i 0 };;
+{ .mii; (p16) nop.m 0 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 16:
+ (p16) xma.hu ahi[4]=ai4,bj[7],ahi[3] // ap[4]*b[i]
+ (p40) add a7=a7,n7 } // (p17) a7+=n7
+{ .mfi; (p42) add a7=a7,n7,1
+ (p16) xma.lu alo[4]=ai4,bj[7],ahi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a3=alo[3] // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mfi; (p16) nop.m 0 // 18:
+ (p16) xma.hu nhi[2]=ni2,mj[0],nhi[1] // np[2]*m0
+ (p40) cmp.ltu p43,p41=a7,n7 }
+{ .mfi; (p42) cmp.leu p43,p41=a7,n7
+ (p16) xma.lu nlo[2]=ni2,mj[0],nhi[1]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n1=nlo[1] // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mfi; (p16) nop.m 0 // 20:
+ (p16) xma.hu ahi[5]=ai5,bj[7],ahi[4] // ap[5]*b[i]
+ (p41) add a8=a8,n8 } // (p17) a8+=n8
+{ .mfi; (p43) add a8=a8,n8,1
+ (p16) xma.lu alo[5]=ai5,bj[7],ahi[4]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a4=alo[4] // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 };;
+{ .mfi; (p16) nop.m 0 // 22:
+ (p16) xma.hu nhi[3]=ni3,mj[0],nhi[2] // np[3]*m0
+ (p41) cmp.ltu p42,p40=a8,n8 }
+{ .mfi; (p43) cmp.leu p42,p40=a8,n8
+ (p16) xma.lu nlo[3]=ni3,mj[0],nhi[2]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n2=nlo[2] // 23:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 };;
+{ .mfi; (p16) nop.m 0 // 24:
+ (p16) xma.hu ahi[6]=ai6,bj[7],ahi[5] // ap[6]*b[i]
+ (p16) add a1=a1,n1 } // (p16) a1+=n1
+{ .mfi; (p16) nop.m 0
+ (p16) xma.lu alo[6]=ai6,bj[7],ahi[5]
+ (p17) mov t[0]=r0 };;
+{ .mii; (p16) getf.sig a5=alo[5] // 25:
+ (p16) add t0=t[7],a1 // (p16) t[7]+=a1
+ (p42) add t[0]=t[0],r0,1 };;
+{ .mfi; (p16) setf.sig tf[0]=t0 // 26:
+ (p16) xma.hu nhi[4]=ni4,mj[0],nhi[3] // np[4]*m0
+ (p50) add t[0]=t[0],r0,1 }
+{ .mfi; (p16) cmp.ltu.unc p42,p40=a1,n1
+ (p16) xma.lu nlo[4]=ni4,mj[0],nhi[3]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig n3=nlo[3] // 27:
+ (p16) cmp.ltu.unc p50,p48=t0,a1
+ (p16) nop.i 0 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mfi; (p16) nop.m 0 // 28:
+ (p16) xma.hu ahi[7]=ai7,bj[7],ahi[6] // ap[7]*b[i]
+ (p40) add a2=a2,n2 } // (p16) a2+=n2
+{ .mfi; (p42) add a2=a2,n2,1
+ (p16) xma.lu alo[7]=ai7,bj[7],ahi[6]
+ (p16) nop.i 0 };;
+{ .mii; (p16) getf.sig a6=alo[6] // 29:
+ (p48) add t[6]=t[6],a2 // (p16) t[6]+=a2
+ (p50) add t[6]=t[6],a2,1 };;
+{ .mfi; (p16) nop.m 0 // 30:
+ (p16) xma.hu nhi[5]=ni5,mj[0],nhi[4] // np[5]*m0
+ (p40) cmp.ltu p41,p39=a2,n2 }
+{ .mfi; (p42) cmp.leu p41,p39=a2,n2
+ (p16) xma.lu nlo[5]=ni5,mj[0],nhi[4]
+ (p16) nop.i 0 };;
+{ .mfi; (p16) getf.sig n4=nlo[4] // 31:
+ (p16) nop.f 0
+ (p48) cmp.ltu p49,p47=t[6],a2 }
+{ .mfb; (p50) cmp.leu p49,p47=t[6],a2
+ (p16) nop.f 0
+ br.ctop.sptk.many .Louter_8_ctop };;
+.Louter_8_cend:
+
+// The above loop has to execute one more time, without (p16), which is
+// replaced with a merged move of np[8] to the GPR bank
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mmi; (p0) getf.sig n1=ni0 // 0:
+ (p40) add a3=a3,n3 // (p17) a3+=n3
+ (p42) add a3=a3,n3,1 };;
+{ .mii; (p17) getf.sig a7=alo[8] // 1:
+ (p48) add t[6]=t[6],a3 // (p17) t[6]+=a3
+ (p50) add t[6]=t[6],a3,1 };;
+{ .mfi; (p17) getf.sig a8=ahi[8] // 2:
+ (p17) xma.hu nhi[7]=ni6,mj[1],nhi[6] // np[6]*m0
+ (p40) cmp.ltu p43,p41=a3,n3 }
+{ .mfi; (p42) cmp.leu p43,p41=a3,n3
+ (p17) xma.lu nlo[7]=ni6,mj[1],nhi[6]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n5=nlo[6] // 3:
+ (p48) cmp.ltu p51,p49=t[6],a3
+ (p50) cmp.leu p51,p49=t[6],a3 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mmi; (p0) getf.sig n2=ni1 // 4:
+ (p41) add a4=a4,n4 // (p17) a4+=n4
+ (p43) add a4=a4,n4,1 };;
+{ .mfi; (p49) add t[5]=t[5],a4 // 5: (p17) t[5]+=a4
+ (p0) nop.f 0
+ (p51) add t[5]=t[5],a4,1 };;
+{ .mfi; (p0) getf.sig n3=ni2 // 6:
+ (p17) xma.hu nhi[8]=ni7,mj[1],nhi[7] // np[7]*m0
+ (p41) cmp.ltu p42,p40=a4,n4 }
+{ .mfi; (p43) cmp.leu p42,p40=a4,n4
+ (p17) xma.lu nlo[8]=ni7,mj[1],nhi[7]
+ (p0) nop.i 0 };;
+{ .mii; (p17) getf.sig n6=nlo[7] // 7:
+ (p49) cmp.ltu p50,p48=t[5],a4
+ (p51) cmp.leu p50,p48=t[5],a4 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) getf.sig n4=ni3 // 8:
+ (p40) add a5=a5,n5 // (p17) a5+=n5
+ (p42) add a5=a5,n5,1 };;
+{ .mii; (p0) nop.m 0 // 9:
+	(p48)	add	t[4]=t[4],a5		// (p17) t[4]+=a5
+ (p50) add t[4]=t[4],a5,1 };;
+{ .mii; (p0) nop.m 0 // 10:
+ (p40) cmp.ltu p43,p41=a5,n5
+ (p42) cmp.leu p43,p41=a5,n5 };;
+{ .mii; (p17) getf.sig n7=nlo[8] // 11:
+ (p48) cmp.ltu p51,p49=t[4],a5
+ (p50) cmp.leu p51,p49=t[4],a5 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p17) getf.sig n8=nhi[8] // 12:
+ (p41) add a6=a6,n6 // (p17) a6+=n6
+ (p43) add a6=a6,n6,1 };;
+{ .mii; (p0) getf.sig n5=ni4 // 13:
+ (p49) add t[3]=t[3],a6 // (p17) t[3]+=a6
+ (p51) add t[3]=t[3],a6,1 };;
+{ .mii; (p0) nop.m 0 // 14:
+ (p41) cmp.ltu p42,p40=a6,n6
+ (p43) cmp.leu p42,p40=a6,n6 };;
+{ .mii; (p0) getf.sig n6=ni5 // 15:
+ (p49) cmp.ltu p50,p48=t[3],a6
+ (p51) cmp.leu p50,p48=t[3],a6 };;
+ .pred.rel "mutex",p40,p42
+ .pred.rel "mutex",p48,p50
+{ .mii; (p0) nop.m 0 // 16:
+ (p40) add a7=a7,n7 // (p17) a7+=n7
+ (p42) add a7=a7,n7,1 };;
+{ .mii; (p0) nop.m 0 // 17:
+ (p48) add t[2]=t[2],a7 // (p17) t[2]+=a7
+ (p50) add t[2]=t[2],a7,1 };;
+{ .mii; (p0) nop.m 0 // 18:
+ (p40) cmp.ltu p43,p41=a7,n7
+ (p42) cmp.leu p43,p41=a7,n7 };;
+{ .mii; (p0) getf.sig n7=ni6 // 19:
+ (p48) cmp.ltu p51,p49=t[2],a7
+ (p50) cmp.leu p51,p49=t[2],a7 };;
+ .pred.rel "mutex",p41,p43
+ .pred.rel "mutex",p49,p51
+{ .mii; (p0) nop.m 0 // 20:
+ (p41) add a8=a8,n8 // (p17) a8+=n8
+ (p43) add a8=a8,n8,1 };;
+{ .mmi; (p0) nop.m 0 // 21:
+ (p49) add t[1]=t[1],a8 // (p17) t[1]+=a8
+ (p51) add t[1]=t[1],a8,1 }
+{ .mmi; (p17) mov t[0]=r0
+ (p41) cmp.ltu p42,p40=a8,n8
+ (p43) cmp.leu p42,p40=a8,n8 };;
+{ .mmi; (p0) getf.sig n8=ni7 // 22:
+ (p49) cmp.ltu p50,p48=t[1],a8
+ (p51) cmp.leu p50,p48=t[1],a8 }
+{ .mmi; (p42) add t[0]=t[0],r0,1
+ (p0) add r16=-7*16,prevsp
+ (p0) add r17=-6*16,prevsp };;
+
+// subtract np[8] from carrybit|tmp[8]
+// carrybit|tmp[8] layout upon exit from above loop is:
+// t[0]|t[1]|t[2]|t[3]|t[4]|t[5]|t[6]|t[7]|t0 (least significant)
+{ .mmi; (p50)add t[0]=t[0],r0,1
+ add r18=-5*16,prevsp
+ sub n1=t0,n1 };;
+{ .mmi; cmp.gtu p34,p32=n1,t0;;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n2=t[7],n2
+ (p34)sub n2=t[7],n2,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n2,t[7]
+ (p34)cmp.geu p35,p33=n2,t[7];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n3=t[6],n3 }
+{ .mmi; (p35)sub n3=t[6],n3,1;;
+ (p33)cmp.gtu p34,p32=n3,t[6]
+ (p35)cmp.geu p34,p32=n3,t[6] };;
+ .pred.rel "mutex",p32,p34
+{ .mii; (p32)sub n4=t[5],n4
+ (p34)sub n4=t[5],n4,1;;
+ (p32)cmp.gtu p35,p33=n4,t[5] }
+{ .mmi; (p34)cmp.geu p35,p33=n4,t[5];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub n5=t[4],n5
+ (p35)sub n5=t[4],n5,1 };;
+{ .mii; (p33)cmp.gtu p34,p32=n5,t[4]
+ (p35)cmp.geu p34,p32=n5,t[4];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n6=t[3],n6 }
+{ .mmi; (p34)sub n6=t[3],n6,1;;
+ (p32)cmp.gtu p35,p33=n6,t[3]
+ (p34)cmp.geu p35,p33=n6,t[3] };;
+ .pred.rel "mutex",p33,p35
+{ .mii; (p33)sub n7=t[2],n7
+ (p35)sub n7=t[2],n7,1;;
+ (p33)cmp.gtu p34,p32=n7,t[2] }
+{ .mmi; (p35)cmp.geu p34,p32=n7,t[2];;
+ .pred.rel "mutex",p32,p34
+ (p32)sub n8=t[1],n8
+ (p34)sub n8=t[1],n8,1 };;
+{ .mii; (p32)cmp.gtu p35,p33=n8,t[1]
+ (p34)cmp.geu p35,p33=n8,t[1];;
+ .pred.rel "mutex",p33,p35
+ (p33)sub a8=t[0],r0 }
+{ .mmi; (p35)sub a8=t[0],r0,1;;
+ (p33)cmp.gtu p34,p32=a8,t[0]
+ (p35)cmp.geu p34,p32=a8,t[0] };;
+
+// save the result, either tmp[num] or tmp[num]-np[num]
+ .pred.rel "mutex",p32,p34
+{ .mmi; (p32)st8 [rptr]=n1,8
+ (p34)st8 [rptr]=t0,8
+ add r19=-4*16,prevsp};;
+{ .mmb; (p32)st8 [rptr]=n2,8
+ (p34)st8 [rptr]=t[7],8
+ (p5)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n3,8
+ (p34)st8 [rptr]=t[6],8
+ (p7)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n4,8
+ (p34)st8 [rptr]=t[5],8
+ (p9)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n5,8
+ (p34)st8 [rptr]=t[4],8
+ (p11)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n6,8
+ (p34)st8 [rptr]=t[3],8
+ (p13)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n7,8
+ (p34)st8 [rptr]=t[2],8
+ (p15)br.cond.dpnt.few .Ldone };;
+{ .mmb; (p32)st8 [rptr]=n8,8
+ (p34)st8 [rptr]=t[1],8
+ nop.b 0 };;
+.Ldone: // epilogue
+{ .mmi; ldf.fill f16=[r16],64
+ ldf.fill f17=[r17],64
+ nop.i 0 }
+{ .mmi; ldf.fill f18=[r18],64
+ ldf.fill f19=[r19],64
+ mov pr=prevpr,0x1ffff };;
+{ .mmi; ldf.fill f20=[r16]
+ ldf.fill f21=[r17]
+ mov ar.lc=prevlc }
+{ .mmi; ldf.fill f22=[r18]
+ ldf.fill f23=[r19]
+ mov ret0=1 } // signal "handled"
+{ .mib; rum 1<<5
+ .restore sp
+ mov sp=prevsp
+ br.ret.sptk.many b0 };;
+.endp bn_mul_mont_8#
+
+.type copyright#,\@object
+copyright:
+stringz "Montgomery multiplication for IA-64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+open STDOUT,">$output" if $output;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/ia64.S b/openssl-1.1.0h/crypto/bn/asm/ia64.S
new file mode 100644
index 0000000..f2404a3
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/ia64.S
@@ -0,0 +1,1562 @@
+.explicit
+.text
+.ident "ia64.S, Version 2.1"
+.ident "IA-64 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+// Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License"). You may not use
+// this file except in compliance with the License. You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+
+//
+// ====================================================================
+// Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+// project.
+//
+// Rights for redistribution and usage in source and binary forms are
+// granted according to the OpenSSL license. Warranty of any kind is
+// disclaimed.
+// ====================================================================
+//
+// Version 2.x is an Itanium2 re-tune. A few words about how Itanium2
+// differs from Itanium from this module's viewpoint. Most notably, is it
+// "wider" than Itanium? Can you experience the loop scalability
+// discussed in the commentary sections? Not really:-( Itanium2 has 6
+// integer ALU ports, i.e. it's 2 ports wider, but that's not enough to
+// spin twice as fast, as I need 8 IALU ports. The amount of floating point
+// ports is the same, i.e. 2, while I need 4. In other words, to this
+// module Itanium2 remains effectively as "wide" as Itanium. Yet it's
+// essentially different in respect to this module, and a re-tune was
+// required, because some instruction latencies have changed, most
+// noticeably those of the intensively used instructions:
+//
+// Itanium Itanium2
+// ldf8 9 6 L2 hit
+// ld8 2 1 L1 hit
+// getf 2 5
+// xma[->getf] 7[+1] 4[+0]
+// add[->st8] 1[+1] 1[+0]
+//
+// What does it mean? You might ratiocinate that the original code
+// should run just faster... Because the sum of latencies is smaller...
+// Wrong! Note that the getf latency increased. This means that if a loop
+// is scheduled for lower latency (as they were), then it will suffer from
+// a stall condition and the code will therefore turn anti-scalable; e.g.
+// the original bn_mul_words spun at 5*n, or 2.5 times slower than expected,
+// on Itanium2! What to do? Reschedule the loops for Itanium2? But then
+// Itanium would exhibit anti-scalability. So I've chosen to reschedule
+// for the worst latency of every instruction, aiming for the best
+// *all-round* performance.
+
+// Q. How much faster does it get?
+// A. Here is the output from 'openssl speed rsa dsa' for vanilla
+// 0.9.6a compiled with gcc version 2.96 20000731 (Red Hat
+// Linux 7.1 2.96-81):
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0036s 0.0003s 275.3 2999.2
+// rsa 1024 bits 0.0203s 0.0011s 49.3 894.1
+// rsa 2048 bits 0.1331s 0.0040s 7.5 250.9
+// rsa 4096 bits 0.9270s 0.0147s 1.1 68.1
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0035s 0.0043s 288.3 234.8
+// dsa 1024 bits 0.0111s 0.0135s 90.0 74.2
+//
+// And here is similar output but for this assembler
+// implementation:-)
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0021s 0.0001s 549.4 9638.5
+// rsa 1024 bits 0.0055s 0.0002s 183.8 4481.1
+// rsa 2048 bits 0.0244s 0.0006s 41.4 1726.3
+// rsa 4096 bits 0.1295s 0.0018s 7.7 561.5
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0012s 0.0013s 891.9 756.6
+// dsa 1024 bits 0.0023s 0.0028s 440.4 376.2
+//
+// Yes, you may argue that it's not a fair comparison, as it's
+// possible to craft the C implementation with the BN_UMULT_HIGH
+// inline assembler macro. But of course! Here is the output
+// with the macro:
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0020s 0.0002s 495.0 6561.0
+// rsa 1024 bits 0.0086s 0.0004s 116.2 2235.7
+// rsa 2048 bits 0.0519s 0.0015s 19.3 667.3
+// rsa 4096 bits 0.3464s 0.0053s 2.9 187.7
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0016s 0.0020s 613.1 510.5
+// dsa 1024 bits 0.0045s 0.0054s 221.0 183.9
+//
+// My code is still way faster, huh:-) And I believe that even
+// higher performance can be achieved. Note that as keys get
+// longer, the performance gain is larger. Why? According to the
+// profiler there is another player in the field, namely
+// BN_from_montgomery, consuming a larger and larger portion of CPU
+// time as keysize decreases. I therefore consider putting effort
+// into an assembler implementation of the following routine:
+//
+// void bn_mul_add_mont (BN_ULONG *rp,BN_ULONG *np,int nl,BN_ULONG n0)
+// {
+// int i,j;
+//	BN_ULONG v, *nrp=rp+nl;	/* nrp presumably starts at the upper half of rp */
+//
+// for (i=0; i<nl; i++)
+// {
+// v=bn_mul_add_words(rp,np,nl,(rp[0]*n0)&BN_MASK2);
+// nrp++;
+// rp++;
+// if (((nrp[-1]+=v)&BN_MASK2) < v)
+// for (j=0; ((++nrp[j])&BN_MASK2) == 0; j++) ;
+// }
+// }
+//
+// It might as well be beneficial to implement even combaX
+// variants, as it appears as it can literally unleash the
+// performance (see comment section to bn_mul_comba8 below).
+//
+// And finally, for your reference, the output for 0.9.6a compiled
+// with SGIcc version 0.01.0-12 (keep in mind that at the moment
+// of this writing it's not possible to convince SGIcc to use the
+// BN_UMULT_HIGH inline assembler macro, yet the code is fast
+// for a compiler-generated one:-):
+//
+// sign verify sign/s verify/s
+// rsa 512 bits 0.0022s 0.0002s 452.7 5894.3
+// rsa 1024 bits 0.0097s 0.0005s 102.7 2002.9
+// rsa 2048 bits 0.0578s 0.0017s 17.3 600.2
+// rsa 4096 bits 0.3838s 0.0061s 2.6 164.5
+// sign verify sign/s verify/s
+// dsa 512 bits 0.0018s 0.0022s 547.3 459.6
+// dsa 1024 bits 0.0051s 0.0062s 196.6 161.3
+//
+// Oh! Benchmarks were performed on a 733MHz Lion-class Itanium
+// system running Redhat Linux 7.1 (very special thanks to Ray
+// McCaffity of Williams Communications for providing an account).
+//
+// Q. What's the deal with 'rum 1<<5' at the end of every function?
+// A. Well, by clearing the "upper FP registers written" bit of the
+// User Mask I want to excuse the kernel from preserving the upper
+// (f32-f128) FP register bank over a process context switch, thus
+// minimizing bus bandwidth consumption during the switch (i.e.
+// after the PKI operation completes and the program is off doing
+// something else like bulk symmetric encryption). Having said
+// this, I also want to point out that it might be a good idea
+// to compile the whole toolkit (as well as the majority of the
+// programs for that matter) with the -mfixed-range=f32-f127 command
+// line option. No, it doesn't prevent the compiler from writing
+// to the upper bank, but it at least discourages it from doing so. If
+// you don't like the idea you have the option to compile the module
+// with -Drum=nop.m on the command line.
+//
+
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+#define ADDP addp4
+#else
+#define ADDP add
+#endif
+
+#if 1
+//
+// bn_[add|sub]_words routines.
+//
+// Loops are spinning in 2*(n+5) ticks on Itanium (provided that the
+// data reside in L1 cache, i.e. 2 ticks away). It's possible to
+// compress the epilogue and get down to 2*n+6, but at the cost of
+// scalability (the neat feature of this implementation is that it
+// shall automagically spin in n+5 on "wider" IA-64 implementations:-)
+// I consider the epilogue short enough as it is, and prefer to trade
+// the tiny performance loss on Itanium for scalability.
+//
+// BN_ULONG bn_add_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
+//
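+// For reference, the C semantics of bn_add_words (a sketch with an invented
+// _ref suffix; bn_sub_words below is the borrow-propagating mirror image).
+// The real loop keeps the carry in rotating predicate registers instead:
+//
+//	typedef unsigned long BN_ULONG;		/* 64-bit limb */
+//
+//	BN_ULONG bn_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+//				  const BN_ULONG *bp, int num)
+//	{
+//		BN_ULONG carry = 0, t, c;
+//		int i;
+//		for (i = 0; i < num; i++) {
+//			t = ap[i] + bp[i];
+//			c = t < ap[i];			/* carry out of a+b        */
+//			rp[i] = t + carry;
+//			carry = c | (rp[i] < t);	/* carry out of the +carry */
+//		}
+//		return carry;				/* final carry, 0 or 1     */
+//	}
+//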
+.global bn_add_words#
+.proc bn_add_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_add_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,4,12,0,16
+ cmp4.le p6,p0=r35,r0 };;
+{ .mfb; mov r8=r0 // return value
+(p6) br.ret.spnt.many b0 };;
+
+{ .mib; sub r10=r35,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ brp.loop.imp .L_bn_add_words_ctop,.L_bn_add_words_cend-16
+ }
+{ .mib; ADDP r14=0,r32 // rp
+ .save pr,r9
+ mov r9=pr };;
+ .body
+{ .mii; ADDP r15=0,r33 // ap
+ mov ar.lc=r10
+ mov ar.ec=6 }
+{ .mib; ADDP r16=0,r34 // bp
+ mov pr.rot=1<<16 };;
+
+.L_bn_add_words_ctop:
+{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
+ (p18) add r39=r37,r34
+ (p19) cmp.ltu.unc p56,p0=r40,r38 }
+{ .mfb; (p0) nop.m 0x0
+ (p0) nop.f 0x0
+ (p0) nop.b 0x0 }
+{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
+ (p58) cmp.eq.or p57,p0=-1,r41 // (p20)
+ (p58) add r41=1,r41 } // (p20)
+{ .mfb; (p21) st8 [r14]=r42,8 // *(rp++)=r
+ (p0) nop.f 0x0
+ br.ctop.sptk .L_bn_add_words_ctop };;
+.L_bn_add_words_cend:
+
+{ .mii;
+(p59) add r8=1,r8 // return value
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mbb; nop.b 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_add_words#
+
+//
+// BN_ULONG bn_sub_words(BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int num)
+//
+.global bn_sub_words#
+.proc bn_sub_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_sub_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,4,12,0,16
+ cmp4.le p6,p0=r35,r0 };;
+{ .mfb; mov r8=r0 // return value
+(p6) br.ret.spnt.many b0 };;
+
+{ .mib; sub r10=r35,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ brp.loop.imp .L_bn_sub_words_ctop,.L_bn_sub_words_cend-16
+ }
+{ .mib; ADDP r14=0,r32 // rp
+ .save pr,r9
+ mov r9=pr };;
+ .body
+{ .mii; ADDP r15=0,r33 // ap
+ mov ar.lc=r10
+ mov ar.ec=6 }
+{ .mib; ADDP r16=0,r34 // bp
+ mov pr.rot=1<<16 };;
+
+.L_bn_sub_words_ctop:
+{ .mii; (p16) ld8 r32=[r16],8 // b=*(bp++)
+ (p18) sub r39=r37,r34
+ (p19) cmp.gtu.unc p56,p0=r40,r38 }
+{ .mfb; (p0) nop.m 0x0
+ (p0) nop.f 0x0
+ (p0) nop.b 0x0 }
+{ .mii; (p16) ld8 r35=[r15],8 // a=*(ap++)
+ (p58) cmp.eq.or p57,p0=0,r41 // (p20)
+ (p58) add r41=-1,r41 } // (p20)
+{ .mbb; (p21) st8 [r14]=r42,8 // *(rp++)=r
+ (p0) nop.b 0x0
+ br.ctop.sptk .L_bn_sub_words_ctop };;
+.L_bn_sub_words_cend:
+
+{ .mii;
+(p59) add r8=1,r8 // return value
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mbb; nop.b 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_sub_words#
+#endif
+
+#if 0
+#define XMA_TEMPTATION
+#endif
+
+#if 1
+//
+// BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//
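+// In C terms (a sketch; the _ref name and the gcc-style 128-bit type are
+// assumptions of the sketch, not part of this file):
+//
+//	typedef unsigned long BN_ULONG;
+//	typedef unsigned __int128 BN_ULLONG;
+//
+//	BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+//				  int num, BN_ULONG w)
+//	{
+//		BN_ULONG carry = 0;
+//		int i;
+//		for (i = 0; i < num; i++) {
+//			BN_ULLONG t = (BN_ULLONG)ap[i] * w + carry;
+//			rp[i] = (BN_ULONG)t;		/* low 64 bits  */
+//			carry = (BN_ULONG)(t >> 64);	/* high 64 bits */
+//		}
+//		return carry;
+//	}
+//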
+.global bn_mul_words#
+.proc bn_mul_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_mul_words:
+ .prologue
+ .save ar.pfs,r2
+#ifdef XMA_TEMPTATION
+{ .mfi; alloc r2=ar.pfs,4,0,0,0 };;
+#else
+{ .mfi; alloc r2=ar.pfs,4,12,0,16 };;
+#endif
+{ .mib; mov r8=r0 // return value
+ cmp4.le p6,p0=r34,r0
+(p6) br.ret.spnt.many b0 };;
+
+{ .mii; sub r10=r34,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ .save pr,r9
+ mov r9=pr };;
+
+ .body
+{ .mib; setf.sig f8=r35 // w
+ mov pr.rot=0x800001<<16
+ // ------^----- serves as (p50) at first (p27)
+ brp.loop.imp .L_bn_mul_words_ctop,.L_bn_mul_words_cend-16
+ }
+
+#ifndef XMA_TEMPTATION
+
+{ .mmi; ADDP r14=0,r32 // rp
+ ADDP r15=0,r33 // ap
+ mov ar.lc=r10 }
+{ .mmi; mov r40=0 // serves as r35 at first (p27)
+ mov ar.ec=13 };;
+
+// This loop spins in 2*(n+12) ticks. It's scheduled for data in Itanium
+// L2 cache (i.e. 9 ticks away) as floating point load/store instructions
+// bypass the L1 cache and L2 latency is actually the best-case scenario
+// for ldf8. The loop is not scalable and shall run in 2*(n+12) even on
+// "wider" IA-64 implementations. It's a trade-off here. An n+24 loop
+// would give us ~5% *overall* performance improvement on "wider"
+// IA-64, but would hurt Itanium by about the same because of the longer
+// epilogue. As it's a matter of a few percent in either case, I've
+// chosen to trade the scalability for development time (you can see
+// this very instruction sequence in the bn_mul_add_words loop, which in
+// turn is scalable).
+.L_bn_mul_words_ctop:
+{ .mfi; (p25) getf.sig r36=f52 // low
+ (p21) xmpy.lu f48=f37,f8
+ (p28) cmp.ltu p54,p50=r41,r39 }
+{ .mfi; (p16) ldf8 f32=[r15],8
+ (p21) xmpy.hu f40=f37,f8
+ (p0) nop.i 0x0 };;
+{ .mii; (p25) getf.sig r32=f44 // high
+ .pred.rel "mutex",p50,p54
+ (p50) add r40=r38,r35 // (p27)
+ (p54) add r40=r38,r35,1 } // (p27)
+{ .mfb; (p28) st8 [r14]=r41,8
+ (p0) nop.f 0x0
+ br.ctop.sptk .L_bn_mul_words_ctop };;
+.L_bn_mul_words_cend:
+
+{ .mii; nop.m 0x0
+.pred.rel "mutex",p51,p55
+(p51) add r8=r36,r0
+(p55) add r8=r36,r0,1 }
+{ .mfb; nop.m 0x0
+ nop.f 0x0
+ nop.b 0x0 }
+
+#else // XMA_TEMPTATION
+
+ setf.sig f37=r0 // serves as carry at (p18) tick
+ mov ar.lc=r10
+ mov ar.ec=5;;
+
+// Most of you examining this code very likely wonder why in the name
+// of Intel the following loop is commented out. Indeed, it looks so
+// neat that you find it hard to believe that there's something wrong
+// with it, right? The catch is that every iteration depends on the
+// result of the previous one, and the latter isn't available instantly.
+// The loop therefore spins at the latency of xma minus 1, or in other
+// words at 6*(n+4) ticks:-( Compare to the "production" loop above
+// that runs in 2*(n+11), where the low-latency problem is worked around
+// by moving the dependency to the one-tick latent integer ALU. Note that
+// the "distance" between ldf8 and xma is not the latency of ldf8, but the
+// *difference* between the xma and ldf8 latencies.
+.L_bn_mul_words_ctop:
+{ .mfi; (p16) ldf8 f32=[r33],8
+ (p18) xma.hu f38=f34,f8,f39 }
+{ .mfb; (p20) stf8 [r32]=f37,8
+ (p18) xma.lu f35=f34,f8,f39
+ br.ctop.sptk .L_bn_mul_words_ctop };;
+.L_bn_mul_words_cend:
+
+ getf.sig r8=f41 // the return value
+
+#endif // XMA_TEMPTATION
+
+{ .mii; nop.m 0x0
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mfb; rum 1<<5 // clear um.mfh
+ nop.f 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_mul_words#
+#endif
+
+#if 1
+//
+// BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+//
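+// In C terms (a sketch; the typedefs and the _ref name are assumptions of
+// the sketch, as in the bn_mul_words sketch above):
+//
+//	typedef unsigned long BN_ULONG;
+//	typedef unsigned __int128 BN_ULLONG;
+//
+//	BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+//				      int num, BN_ULONG w)
+//	{
+//		BN_ULONG carry = 0;
+//		int i;
+//		for (i = 0; i < num; i++) {
+//			BN_ULLONG t = (BN_ULLONG)ap[i] * w + rp[i] + carry;
+//			rp[i] = (BN_ULONG)t;		/* accumulate in place */
+//			carry = (BN_ULONG)(t >> 64);
+//		}
+//		return carry;
+//	}
+//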
+.global bn_mul_add_words#
+.proc bn_mul_add_words#
+.align 64
+.skip 48 // makes the loop body aligned at 64-byte boundary
+bn_mul_add_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mmi; alloc r2=ar.pfs,4,4,0,8
+ cmp4.le p6,p0=r34,r0
+ .save ar.lc,r3
+ mov r3=ar.lc };;
+{ .mib; mov r8=r0 // return value
+ sub r10=r34,r0,1
+(p6) br.ret.spnt.many b0 };;
+
+{ .mib; setf.sig f8=r35 // w
+ .save pr,r9
+ mov r9=pr
+ brp.loop.imp .L_bn_mul_add_words_ctop,.L_bn_mul_add_words_cend-16
+ }
+ .body
+{ .mmi; ADDP r14=0,r32 // rp
+ ADDP r15=0,r33 // ap
+ mov ar.lc=r10 }
+{ .mii; ADDP r16=0,r32 // rp copy
+ mov pr.rot=0x2001<<16
+ // ------^----- serves as (p40) at first (p27)
+ mov ar.ec=11 };;
+
+// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
+// Itanium 2. Yes, unlike previous versions it scales:-) The previous
+// version performed *all* additions in the IALU and was starving
+// for those even on Itanium 2. In this version one addition is
+// moved to the FPU and folded with the multiplication. This comes at the
+// cost of propagating the result from the previous call to this subroutine
+// through the L2 cache... In other words, negligible even for shorter keys.
+// The *overall* performance improvement [over the previous version] varies
+// from 11 to 22 percent depending on key length.
+.L_bn_mul_add_words_ctop:
+.pred.rel "mutex",p40,p42
+{ .mfi; (p23) getf.sig r36=f45 // low
+ (p20) xma.lu f42=f36,f8,f50 // low
+ (p40) add r39=r39,r35 } // (p27)
+{ .mfi; (p16) ldf8 f32=[r15],8 // *(ap++)
+ (p20) xma.hu f36=f36,f8,f50 // high
+ (p42) add r39=r39,r35,1 };; // (p27)
+{ .mmi; (p24) getf.sig r32=f40 // high
+ (p16) ldf8 f46=[r16],8 // *(rp1++)
+ (p40) cmp.ltu p41,p39=r39,r35 } // (p27)
+{ .mib; (p26) st8 [r14]=r39,8 // *(rp2++)
+ (p42) cmp.leu p41,p39=r39,r35 // (p27)
+ br.ctop.sptk .L_bn_mul_add_words_ctop};;
+.L_bn_mul_add_words_cend:
+
+{ .mmi; .pred.rel "mutex",p40,p42
+(p40) add r8=r35,r0
+(p42) add r8=r35,r0,1
+ mov pr=r9,0x1ffff }
+{ .mib; rum 1<<5 // clear um.mfh
+ mov ar.lc=r3
+ br.ret.sptk.many b0 };;
+.endp bn_mul_add_words#
+#endif
+
+#if 1
+//
+// void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+//
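+// In C terms (a sketch; rp receives 2*num words, and the typedefs and the
+// _ref name are assumptions of the sketch):
+//
+//	typedef unsigned long BN_ULONG;
+//	typedef unsigned __int128 BN_ULLONG;
+//
+//	void bn_sqr_words_ref(BN_ULONG *rp, const BN_ULONG *ap, int num)
+//	{
+//		int i;
+//		for (i = 0; i < num; i++) {
+//			BN_ULLONG t = (BN_ULLONG)ap[i] * ap[i];
+//			rp[2*i]   = (BN_ULONG)t;	 /* low half of ap[i]^2  */
+//			rp[2*i+1] = (BN_ULONG)(t >> 64); /* high half of ap[i]^2 */
+//		}
+//	}
+//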
+.global bn_sqr_words#
+.proc bn_sqr_words#
+.align 64
+.skip 32 // makes the loop body aligned at 64-byte boundary
+bn_sqr_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ sxt4 r34=r34 };;
+{ .mii; cmp.le p6,p0=r34,r0
+ mov r8=r0 } // return value
+{ .mfb; ADDP r32=0,r32
+ nop.f 0x0
+(p6) br.ret.spnt.many b0 };;
+
+{ .mii; sub r10=r34,r0,1
+ .save ar.lc,r3
+ mov r3=ar.lc
+ .save pr,r9
+ mov r9=pr };;
+
+ .body
+{ .mib; ADDP r33=0,r33
+ mov pr.rot=1<<16
+ brp.loop.imp .L_bn_sqr_words_ctop,.L_bn_sqr_words_cend-16
+ }
+{ .mii; add r34=8,r32
+ mov ar.lc=r10
+ mov ar.ec=18 };;
+
+// 2*(n+17) on Itanium, (n+17) on "wider" IA-64 implementations. It's
+// possible to compress the epilogue (I'm getting tired of writing this
+// comment over and over) and get down to 2*n+16 at the cost of
+// scalability. The decision will very likely be reconsidered after the
+// benchmark program is profiled. I.e. if the performance gain on Itanium
+// appears larger than the loss on "wider" IA-64, then the loop should
+// be explicitly split and the epilogue compressed.
+.L_bn_sqr_words_ctop:
+{ .mfi; (p16) ldf8 f32=[r33],8
+ (p25) xmpy.lu f42=f41,f41
+ (p0) nop.i 0x0 }
+{ .mib; (p33) stf8 [r32]=f50,16
+ (p0) nop.i 0x0
+ (p0) nop.b 0x0 }
+{ .mfi; (p0) nop.m 0x0
+ (p25) xmpy.hu f52=f41,f41
+ (p0) nop.i 0x0 }
+{ .mib; (p33) stf8 [r34]=f60,16
+ (p0) nop.i 0x0
+ br.ctop.sptk .L_bn_sqr_words_ctop };;
+.L_bn_sqr_words_cend:
+
+{ .mii; nop.m 0x0
+ mov pr=r9,0x1ffff
+ mov ar.lc=r3 }
+{ .mfb; rum 1<<5 // clear um.mfh
+ nop.f 0x0
+ br.ret.sptk.many b0 };;
+.endp bn_sqr_words#
+#endif
+
+#if 1
+// Apparently we win nothing by implementing a special bn_sqr_comba8.
+// Yes, it is possible to reduce the number of multiplications by
+// almost a factor of two, but then the amount of additions would
+// increase by a factor of two (as we would have to perform those
+// otherwise performed by xma ourselves). Normally we would trade
+// anyway, as multiplications are way more expensive, but not this
+// time... The multiplication kernel is fully pipelined, and as we drain
+// one 128-bit multiplication result per clock cycle, multiplications
+// are effectively as inexpensive as additions. A special implementation
+// might become of interest for "wider" IA-64 implementations, as you'll
+// be able to get through the multiplication phase faster (there won't
+// be any stall issues as discussed in the commentary section below, and
+// you will therefore be able to employ all 4 FP units)... But in these
+// Itanium days it's simply too hard to justify the effort, so I just
+// drop down to the bn_mul_comba8 code:-)
+//
+// void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+//
+.global bn_sqr_comba8#
+.proc bn_sqr_comba8#
+.align 64
+bn_sqr_comba8:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r33=0,r33
+ addp4 r32=0,r32 };;
+{ .mii;
+#else
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
+ mov r34=r33
+ add r14=8,r33 };;
+ .body
+{ .mii; add r17=8,r34
+ add r15=16,r33
+ add r18=16,r34 }
+{ .mfb; add r16=24,r33
+ br .L_cheat_entry_point8 };;
+.endp bn_sqr_comba8#
+#endif
+
+#if 1
+// I've estimated this routine to run in ~120 ticks, but in reality
+// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
+// cycles consumed by instruction fetch? Or did I misinterpret some
+// clause in the Itanium µ-architecture manual? Comments are welcome
+// and highly appreciated.
+//
+// On Itanium 2 it takes ~190 ticks. This is because of stalls on
+// result from getf.sig. I do nothing about it at this point for
+// reasons depicted below.
+//
+// However! It should be noted that even 160 ticks is a darn good result,
+// as it's over 10 (yes, ten, spelled as t-e-n) times faster than the
+// C version (compiled with gcc with the inline assembler). I really
+// kicked the compiler's butt here, didn't I? Yeah! This brings us to the
+// following statement. It's a damn shame that this routine isn't called
+// very often nowadays! According to the profiler most CPU time is
+// consumed by bn_mul_add_words called from BN_from_montgomery. In
+// order to estimate what we're missing, I've compared the performance
+// of this routine against a "traditional" implementation, i.e. against
+// the following routine:
+//
+// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+// { r[ 8]=bn_mul_words( &(r[0]),a,8,b[0]);
+// r[ 9]=bn_mul_add_words(&(r[1]),a,8,b[1]);
+// r[10]=bn_mul_add_words(&(r[2]),a,8,b[2]);
+// r[11]=bn_mul_add_words(&(r[3]),a,8,b[3]);
+// r[12]=bn_mul_add_words(&(r[4]),a,8,b[4]);
+// r[13]=bn_mul_add_words(&(r[5]),a,8,b[5]);
+// r[14]=bn_mul_add_words(&(r[6]),a,8,b[6]);
+// r[15]=bn_mul_add_words(&(r[7]),a,8,b[7]);
+// }
+//
+// The one below is over 8 times faster than the one above:-( Even
+// more reasons to "combafy" bn_mul_add_mont...
+//
+// And yes, this routine really made me wish there were an optimizing
+// assembler! It also feels like it deserves a dedication.
+//
+// To my wife for being there and to my kids...
+//
+// void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+//
+#define carry1 r14
+#define carry2 r15
+#define carry3 r34
+.global bn_mul_comba8#
+.proc bn_mul_comba8#
+.align 64
+bn_mul_comba8:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
+ add r14=8,r33
+ add r17=8,r34 }
+ .body
+{ .mii; add r15=16,r33
+ add r18=16,r34
+ add r16=24,r33 }
+.L_cheat_entry_point8:
+{ .mmi; add r19=24,r34
+
+ ldf8 f32=[r33],32 };;
+
+{ .mmi; ldf8 f120=[r34],32
+ ldf8 f121=[r17],32 }
+{ .mmi; ldf8 f122=[r18],32
+ ldf8 f123=[r19],32 };;
+{ .mmi; ldf8 f124=[r34]
+ ldf8 f125=[r17] }
+{ .mmi; ldf8 f126=[r18]
+ ldf8 f127=[r19] }
+
+{ .mmi; ldf8 f33=[r14],32
+ ldf8 f34=[r15],32 }
+{ .mmi; ldf8 f35=[r16],32;;
+ ldf8 f36=[r33] }
+{ .mmi; ldf8 f37=[r14]
+ ldf8 f38=[r15] }
+{ .mfi; ldf8 f39=[r16]
+// -------\ Entering multiplier's heaven /-------
+// ------------\ /------------
+// -----------------\ /-----------------
+// ----------------------\/----------------------
+ xma.hu f41=f32,f120,f0 }
+{ .mfi; xma.lu f40=f32,f120,f0 };; // (*)
+{ .mfi; xma.hu f51=f32,f121,f0 }
+{ .mfi; xma.lu f50=f32,f121,f0 };;
+{ .mfi; xma.hu f61=f32,f122,f0 }
+{ .mfi; xma.lu f60=f32,f122,f0 };;
+{ .mfi; xma.hu f71=f32,f123,f0 }
+{ .mfi; xma.lu f70=f32,f123,f0 };;
+{ .mfi; xma.hu f81=f32,f124,f0 }
+{ .mfi; xma.lu f80=f32,f124,f0 };;
+{ .mfi; xma.hu f91=f32,f125,f0 }
+{ .mfi; xma.lu f90=f32,f125,f0 };;
+{ .mfi; xma.hu f101=f32,f126,f0 }
+{ .mfi; xma.lu f100=f32,f126,f0 };;
+{ .mfi; xma.hu f111=f32,f127,f0 }
+{ .mfi; xma.lu f110=f32,f127,f0 };;//
+// (*) You can argue that splitting at every second bundle would
+// prevent "wider" IA-64 implementations from achieving the peak
+// performance. Well, not really... The catch is that if you
+// intend to keep 4 FP units busy by splitting at every fourth
+// bundle and thus perform these 16 multiplications in 4 ticks,
+// the first bundle *below* would stall because the result from
+// the first xma bundle *above* won't be available for another 3
+// ticks (if not more, being an optimist, I assume that "wider"
+// implementation will have same latency:-). This stall will hold
+// you back and the performance would be as if every second bundle
+// were split *anyway*...
+{ .mfi; getf.sig r16=f40
+ xma.hu f42=f33,f120,f41
+ add r33=8,r32 }
+{ .mfi; xma.lu f41=f33,f120,f41 };;
+{ .mfi; getf.sig r24=f50
+ xma.hu f52=f33,f121,f51 }
+{ .mfi; xma.lu f51=f33,f121,f51 };;
+{ .mfi; st8 [r32]=r16,16
+ xma.hu f62=f33,f122,f61 }
+{ .mfi; xma.lu f61=f33,f122,f61 };;
+{ .mfi; xma.hu f72=f33,f123,f71 }
+{ .mfi; xma.lu f71=f33,f123,f71 };;
+{ .mfi; xma.hu f82=f33,f124,f81 }
+{ .mfi; xma.lu f81=f33,f124,f81 };;
+{ .mfi; xma.hu f92=f33,f125,f91 }
+{ .mfi; xma.lu f91=f33,f125,f91 };;
+{ .mfi; xma.hu f102=f33,f126,f101 }
+{ .mfi; xma.lu f101=f33,f126,f101 };;
+{ .mfi; xma.hu f112=f33,f127,f111 }
+{ .mfi; xma.lu f111=f33,f127,f111 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r25=f41
+ xma.hu f43=f34,f120,f42 }
+{ .mfi; xma.lu f42=f34,f120,f42 };;
+{ .mfi; getf.sig r16=f60
+ xma.hu f53=f34,f121,f52 }
+{ .mfi; xma.lu f52=f34,f121,f52 };;
+{ .mfi; getf.sig r17=f51
+ xma.hu f63=f34,f122,f62
+ add r25=r25,r24 }
+{ .mfi; xma.lu f62=f34,f122,f62
+ mov carry1=0 };;
+{ .mfi; cmp.ltu p6,p0=r25,r24
+ xma.hu f73=f34,f123,f72 }
+{ .mfi; xma.lu f72=f34,f123,f72 };;
+{ .mfi; st8 [r33]=r25,16
+ xma.hu f83=f34,f124,f82
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f82=f34,f124,f82 };;
+{ .mfi; xma.hu f93=f34,f125,f92 }
+{ .mfi; xma.lu f92=f34,f125,f92 };;
+{ .mfi; xma.hu f103=f34,f126,f102 }
+{ .mfi; xma.lu f102=f34,f126,f102 };;
+{ .mfi; xma.hu f113=f34,f127,f112 }
+{ .mfi; xma.lu f112=f34,f127,f112 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r18=f42
+ xma.hu f44=f35,f120,f43
+ add r17=r17,r16 }
+{ .mfi; xma.lu f43=f35,f120,f43 };;
+{ .mfi; getf.sig r24=f70
+ xma.hu f54=f35,f121,f53 }
+{ .mfi; mov carry2=0
+ xma.lu f53=f35,f121,f53 };;
+{ .mfi; getf.sig r25=f61
+ xma.hu f64=f35,f122,f63
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; add r18=r18,r17
+ xma.lu f63=f35,f122,f63 };;
+{ .mfi; getf.sig r26=f52
+ xma.hu f74=f35,f123,f73
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f73=f35,f123,f73
+ add r18=r18,carry1 };;
+{ .mfi;
+ xma.hu f84=f35,f124,f83
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,carry1
+ xma.lu f83=f35,f124,f83 };;
+{ .mfi; st8 [r32]=r18,16
+ xma.hu f94=f35,f125,f93
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f93=f35,f125,f93 };;
+{ .mfi; xma.hu f104=f35,f126,f103 }
+{ .mfi; xma.lu f103=f35,f126,f103 };;
+{ .mfi; xma.hu f114=f35,f127,f113 }
+{ .mfi; mov carry1=0
+ xma.lu f113=f35,f127,f113
+ add r25=r25,r24 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r27=f43
+ xma.hu f45=f36,f120,f44
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f44=f36,f120,f44
+ add r26=r26,r25 };;
+{ .mfi; getf.sig r16=f80
+ xma.hu f55=f36,f121,f54
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f54=f36,f121,f54 };;
+{ .mfi; getf.sig r17=f71
+ xma.hu f65=f36,f122,f64
+ cmp.ltu p6,p0=r26,r25 }
+{ .mfi; xma.lu f64=f36,f122,f64
+ add r27=r27,r26 };;
+{ .mfi; getf.sig r18=f62
+ xma.hu f75=f36,f123,f74
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r27,r26
+ xma.lu f74=f36,f123,f74
+ add r27=r27,carry2 };;
+{ .mfi; getf.sig r19=f53
+ xma.hu f85=f36,f124,f84
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f84=f36,f124,f84
+ cmp.ltu p6,p0=r27,carry2 };;
+{ .mfi; st8 [r33]=r27,16
+ xma.hu f95=f36,f125,f94
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f94=f36,f125,f94 };;
+{ .mfi; xma.hu f105=f36,f126,f104 }
+{ .mfi; mov carry2=0
+ xma.lu f104=f36,f126,f104
+ add r17=r17,r16 };;
+{ .mfi; xma.hu f115=f36,f127,f114
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f114=f36,f127,f114
+ add r18=r18,r17 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r20=f44
+ xma.hu f46=f37,f120,f45
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f45=f37,f120,f45
+ add r19=r19,r18 };;
+{ .mfi; getf.sig r24=f90
+ xma.hu f56=f37,f121,f55 }
+{ .mfi; xma.lu f55=f37,f121,f55 };;
+{ .mfi; getf.sig r25=f81
+ xma.hu f66=f37,f122,f65
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r19,r18
+ xma.lu f65=f37,f122,f65
+ add r20=r20,r19 };;
+{ .mfi; getf.sig r26=f72
+ xma.hu f76=f37,f123,f75
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r20,r19
+ xma.lu f75=f37,f123,f75
+ add r20=r20,carry1 };;
+{ .mfi; getf.sig r27=f63
+ xma.hu f86=f37,f124,f85
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f85=f37,f124,f85
+ cmp.ltu p7,p0=r20,carry1 };;
+{ .mfi; getf.sig r28=f54
+ xma.hu f96=f37,f125,f95
+(p7) add carry2=1,carry2 }
+{ .mfi; st8 [r32]=r20,16
+ xma.lu f95=f37,f125,f95 };;
+{ .mfi; xma.hu f106=f37,f126,f105 }
+{ .mfi; mov carry1=0
+ xma.lu f105=f37,f126,f105
+ add r25=r25,r24 };;
+{ .mfi; xma.hu f116=f37,f127,f115
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f115=f37,f127,f115
+ add r26=r26,r25 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r29=f45
+ xma.hu f47=f38,f120,f46
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r26,r25
+ xma.lu f46=f38,f120,f46
+ add r27=r27,r26 };;
+{ .mfi; getf.sig r16=f100
+ xma.hu f57=f38,f121,f56
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r27,r26
+ xma.lu f56=f38,f121,f56
+ add r28=r28,r27 };;
+{ .mfi; getf.sig r17=f91
+ xma.hu f67=f38,f122,f66
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r28,r27
+ xma.lu f66=f38,f122,f66
+ add r29=r29,r28 };;
+{ .mfi; getf.sig r18=f82
+ xma.hu f77=f38,f123,f76
+(p6) add carry1=1,carry1 }
+{ .mfi; cmp.ltu p6,p0=r29,r28
+ xma.lu f76=f38,f123,f76
+ add r29=r29,carry2 };;
+{ .mfi; getf.sig r19=f73
+ xma.hu f87=f38,f124,f86
+(p6) add carry1=1,carry1 }
+{ .mfi; xma.lu f86=f38,f124,f86
+ cmp.ltu p6,p0=r29,carry2 };;
+{ .mfi; getf.sig r20=f64
+ xma.hu f97=f38,f125,f96
+(p6) add carry1=1,carry1 }
+{ .mfi; st8 [r33]=r29,16
+ xma.lu f96=f38,f125,f96 };;
+{ .mfi; getf.sig r21=f55
+ xma.hu f107=f38,f126,f106 }
+{ .mfi; mov carry2=0
+ xma.lu f106=f38,f126,f106
+ add r17=r17,r16 };;
+{ .mfi; xma.hu f117=f38,f127,f116
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f116=f38,f127,f116
+ add r18=r18,r17 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r22=f46
+ xma.hu f48=f39,f120,f47
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r18,r17
+ xma.lu f47=f39,f120,f47
+ add r19=r19,r18 };;
+{ .mfi; getf.sig r24=f110
+ xma.hu f58=f39,f121,f57
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r19,r18
+ xma.lu f57=f39,f121,f57
+ add r20=r20,r19 };;
+{ .mfi; getf.sig r25=f101
+ xma.hu f68=f39,f122,f67
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r20,r19
+ xma.lu f67=f39,f122,f67
+ add r21=r21,r20 };;
+{ .mfi; getf.sig r26=f92
+ xma.hu f78=f39,f123,f77
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r21,r20
+ xma.lu f77=f39,f123,f77
+ add r22=r22,r21 };;
+{ .mfi; getf.sig r27=f83
+ xma.hu f88=f39,f124,f87
+(p7) add carry2=1,carry2 }
+{ .mfi; cmp.ltu p7,p0=r22,r21
+ xma.lu f87=f39,f124,f87
+ add r22=r22,carry1 };;
+{ .mfi; getf.sig r28=f74
+ xma.hu f98=f39,f125,f97
+(p7) add carry2=1,carry2 }
+{ .mfi; xma.lu f97=f39,f125,f97
+ cmp.ltu p7,p0=r22,carry1 };;
+{ .mfi; getf.sig r29=f65
+ xma.hu f108=f39,f126,f107
+(p7) add carry2=1,carry2 }
+{ .mfi; st8 [r32]=r22,16
+ xma.lu f107=f39,f126,f107 };;
+{ .mfi; getf.sig r30=f56
+ xma.hu f118=f39,f127,f117 }
+{ .mfi; xma.lu f117=f39,f127,f117 };;//
+//-------------------------------------------------//
+// Leaving multiplier's heaven... Quite a ride, huh?
+
+{ .mii; getf.sig r31=f47
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r16=f111
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r17=f102 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mii; getf.sig r18=f93
+ add r17=r17,r16
+ mov carry3=0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r29=r29,r28 };;
+{ .mii; getf.sig r19=f84
+ cmp.ltu p7,p0=r17,r16 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r29,r28
+ add r30=r30,r29 };;
+{ .mii; getf.sig r20=f75
+ add r18=r18,r17 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,r29
+ add r31=r31,r30 };;
+{ .mfb; getf.sig r21=f66 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 }
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r31,r30
+ add r31=r31,carry2 };;
+{ .mfb; getf.sig r22=f57 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r19,r18
+ add r20=r20,r19 }
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r31,carry2 };;
+{ .mfb; getf.sig r23=f48 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r20,r19
+ add r21=r21,r20 }
+{ .mii;
+(p6) add carry1=1,carry1 }
+{ .mfb; st8 [r33]=r31,16 };;
+
+{ .mfb; getf.sig r24=f112 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r21,r20
+ add r22=r22,r21 };;
+{ .mfb; getf.sig r25=f103 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r22,r21
+ add r23=r23,r22 };;
+{ .mfb; getf.sig r26=f94 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r23,r22
+ add r23=r23,carry1 };;
+{ .mfb; getf.sig r27=f85 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p8=r23,carry1};;
+{ .mii; getf.sig r28=f76
+ add r25=r25,r24
+ mov carry1=0 }
+{ .mii; st8 [r32]=r23,16
+ (p7) add carry2=1,carry3
+ (p8) add carry2=0,carry3 };;
+
+{ .mfb; nop.m 0x0 }
+{ .mii; getf.sig r29=f67
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r30=f58 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; getf.sig r16=f113 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mfb; getf.sig r17=f104 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r29=r29,r28 };;
+{ .mfb; getf.sig r18=f95 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r29,r28
+ add r30=r30,r29 };;
+{ .mii; getf.sig r19=f86
+ add r17=r17,r16
+ mov carry3=0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,r29
+ add r30=r30,carry2 };;
+{ .mii; getf.sig r20=f77
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r30,carry2 };;
+{ .mfb; getf.sig r21=f68 }
+{ .mii; st8 [r33]=r30,16
+(p6) add carry1=1,carry1 };;
+
+{ .mfb; getf.sig r24=f114 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mfb; getf.sig r25=f105 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r19,r18
+ add r20=r20,r19 };;
+{ .mfb; getf.sig r26=f96 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r20,r19
+ add r21=r21,r20 };;
+{ .mfb; getf.sig r27=f87 }
+{ .mii; (p7) add carry3=1,carry3
+ cmp.ltu p7,p0=r21,r20
+ add r21=r21,carry1 };;
+{ .mib; getf.sig r28=f78
+ add r25=r25,r24 }
+{ .mib; (p7) add carry3=1,carry3
+ cmp.ltu p7,p8=r21,carry1};;
+{ .mii; st8 [r32]=r21,16
+ (p7) add carry2=1,carry3
+ (p8) add carry2=0,carry3 }
+
+{ .mii; mov carry1=0
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r16=f115 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mfb; getf.sig r17=f106 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r28=r28,r27 };;
+{ .mfb; getf.sig r18=f97 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,r27
+ add r28=r28,carry2 };;
+{ .mib; getf.sig r19=f88
+ add r17=r17,r16 }
+{ .mib;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r28,carry2 };;
+{ .mii; st8 [r33]=r28,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; mov carry2=0
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 };;
+{ .mfb; getf.sig r24=f116 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mfb; getf.sig r25=f107 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,r18
+ add r19=r19,carry1 };;
+{ .mfb; getf.sig r26=f98 }
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,carry1};;
+{ .mii; st8 [r32]=r19,16
+ (p7) add carry2=1,carry2 }
+
+{ .mfb; add r25=r25,r24 };;
+
+{ .mfb; getf.sig r16=f117 }
+{ .mii; mov carry1=0
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mfb; getf.sig r17=f108 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r26=r26,carry2 };;
+{ .mfb; nop.m 0x0 }
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,carry2 };;
+{ .mii; st8 [r33]=r26,16
+(p6) add carry1=1,carry1 }
+
+{ .mfb; add r17=r17,r16 };;
+{ .mfb; getf.sig r24=f118 }
+{ .mii; mov carry2=0
+ cmp.ltu p7,p0=r17,r16
+ add r17=r17,carry1 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r17,carry1};;
+{ .mii; st8 [r32]=r17
+ (p7) add carry2=1,carry2 };;
+{ .mfb; add r24=r24,carry2 };;
+{ .mib; st8 [r33]=r24 }
+
+{ .mib; rum 1<<5 // clear um.mfh
+ br.ret.sptk.many b0 };;
+.endp bn_mul_comba8#
+#undef carry3
+#undef carry2
+#undef carry1
+#endif
+
+#if 1
+// It's possible to make it faster (see the comment to bn_sqr_comba8),
+// but I reckon it isn't worth the effort, basically because the routine
+// (actually both of them) is practically never called... So I just play
+// the same trick as with bn_sqr_comba8.
+//
+// void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+//
+.global bn_sqr_comba4#
+.proc bn_sqr_comba4#
+.align 64
+bn_sqr_comba4:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+ addp4 r32=0,r32
+ addp4 r33=0,r33 };;
+{ .mii;
+#else
+{ .mii; alloc r2=ar.pfs,2,1,0,0
+#endif
+ mov r34=r33
+ add r14=8,r33 };;
+ .body
+{ .mii; add r17=8,r34
+ add r15=16,r33
+ add r18=16,r34 }
+{ .mfb; add r16=24,r33
+ br .L_cheat_entry_point4 };;
+.endp bn_sqr_comba4#
+#endif
+
+#if 1
+// Runs in ~115 cycles, ~4.5 times faster than C. Well, whatever...
+//
+// void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+//
+#define carry1 r14
+#define carry2 r15
+.global bn_mul_comba4#
+.proc bn_mul_comba4#
+.align 64
+bn_mul_comba4:
+ .prologue
+ .save ar.pfs,r2
+#if defined(_HPUX_SOURCE) && !defined(_LP64)
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+ addp4 r33=0,r33
+ addp4 r34=0,r34 };;
+{ .mii; addp4 r32=0,r32
+#else
+{ .mii; alloc r2=ar.pfs,3,0,0,0
+#endif
+ add r14=8,r33
+ add r17=8,r34 }
+ .body
+{ .mii; add r15=16,r33
+ add r18=16,r34
+ add r16=24,r33 };;
+.L_cheat_entry_point4:
+{ .mmi; add r19=24,r34
+
+ ldf8 f32=[r33] }
+
+{ .mmi; ldf8 f120=[r34]
+ ldf8 f121=[r17] };;
+{ .mmi; ldf8 f122=[r18]
+ ldf8 f123=[r19] }
+
+{ .mmi; ldf8 f33=[r14]
+ ldf8 f34=[r15] }
+{ .mfi; ldf8 f35=[r16]
+
+ xma.hu f41=f32,f120,f0 }
+{ .mfi; xma.lu f40=f32,f120,f0 };;
+{ .mfi; xma.hu f51=f32,f121,f0 }
+{ .mfi; xma.lu f50=f32,f121,f0 };;
+{ .mfi; xma.hu f61=f32,f122,f0 }
+{ .mfi; xma.lu f60=f32,f122,f0 };;
+{ .mfi; xma.hu f71=f32,f123,f0 }
+{ .mfi; xma.lu f70=f32,f123,f0 };;//
+// A major stall takes place here, and in 3 more places below. The result
+// from the first xma is not available for another 3 ticks.
+{ .mfi; getf.sig r16=f40
+ xma.hu f42=f33,f120,f41
+ add r33=8,r32 }
+{ .mfi; xma.lu f41=f33,f120,f41 };;
+{ .mfi; getf.sig r24=f50
+ xma.hu f52=f33,f121,f51 }
+{ .mfi; xma.lu f51=f33,f121,f51 };;
+{ .mfi; st8 [r32]=r16,16
+ xma.hu f62=f33,f122,f61 }
+{ .mfi; xma.lu f61=f33,f122,f61 };;
+{ .mfi; xma.hu f72=f33,f123,f71 }
+{ .mfi; xma.lu f71=f33,f123,f71 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r25=f41
+ xma.hu f43=f34,f120,f42 }
+{ .mfi; xma.lu f42=f34,f120,f42 };;
+{ .mfi; getf.sig r16=f60
+ xma.hu f53=f34,f121,f52 }
+{ .mfi; xma.lu f52=f34,f121,f52 };;
+{ .mfi; getf.sig r17=f51
+ xma.hu f63=f34,f122,f62
+ add r25=r25,r24 }
+{ .mfi; mov carry1=0
+ xma.lu f62=f34,f122,f62 };;
+{ .mfi; st8 [r33]=r25,16
+ xma.hu f73=f34,f123,f72
+ cmp.ltu p6,p0=r25,r24 }
+{ .mfi; xma.lu f72=f34,f123,f72 };;//
+//-------------------------------------------------//
+{ .mfi; getf.sig r18=f42
+ xma.hu f44=f35,f120,f43
+(p6) add carry1=1,carry1 }
+{ .mfi; add r17=r17,r16
+ xma.lu f43=f35,f120,f43
+ mov carry2=0 };;
+{ .mfi; getf.sig r24=f70
+ xma.hu f54=f35,f121,f53
+ cmp.ltu p7,p0=r17,r16 }
+{ .mfi; xma.lu f53=f35,f121,f53 };;
+{ .mfi; getf.sig r25=f61
+ xma.hu f64=f35,f122,f63
+ add r18=r18,r17 }
+{ .mfi; xma.lu f63=f35,f122,f63
+(p7) add carry2=1,carry2 };;
+{ .mfi; getf.sig r26=f52
+ xma.hu f74=f35,f123,f73
+ cmp.ltu p7,p0=r18,r17 }
+{ .mfi; xma.lu f73=f35,f123,f73
+ add r18=r18,carry1 };;
+//-------------------------------------------------//
+{ .mii; st8 [r32]=r18,16
+(p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,carry1 };;
+
+{ .mfi; getf.sig r27=f43 // last major stall
+(p7) add carry2=1,carry2 };;
+{ .mii; getf.sig r16=f71
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r17=f62
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r27=r27,r26 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,r26
+ add r27=r27,carry2 };;
+{ .mii; getf.sig r18=f53
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r27,carry2 };;
+{ .mfi; st8 [r33]=r27,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; getf.sig r19=f44
+ add r17=r17,r16
+ mov carry2=0 };;
+{ .mii; getf.sig r24=f72
+ cmp.ltu p7,p0=r17,r16
+ add r18=r18,r17 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r18,r17
+ add r19=r19,r18 };;
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,r18
+ add r19=r19,carry1 };;
+{ .mii; getf.sig r25=f63
+ (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r19,carry1};;
+{ .mii; st8 [r32]=r19,16
+ (p7) add carry2=1,carry2 }
+
+{ .mii; getf.sig r26=f54
+ add r25=r25,r24
+ mov carry1=0 };;
+{ .mii; getf.sig r16=f73
+ cmp.ltu p6,p0=r25,r24
+ add r26=r26,r25 };;
+{ .mii;
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,r25
+ add r26=r26,carry2 };;
+{ .mii; getf.sig r17=f64
+(p6) add carry1=1,carry1
+ cmp.ltu p6,p0=r26,carry2 };;
+{ .mii; st8 [r33]=r26,16
+(p6) add carry1=1,carry1 }
+
+{ .mii; getf.sig r24=f74
+ add r17=r17,r16
+ mov carry2=0 };;
+{ .mii; cmp.ltu p7,p0=r17,r16
+ add r17=r17,carry1 };;
+
+{ .mii; (p7) add carry2=1,carry2
+ cmp.ltu p7,p0=r17,carry1};;
+{ .mii; st8 [r32]=r17,16
+ (p7) add carry2=1,carry2 };;
+
+{ .mii; add r24=r24,carry2 };;
+{ .mii; st8 [r33]=r24 }
+
+{ .mib; rum 1<<5 // clear um.mfh
+ br.ret.sptk.many b0 };;
+.endp bn_mul_comba4#
+#undef carry2
+#undef carry1
+#endif
+
+#if 1
+//
+// BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+//
+// In a nutshell, it's a port of my MIPS III/IV implementation.
+//
+#define AT r14
+#define H r16
+#define HH r20
+#define L r17
+#define D r18
+#define DH r22
+#define I r21
+
+#if 0
+// Some preprocessors (most notably HP-UX) appear to be allergic to
+// macros enclosed in parentheses [as these three were].
+#define cont p16
+#define break p0 // p20
+#define equ p24
+#else
+cont=p16
+break=p0
+equ=p24
+#endif
+
+.global abort#
+.global bn_div_words#
+.proc bn_div_words#
+.align 64
+bn_div_words:
+ .prologue
+ .save ar.pfs,r2
+{ .mii; alloc r2=ar.pfs,3,5,0,8
+ .save b0,r3
+ mov r3=b0
+ .save pr,r10
+ mov r10=pr };;
+{ .mmb; cmp.eq p6,p0=r34,r0
+ mov r8=-1
+(p6) br.ret.spnt.many b0 };;
+
+ .body
+{ .mii; mov H=r32 // save h
+ mov ar.ec=0 // don't rotate at exit
+ mov pr.rot=0 }
+{ .mii; mov L=r33 // save l
+ mov r36=r0 };;
+
+.L_divw_shift: // -vv- note signed comparison
+{ .mfi; (p0) cmp.lt p16,p0=r0,r34 // d
+ (p0) shladd r33=r34,1,r0 }
+{ .mfb; (p0) add r35=1,r36
+ (p0) nop.f 0x0
+(p16) br.wtop.dpnt .L_divw_shift };;
+
+{ .mii; mov D=r34
+ shr.u DH=r34,32
+ sub r35=64,r36 };;
+{ .mii; setf.sig f7=DH
+ shr.u AT=H,r35
+ mov I=r36 };;
+{ .mib; cmp.ne p6,p0=r0,AT
+ shl H=H,r36
+(p6) br.call.spnt.clr b0=abort };; // overflow, die...
+
+{ .mfi; fcvt.xuf.s1 f7=f7
+ shr.u AT=L,r35 };;
+{ .mii; shl L=L,r36
+ or H=H,AT };;
+
+{ .mii; nop.m 0x0
+ cmp.leu p6,p0=D,H;;
+(p6) sub H=H,D }
+
+{ .mlx; setf.sig f14=D
+ movl AT=0xffffffff };;
+///////////////////////////////////////////////////////////
+{ .mii; setf.sig f6=H
+ shr.u HH=H,32;;
+ cmp.eq p6,p7=HH,DH };;
+{ .mfb;
+(p6) setf.sig f8=AT
+(p7) fcvt.xuf.s1 f6=f6
+(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
+
+{ .mfi; getf.sig r33=f8 // q
+ xmpy.lu f9=f8,f14 }
+{ .mfi; xmpy.hu f10=f8,f14
+ shrp H=H,L,32 };;
+
+{ .mmi; getf.sig r35=f9 // tl
+ getf.sig r31=f10 };; // th
+
+.L_divw_1st_iter:
+{ .mii; (p0) add r32=-1,r33
+ (p0) cmp.eq equ,cont=HH,r31 };;
+{ .mii; (p0) cmp.ltu p8,p0=r35,D
+ (p0) sub r34=r35,D
+ (equ) cmp.leu break,cont=r35,H };;
+{ .mib; (cont) cmp.leu cont,break=HH,r31
+ (p8) add r31=-1,r31
+(cont) br.wtop.spnt .L_divw_1st_iter };;
+///////////////////////////////////////////////////////////
+{ .mii; sub H=H,r35
+ shl r8=r33,32
+ shl L=L,32 };;
+///////////////////////////////////////////////////////////
+{ .mii; setf.sig f6=H
+ shr.u HH=H,32;;
+ cmp.eq p6,p7=HH,DH };;
+{ .mfb;
+(p6) setf.sig f8=AT
+(p7) fcvt.xuf.s1 f6=f6
+(p7) br.call.sptk b6=.L_udiv64_32_b6 };;
+
+{ .mfi; getf.sig r33=f8 // q
+ xmpy.lu f9=f8,f14 }
+{ .mfi; xmpy.hu f10=f8,f14
+ shrp H=H,L,32 };;
+
+{ .mmi; getf.sig r35=f9 // tl
+ getf.sig r31=f10 };; // th
+
+.L_divw_2nd_iter:
+{ .mii; (p0) add r32=-1,r33
+ (p0) cmp.eq equ,cont=HH,r31 };;
+{ .mii; (p0) cmp.ltu p8,p0=r35,D
+ (p0) sub r34=r35,D
+ (equ) cmp.leu break,cont=r35,H };;
+{ .mib; (cont) cmp.leu cont,break=HH,r31
+ (p8) add r31=-1,r31
+(cont) br.wtop.spnt .L_divw_2nd_iter };;
+///////////////////////////////////////////////////////////
+{ .mii; sub H=H,r35
+ or r8=r8,r33
+ mov ar.pfs=r2 };;
+{ .mii; shr.u r9=H,I // remainder if anybody wants it
+ mov pr=r10,0x1ffff }
+{ .mfb; br.ret.sptk.many b0 };;
+
+// Unsigned 64 by 32 (well, by 64 for the moment) bit integer division
+// procedure.
+//
+// inputs: f6 = (double)a, f7 = (double)b
+// output: f8 = (int)(a/b)
+// clobbered: f8,f9,f10,f11,pred
+pred=p15
+// One can argue that this snippet is copyrighted to Intel
+// Corporation, as it's essentially identical to one of those
+// found in the "Divide, Square Root and Remainder" section at
+// http://www.intel.com/software/products/opensource/libraries/num.htm.
+// Yes, I admit that the referred code was used as a template, but
+// only after I realized that there is hardly any other instruction
+// sequence which would perform this operation. I mean I figure that
+// any independent attempt to implement high-performance division
+// will result in code virtually identical to the Intel code. It
+// should be noted though that the division kernel below is 1 cycle
+// faster than the Intel one (note the commented splits:-), not to
+// mention the original prologue (rather the lack of one) and epilogue.
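+//
+// For the record, a rough C-level sketch of what the kernel below
+// computes (purely illustrative; variable names are not part of the
+// code, a/b/q stand for f6/f7/f8):
+//
+//	y  = frcpa(b);		// ~8-bit approximation of 1/b
+//	e  = 1 - b*y;		// e0
+//	q  = a*y;		// q0
+//	e2 = e*e;		// e1
+//	q  = q + e*q;		// q1
+//	y  = y + e*y;		// y1
+//	q  = q + e2*q;		// q2
+//	y  = y + e2*y;		// y2
+//	r  = a - b*q;		// r2
+//	q  = q + r*y;		// q3, truncated to integer by fcvt
+//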
+.align 32
+.skip 16
+.L_udiv64_32_b6:
+ frcpa.s1 f8,pred=f6,f7;; // [0] y0 = 1 / b
+
+(pred) fnma.s1 f9=f7,f8,f1 // [5] e0 = 1 - b * y0
+(pred) fmpy.s1 f10=f6,f8;; // [5] q0 = a * y0
+(pred) fmpy.s1 f11=f9,f9 // [10] e1 = e0 * e0
+(pred) fma.s1 f10=f9,f10,f10;; // [10] q1 = q0 + e0 * q0
+(pred) fma.s1 f8=f9,f8,f8 //;; // [15] y1 = y0 + e0 * y0
+(pred) fma.s1 f9=f11,f10,f10;; // [15] q2 = q1 + e1 * q1
+(pred) fma.s1 f8=f11,f8,f8 //;; // [20] y2 = y1 + e1 * y1
+(pred) fnma.s1 f10=f7,f9,f6;; // [20] r2 = a - b * q2
+(pred) fma.s1 f8=f10,f8,f9;; // [25] q3 = q2 + r2 * y2
+
+ fcvt.fxu.trunc.s1 f8=f8 // [30] q = trunc(q3)
+ br.ret.sptk.many b6;;
+.endp bn_div_words#
+#endif
diff --git a/openssl-1.1.0h/crypto/bn/asm/mips-mont.pl b/openssl-1.1.0h/crypto/bn/asm/mips-mont.pl
new file mode 100644
index 0000000..a907571
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/mips-mont.pl
@@ -0,0 +1,433 @@
+#! /usr/bin/env perl
+# Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# This module is not of direct interest to OpenSSL, because it
+# doesn't provide better performance for longer keys, at least not on
+# in-order-execution cores. While 512-bit RSA sign operations can be
+# 65% faster in 64-bit mode, 1024-bit ones are only 15% faster, and
+# 4096-bit ones are up to 15% slower. In 32-bit mode it varies from
+# 16% improvement for 512-bit RSA sign to -33% for 4096-bit RSA
+# verify:-( All comparisons are against bn_mul_mont-free assembler.
+# The module might be of interest to embedded system developers, as
+# the code is smaller than 1KB, yet offers >3x improvement on MIPS64
+# and 75-30% [less for longer keys] on MIPS32 over compiler-generated
+# code.
+
+######################################################################
+# There are a number of MIPS ABIs in use; O32 and N32/64 are the most
+# widely used. Then there is a new contender: NUBI. It appears that if
+# one picks the latter, it's possible to arrange the code in an
+# ABI-neutral manner. Therefore let's stick to the NUBI register layout:
+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23));
+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31));
+#
+# The return value is placed in $a0. The following coding rules
+# facilitate interoperability:
+#
+# - never ever touch $tp, "thread pointer", former $gp;
+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting
+# old code];
+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary;
+#
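+# As an illustration (not a new rule), the generated epilogue below
+# follows the second point by setting the return value in both
+# registers:
+#
+#	li	$a0,1		# legacy return register
+#	li	$t0,1		# $t0 is former $v0 under NUBI
+#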
+# For reference here is register layout for N32/64 MIPS ABIs:
+#
+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+#
+$flavour = shift || "o32"; # supported flavours are o32,n32,64,nubi32,nubi64
+
+if ($flavour =~ /64|n32/i) {
+ $PTR_ADD="dadd"; # incidentally works even on n32
+ $PTR_SUB="dsub"; # incidentally works even on n32
+ $REG_S="sd";
+ $REG_L="ld";
+ $SZREG=8;
+} else {
+ $PTR_ADD="add";
+ $PTR_SUB="sub";
+ $REG_S="sw";
+ $REG_L="lw";
+ $SZREG=4;
+}
+$SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 0x00fff000 : 0x00ff0000;
+#
+# <appro@openssl.org>
+#
+######################################################################
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $BNSZ=8;
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $BNSZ=4;
+}
+
+# int bn_mul_mont(
+$rp=$a0; # BN_ULONG *rp,
+$ap=$a1; # const BN_ULONG *ap,
+$bp=$a2; # const BN_ULONG *bp,
+$np=$a3; # const BN_ULONG *np,
+$n0=$a4; # const BN_ULONG *n0,
+$num=$a5; # int num);
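+#
+# Functionally the routine is expected to behave like the generic
+# bn_mul_mont(), i.e. compute rp[] = ap[]*bp[]*R^-1 mod np[], where
+# R = 2^(BN_BITS2*num) and *n0 = -np[0]^-1 mod 2^BN_BITS2, returning
+# non-zero on success and zero when it declines the input (here:
+# num<4 or num>=17). A rough word-by-word sketch of what the code
+# below implements (illustrative only, not part of the interface):
+#
+#	for (i=0; i<num; i++) {
+#		m  = (tp[0] + ap[0]*bp[i]) * n0;	// mod 2^BN_BITS2
+#		tp = (tp + ap*bp[i] + np*m) >> BN_BITS2;
+#	}
+#	rp = (tp >= np) ? tp - np : tp;			// .Lsub/.Lcopy below
+#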
+
+$lo0=$a6;
+$hi0=$a7;
+$lo1=$t1;
+$hi1=$t2;
+$aj=$s0;
+$bi=$s1;
+$nj=$s2;
+$tp=$s3;
+$alo=$s4;
+$ahi=$s5;
+$nlo=$s6;
+$nhi=$s7;
+$tj=$s8;
+$i=$s9;
+$j=$s10;
+$m1=$s11;
+
+$FRAMESIZE=14;
+
+$code=<<___;
+.text
+
+.set noat
+.set noreorder
+
+.align 5
+.globl bn_mul_mont
+.ent bn_mul_mont
+bn_mul_mont:
+___
+$code.=<<___ if ($flavour =~ /o32/i);
+ lw $n0,16($sp)
+ lw $num,20($sp)
+___
+$code.=<<___;
+ slt $at,$num,4
+ bnez $at,1f
+ li $t0,0
+ slt $at,$num,17 # on in-order CPU
+ bnez $at,bn_mul_mont_internal
+ nop
+1: jr $ra
+ li $a0,0
+.end bn_mul_mont
+
+.align 5
+.ent bn_mul_mont_internal
+bn_mul_mont_internal:
+ .frame $fp,$FRAMESIZE*$SZREG,$ra
+ .mask 0x40000000|$SAVED_REGS_MASK,-$SZREG
+ $PTR_SUB $sp,$FRAMESIZE*$SZREG
+ $REG_S $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_S $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_S $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_S $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_S $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_S $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_S $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_S $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_S $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_S $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_S $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_S $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_S $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ move $fp,$sp
+
+ .set reorder
+ $LD $n0,0($n0)
+ $LD $bi,0($bp) # bp[0]
+ $LD $aj,0($ap) # ap[0]
+ $LD $nj,0($np) # np[0]
+
+ $PTR_SUB $sp,2*$BNSZ # place for two extra words
+ sll $num,`log($BNSZ)/log(2)`
+ li $at,-4096
+ $PTR_SUB $sp,$num
+ and $sp,$at
+
+ $MULTU $aj,$bi
+ $LD $alo,$BNSZ($ap)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $MULTU $lo0,$n0
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+.align 4
+.L1st:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $MULTU $nj,$m1
+ $ADDU $hi1,$at
+ addu $j,$BNSZ
+ $ST $lo1,($tp)
+ sltu $t0,$j,$num
+ mflo $nlo
+ mfhi $nhi
+
+ bnez $t0,.L1st
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+
+ $ADDU $lo1,$nlo,$hi1
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi1,$nhi,$t0
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+
+ $ST $lo1,($tp)
+
+ $ADDU $hi1,$hi0
+ sltu $at,$hi1,$hi0
+ $ST $hi1,$BNSZ($tp)
+ $ST $at,2*$BNSZ($tp)
+
+ li $i,$BNSZ
+.align 4
+.Louter:
+ $PTR_ADD $bi,$bp,$i
+ $LD $bi,($bi)
+ $LD $aj,($ap)
+ $LD $alo,$BNSZ($ap)
+ $LD $tj,($sp)
+
+ $MULTU $aj,$bi
+ $LD $nj,($np)
+ $LD $nlo,$BNSZ($np)
+ mflo $lo0
+ mfhi $hi0
+ $ADDU $lo0,$tj
+ $MULTU $lo0,$n0
+ sltu $at,$lo0,$tj
+ $ADDU $hi0,$at
+ mflo $m1
+
+ $MULTU $alo,$bi
+ mflo $alo
+ mfhi $ahi
+
+ $MULTU $nj,$m1
+ mflo $lo1
+ mfhi $hi1
+
+ $MULTU $nlo,$m1
+ $ADDU $lo1,$lo0
+ sltu $at,$lo1,$lo0
+ $ADDU $hi1,$at
+ mflo $nlo
+ mfhi $nhi
+
+ move $tp,$sp
+ li $j,2*$BNSZ
+ $LD $tj,$BNSZ($tp)
+.align 4
+.Linner:
+ .set noreorder
+ $PTR_ADD $aj,$ap,$j
+ $PTR_ADD $nj,$np,$j
+ $LD $aj,($aj)
+ $LD $nj,($nj)
+
+ $MULTU $aj,$bi
+ $ADDU $lo0,$alo,$hi0
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo0,$hi0
+ sltu $t0,$lo1,$hi1
+ $ADDU $hi0,$ahi,$at
+ $ADDU $hi1,$nhi,$t0
+ mflo $alo
+ mfhi $ahi
+
+ $ADDU $lo0,$tj
+ addu $j,$BNSZ
+ $MULTU $nj,$m1
+ sltu $at,$lo0,$tj
+ $ADDU $lo1,$lo0
+ $ADDU $hi0,$at
+ sltu $t0,$lo1,$lo0
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $hi1,$t0
+ sltu $at,$j,$num
+ mflo $nlo
+ mfhi $nhi
+ $ST $lo1,($tp)
+ bnez $at,.Linner
+ $PTR_ADD $tp,$BNSZ
+ .set reorder
+
+ $ADDU $lo0,$alo,$hi0
+ sltu $at,$lo0,$hi0
+ $ADDU $hi0,$ahi,$at
+ $ADDU $lo0,$tj
+ sltu $t0,$lo0,$tj
+ $ADDU $hi0,$t0
+
+ $LD $tj,2*$BNSZ($tp)
+ $ADDU $lo1,$nlo,$hi1
+ sltu $at,$lo1,$hi1
+ $ADDU $hi1,$nhi,$at
+ $ADDU $lo1,$lo0
+ sltu $t0,$lo1,$lo0
+ $ADDU $hi1,$t0
+ $ST $lo1,($tp)
+
+ $ADDU $lo1,$hi1,$hi0
+ sltu $hi1,$lo1,$hi0
+ $ADDU $lo1,$tj
+ sltu $at,$lo1,$tj
+ $ADDU $hi1,$at
+ $ST $lo1,$BNSZ($tp)
+ $ST $hi1,2*$BNSZ($tp)
+
+ addu $i,$BNSZ
+ sltu $t0,$i,$num
+ bnez $t0,.Louter
+
+ .set noreorder
+ $PTR_ADD $tj,$sp,$num # &tp[num]
+ move $tp,$sp
+ move $ap,$sp
+ li $hi0,0 # clear borrow bit
+
+.align 4
+.Lsub: $LD $lo0,($tp)
+ $LD $lo1,($np)
+ $PTR_ADD $tp,$BNSZ
+ $PTR_ADD $np,$BNSZ
+ $SUBU $lo1,$lo0,$lo1 # tp[i]-np[i]
+ sgtu $at,$lo1,$lo0
+ $SUBU $lo0,$lo1,$hi0
+ sgtu $hi0,$lo0,$lo1
+ $ST $lo0,($rp)
+ or $hi0,$at
+ sltu $at,$tp,$tj
+ bnez $at,.Lsub
+ $PTR_ADD $rp,$BNSZ
+
+ $SUBU $hi0,$hi1,$hi0 # handle upmost overflow bit
+ move $tp,$sp
+ $PTR_SUB $rp,$num # restore rp
+ not $hi1,$hi0
+
+ and $ap,$hi0,$sp
+ and $bp,$hi1,$rp
+ or $ap,$ap,$bp # ap=borrow?tp:rp
+
+.align 4
+.Lcopy: $LD $aj,($ap)
+ $PTR_ADD $ap,$BNSZ
+ $ST $zero,($tp)
+ $PTR_ADD $tp,$BNSZ
+ sltu $at,$tp,$tj
+ $ST $aj,($rp)
+ bnez $at,.Lcopy
+ $PTR_ADD $rp,$BNSZ
+
+ li $a0,1
+ li $t0,1
+
+ .set noreorder
+ move $sp,$fp
+ $REG_L $fp,($FRAMESIZE-1)*$SZREG($sp)
+ $REG_L $s11,($FRAMESIZE-2)*$SZREG($sp)
+ $REG_L $s10,($FRAMESIZE-3)*$SZREG($sp)
+ $REG_L $s9,($FRAMESIZE-4)*$SZREG($sp)
+ $REG_L $s8,($FRAMESIZE-5)*$SZREG($sp)
+ $REG_L $s7,($FRAMESIZE-6)*$SZREG($sp)
+ $REG_L $s6,($FRAMESIZE-7)*$SZREG($sp)
+ $REG_L $s5,($FRAMESIZE-8)*$SZREG($sp)
+ $REG_L $s4,($FRAMESIZE-9)*$SZREG($sp)
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s3,($FRAMESIZE-10)*$SZREG($sp)
+ $REG_L $s2,($FRAMESIZE-11)*$SZREG($sp)
+ $REG_L $s1,($FRAMESIZE-12)*$SZREG($sp)
+ $REG_L $s0,($FRAMESIZE-13)*$SZREG($sp)
+___
+$code.=<<___;
+ jr $ra
+ $PTR_ADD $sp,$FRAMESIZE*$SZREG
+.end bn_mul_mont_internal
+.rdata
+.asciiz "Montgomery Multiplication for MIPS, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/mips.pl b/openssl-1.1.0h/crypto/bn/asm/mips.pl
new file mode 100644
index 0000000..420f01f
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/mips.pl
@@ -0,0 +1,2241 @@
+#! /usr/bin/env perl
+# Copyright 2010-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project.
+#
+# Rights for redistribution and usage in source and binary forms are
+# granted according to the OpenSSL license. Warranty of any kind is
+# disclaimed.
+# ====================================================================
+
+
+# July 1999
+#
+# This is a drop-in MIPS III/IV ISA replacement for crypto/bn/bn_asm.c.
+#
+# The module is designed to work with either of the "new" MIPS ABI(5),
+# namely N32 or N64, offered by IRIX 6.x. It's not meant to work under
+# IRIX 5.x, not only because it doesn't support the new ABIs, but also
+# because 5.x kernels put the R4x00 CPU into 32-bit mode, so all those
+# 64-bit instructions (daddu, dmultu, etc.) found below would only
+# cause an illegal instruction exception:-(
+#
+# In addition the code depends on preprocessor flags set up by the
+# MIPSpro compiler driver (either as or cc) and therefore (probably?)
+# can't be compiled by the GNU assembler. The GNU C driver manages fine...
+# I mean as long as -mmips-as is specified or is the default option,
+# because then it simply invokes /usr/bin/as which in turn takes
+# perfect care of the preprocessor definitions. Another neat feature
+# offered by the MIPSpro assembler is an optimization pass. This gave
+# me the opportunity to have the code look more regular, as all those
+# architecture-dependent instruction rescheduling details were left to
+# the assembler. Cool, huh?
+#
+# Performance improvement is astonishing! 'apps/openssl speed rsa dsa'
+# goes way over 3 times faster!
+#
+# <appro@fy.chalmers.se>
+
+# October 2010
+#
+# Adapt the module even for 32-bit ABIs and other OSes. The former was
+# achieved by mechanical replacement of 64-bit arithmetic instructions
+# such as dmultu, daddu, etc. with their 32-bit counterparts and by
+# adjusting offsets denoting multiples of BN_ULONG. The above-mentioned
+# >3x performance improvement naturally does not apply to 32-bit code
+# [because there is no instruction a 32-bit compiler can't use]; one
+# has to be content with a 40-85% improvement depending on benchmark
+# and key length, more for longer keys.
+
+$flavour = shift || "o32";
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+if ($flavour =~ /64|n32/i) {
+ $LD="ld";
+ $ST="sd";
+ $MULTU="dmultu";
+ $DIVU="ddivu";
+ $ADDU="daddu";
+ $SUBU="dsubu";
+ $SRL="dsrl";
+ $SLL="dsll";
+ $BNSZ=8;
+ $PTR_ADD="daddu";
+ $PTR_SUB="dsubu";
+ $SZREG=8;
+ $REG_S="sd";
+ $REG_L="ld";
+} else {
+ $LD="lw";
+ $ST="sw";
+ $MULTU="multu";
+ $DIVU="divu";
+ $ADDU="addu";
+ $SUBU="subu";
+ $SRL="srl";
+ $SLL="sll";
+ $BNSZ=4;
+ $PTR_ADD="addu";
+ $PTR_SUB="subu";
+ $SZREG=4;
+ $REG_S="sw";
+ $REG_L="lw";
+ $code=".set mips2\n";
+}
+
+# Below is N32/64 register layout used in the original module.
+#
+($zero,$at,$v0,$v1)=map("\$$_",(0..3));
+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11));
+($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25));
+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23));
+($gp,$sp,$fp,$ra)=map("\$$_",(28..31));
+($ta0,$ta1,$ta2,$ta3)=($a4,$a5,$a6,$a7);
+#
+# No special adaptation is required for O32. NUBI on the other hand
+# is treated by saving/restoring ($v1,$t0..$t3).
+
+$gp=$v1 if ($flavour =~ /nubi/i);
+
+$minus4=$v1;
+
+$code.=<<___;
+.rdata
+.asciiz "mips3.s, Version 1.2"
+.asciiz "MIPS II/III/IV ISA artwork by Andy Polyakov <appro\@fy.chalmers.se>"
+
+.text
+.set noat
+
+.align 5
+.globl bn_mul_add_words
+.ent bn_mul_add_words
+bn_mul_add_words:
+ .set noreorder
+ bgtz $a2,bn_mul_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words
+
+.align 5
+.ent bn_mul_add_words_internal
+bn_mul_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ beqz $ta0,.L_bn_mul_add_words_tail
+
+.L_bn_mul_add_words_loop:
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ $LD $t2,$BNSZ($a1)
+ $LD $t3,$BNSZ($a0)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0 # All manuals say it "compares 32-bit
+ # values", but it seems to work fine
+ # even on 64-bit registers.
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ $MULTU $t2,$a3
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+
+ $LD $ta2,3*$BNSZ($a1)
+ $LD $ta3,3*$BNSZ($a0)
+ $ADDU $t3,$v0
+ sltu $v0,$t3,$v0
+ mflo $at
+ mfhi $t2
+ $ADDU $t3,$at
+ $ADDU $v0,$t2
+ $MULTU $ta0,$a3
+ sltu $at,$t3,$at
+ $ST $t3,$BNSZ($a0)
+ $ADDU $v0,$at
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ $ADDU $ta1,$v0
+ sltu $v0,$ta1,$v0
+ mflo $at
+ mfhi $ta0
+ $ADDU $ta1,$at
+ $ADDU $v0,$ta0
+ $MULTU $ta2,$a3
+ sltu $at,$ta1,$at
+ $ST $ta1,-2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+
+ and $ta0,$a2,$minus4
+ $ADDU $ta3,$v0
+ sltu $v0,$ta3,$v0
+ mflo $at
+ mfhi $ta2
+ $ADDU $ta3,$at
+ $ADDU $v0,$ta2
+ sltu $at,$ta3,$at
+ $ST $ta3,-$BNSZ($a0)
+ .set noreorder
+ bgtz $ta0,.L_bn_mul_add_words_loop
+ $ADDU $v0,$at
+
+ beqz $a2,.L_bn_mul_add_words_return
+ nop
+
+.L_bn_mul_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t1,0($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,0($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,$BNSZ($a0)
+ subu $a2,1
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$at
+ beqz $a2,.L_bn_mul_add_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ $LD $t1,2*$BNSZ($a0)
+ $ADDU $t1,$v0
+ sltu $v0,$t1,$v0
+ mflo $at
+ mfhi $t0
+ $ADDU $t1,$at
+ $ADDU $v0,$t0
+ sltu $at,$t1,$at
+ $ST $t1,2*$BNSZ($a0)
+ $ADDU $v0,$at
+
+.L_bn_mul_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_add_words_internal
+
+.align 5
+.globl bn_mul_words
+.ent bn_mul_words
+bn_mul_words:
+ .set noreorder
+ bgtz $a2,bn_mul_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words
+
+.align 5
+.ent bn_mul_words_internal
+bn_mul_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ beqz $ta0,.L_bn_mul_words_tail
+
+.L_bn_mul_words_loop:
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $MULTU $t2,$a3
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+
+ subu $a2,4
+ $PTR_ADD $a0,4*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $at
+ mfhi $t2
+ $ADDU $v0,$at
+ sltu $t3,$v0,$at
+ $MULTU $ta0,$a3
+ $ST $v0,-3*$BNSZ($a0)
+ $ADDU $v0,$t3,$t2
+
+ mflo $at
+ mfhi $ta0
+ $ADDU $v0,$at
+ sltu $ta1,$v0,$at
+ $MULTU $ta2,$a3
+ $ST $v0,-2*$BNSZ($a0)
+ $ADDU $v0,$ta1,$ta0
+
+ and $ta0,$a2,$minus4
+ mflo $at
+ mfhi $ta2
+ $ADDU $v0,$at
+ sltu $ta3,$v0,$at
+ $ST $v0,-$BNSZ($a0)
+ .set noreorder
+ bgtz $ta0,.L_bn_mul_words_loop
+ $ADDU $v0,$ta3,$ta2
+
+ beqz $a2,.L_bn_mul_words_return
+ nop
+
+.L_bn_mul_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,0($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$a3
+ subu $a2,1
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+ beqz $a2,.L_bn_mul_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$a3
+ mflo $at
+ mfhi $t0
+ $ADDU $v0,$at
+ sltu $t1,$v0,$at
+ $ST $v0,2*$BNSZ($a0)
+ $ADDU $v0,$t1,$t0
+
+.L_bn_mul_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_mul_words_internal
+
+.align 5
+.globl bn_sqr_words
+.ent bn_sqr_words
+bn_sqr_words:
+ .set noreorder
+ bgtz $a2,bn_sqr_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_sqr_words
+
+.align 5
+.ent bn_sqr_words_internal
+bn_sqr_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $ta0,$a2,$minus4
+ beqz $ta0,.L_bn_sqr_words_tail
+
+.L_bn_sqr_words_loop:
+ $LD $t0,0($a1)
+ $MULTU $t0,$t0
+ $LD $t2,$BNSZ($a1)
+ $LD $ta0,2*$BNSZ($a1)
+ $LD $ta2,3*$BNSZ($a1)
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+
+ $MULTU $t2,$t2
+ subu $a2,4
+ $PTR_ADD $a0,8*$BNSZ
+ $PTR_ADD $a1,4*$BNSZ
+ mflo $t3
+ mfhi $t2
+ $ST $t3,-6*$BNSZ($a0)
+ $ST $t2,-5*$BNSZ($a0)
+
+ $MULTU $ta0,$ta0
+ mflo $ta1
+ mfhi $ta0
+ $ST $ta1,-4*$BNSZ($a0)
+ $ST $ta0,-3*$BNSZ($a0)
+
+
+ $MULTU $ta2,$ta2
+ and $ta0,$a2,$minus4
+ mflo $ta3
+ mfhi $ta2
+ $ST $ta3,-2*$BNSZ($a0)
+
+ .set noreorder
+ bgtz $ta0,.L_bn_sqr_words_loop
+ $ST $ta2,-$BNSZ($a0)
+
+ beqz $a2,.L_bn_sqr_words_return
+ nop
+
+.L_bn_sqr_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,0($a0)
+ $ST $t0,$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,$BNSZ($a1)
+ $MULTU $t0,$t0
+ subu $a2,1
+ mflo $t1
+ mfhi $t0
+ $ST $t1,2*$BNSZ($a0)
+ $ST $t0,3*$BNSZ($a0)
+ beqz $a2,.L_bn_sqr_words_return
+
+ $LD $t0,2*$BNSZ($a1)
+ $MULTU $t0,$t0
+ mflo $t1
+ mfhi $t0
+ $ST $t1,4*$BNSZ($a0)
+ $ST $t0,5*$BNSZ($a0)
+
+.L_bn_sqr_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_sqr_words_internal
+
+.align 5
+.globl bn_add_words
+.ent bn_add_words
+bn_add_words:
+ .set noreorder
+ bgtz $a3,bn_add_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$v0
+.end bn_add_words
+
+.align 5
+.ent bn_add_words_internal
+bn_add_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ beqz $at,.L_bn_add_words_tail
+
+.L_bn_add_words_loop:
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ $ADDU $ta0,$t0
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta1,$t1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ $ADDU $ta3,$t3
+ sltu $t9,$ta3,$t3
+ $ADDU $t3,$ta3,$v0
+ sltu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+
+ .set noreorder
+ bgtz $at,.L_bn_add_words_loop
+ $ADDU $v0,$t9
+
+ beqz $a3,.L_bn_add_words_return
+ nop
+
+.L_bn_add_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ $ADDU $ta0,$t0
+ subu $a3,1
+ sltu $t8,$ta0,$t0
+ $ADDU $t0,$ta0,$v0
+ sltu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t1,$BNSZ($a1)
+ $LD $ta1,$BNSZ($a2)
+ $ADDU $ta1,$t1
+ subu $a3,1
+ sltu $t9,$ta1,$t1
+ $ADDU $t1,$ta1,$v0
+ sltu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_add_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ $ADDU $ta2,$t2
+ sltu $t8,$ta2,$t2
+ $ADDU $t2,$ta2,$v0
+ sltu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_add_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+
+.end bn_add_words_internal
+
+.align 5
+.globl bn_sub_words
+.ent bn_sub_words
+bn_sub_words:
+ .set noreorder
+ bgtz $a3,bn_sub_words_internal
+ move $v0,$zero
+ jr $ra
+ move $a0,$zero
+.end bn_sub_words
+
+.align 5
+.ent bn_sub_words_internal
+bn_sub_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ li $minus4,-4
+ and $at,$a3,$minus4
+ beqz $at,.L_bn_sub_words_tail
+
+.L_bn_sub_words_loop:
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,4
+ $LD $t1,$BNSZ($a1)
+ and $at,$a3,$minus4
+ $LD $t2,2*$BNSZ($a1)
+ $PTR_ADD $a2,4*$BNSZ
+ $LD $t3,3*$BNSZ($a1)
+ $PTR_ADD $a0,4*$BNSZ
+ $LD $ta1,-3*$BNSZ($a2)
+ $PTR_ADD $a1,4*$BNSZ
+ $LD $ta2,-2*$BNSZ($a2)
+ $LD $ta3,-$BNSZ($a2)
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,-4*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,-3*$BNSZ($a0)
+ $ADDU $v0,$t9
+
+
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,-2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+ sltu $t9,$t3,$ta3
+ $SUBU $ta3,$t3,$ta3
+ $SUBU $t3,$ta3,$v0
+ sgtu $v0,$t3,$ta3
+ $ST $t3,-$BNSZ($a0)
+
+ .set noreorder
+ bgtz $at,.L_bn_sub_words_loop
+ $ADDU $v0,$t9
+
+ beqz $a3,.L_bn_sub_words_return
+ nop
+
+.L_bn_sub_words_tail:
+ .set reorder
+ $LD $t0,0($a1)
+ $LD $ta0,0($a2)
+ subu $a3,1
+ sltu $t8,$t0,$ta0
+ $SUBU $ta0,$t0,$ta0
+ $SUBU $t0,$ta0,$v0
+ sgtu $v0,$t0,$ta0
+ $ST $t0,0($a0)
+ $ADDU $v0,$t8
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t1,$BNSZ($a1)
+ subu $a3,1
+ $LD $ta1,$BNSZ($a2)
+ sltu $t9,$t1,$ta1
+ $SUBU $ta1,$t1,$ta1
+ $SUBU $t1,$ta1,$v0
+ sgtu $v0,$t1,$ta1
+ $ST $t1,$BNSZ($a0)
+ $ADDU $v0,$t9
+ beqz $a3,.L_bn_sub_words_return
+
+ $LD $t2,2*$BNSZ($a1)
+ $LD $ta2,2*$BNSZ($a2)
+ sltu $t8,$t2,$ta2
+ $SUBU $ta2,$t2,$ta2
+ $SUBU $t2,$ta2,$v0
+ sgtu $v0,$t2,$ta2
+ $ST $t2,2*$BNSZ($a0)
+ $ADDU $v0,$t8
+
+.L_bn_sub_words_return:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_sub_words_internal
+
+.align 5
+.globl bn_div_3_words
+.ent bn_div_3_words
+bn_div_3_words:
+ .set noreorder
+ move $a3,$a0 # we know that bn_div_words does not
+ # touch $a3, $ta2, $ta3 and preserves $a2
+ # so that we can save two arguments
+ # and return address in registers
+ # instead of stack:-)
+
+ $LD $a0,($a3)
+ move $ta2,$a1
+ bne $a0,$a2,bn_div_3_words_internal
+ $LD $a1,-$BNSZ($a3)
+ li $v0,-1
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words
+
+.align 5
+.ent bn_div_3_words_internal
+bn_div_3_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ move $ta3,$ra
+ bal bn_div_words_internal
+ move $ra,$ta3
+ $MULTU $ta2,$v0
+ $LD $t2,-2*$BNSZ($a3)
+ move $ta0,$zero
+ mfhi $t1
+ mflo $t0
+ sltu $t8,$t1,$a1
+.L_bn_div_3_words_inner_loop:
+ bnez $t8,.L_bn_div_3_words_inner_loop_done
+ sgeu $at,$t2,$t0
+ seq $t9,$t1,$a1
+ and $at,$t9
+ sltu $t3,$t0,$ta2
+ $ADDU $a1,$a2
+ $SUBU $t1,$t3
+ $SUBU $t0,$ta2
+ sltu $t8,$t1,$a1
+ sltu $ta0,$a1,$a2
+ or $t8,$ta0
+ .set noreorder
+ beqz $at,.L_bn_div_3_words_inner_loop
+ $SUBU $v0,1
+ $ADDU $v0,1
+ .set reorder
+.L_bn_div_3_words_inner_loop_done:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_3_words_internal
+
+.align 5
+.globl bn_div_words
+.ent bn_div_words
+bn_div_words:
+ .set noreorder
+ bnez $a2,bn_div_words_internal
+ li $v0,-1 # I would rather signal div-by-zero
+ # which can be done with 'break 7'
+ jr $ra
+ move $a0,$v0
+.end bn_div_words
+
+.align 5
+.ent bn_div_words_internal
+bn_div_words_internal:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ move $v1,$zero
+ bltz $a2,.L_bn_div_words_body
+ move $t9,$v1
+ $SLL $a2,1
+ bgtz $a2,.-4
+ addu $t9,1
+
+ .set reorder
+ negu $t1,$t9
+ li $t2,-1
+ $SLL $t2,$t1
+ and $t2,$a0
+ $SRL $at,$a1,$t1
+ .set noreorder
+ beqz $t2,.+12
+ nop
+ break 6 # signal overflow
+ .set reorder
+ $SLL $a0,$t9
+ $SLL $a1,$t9
+ or $a0,$at
+___
+$QT=$ta0;
+$HH=$ta1;
+$DH=$v1;
+$code.=<<___;
+.L_bn_div_words_body:
+ $SRL $DH,$a2,4*$BNSZ # bits
+ sgeu $at,$a0,$a2
+ .set noreorder
+ beqz $at,.+12
+ nop
+ $SUBU $a0,$a2
+ .set reorder
+
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div1
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div1:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop1:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v0,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop1_done
+ $SUBU $t1,$v0
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop1
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop1_done:
+
+ $SLL $a1,4*$BNSZ # bits
+ $SUBU $a0,$t3,$t0
+ $SLL $v0,$QT,4*$BNSZ # bits
+
+ li $QT,-1
+ $SRL $HH,$a0,4*$BNSZ # bits
+ $SRL $QT,4*$BNSZ # q=0xffffffff
+ beq $DH,$HH,.L_bn_div_words_skip_div2
+ $DIVU $zero,$a0,$DH
+ mflo $QT
+.L_bn_div_words_skip_div2:
+ $MULTU $a2,$QT
+ $SLL $t3,$a0,4*$BNSZ # bits
+ $SRL $at,$a1,4*$BNSZ # bits
+ or $t3,$at
+ mflo $t0
+ mfhi $t1
+.L_bn_div_words_inner_loop2:
+ sltu $t2,$t3,$t0
+ seq $t8,$HH,$t1
+ sltu $at,$HH,$t1
+ and $t2,$t8
+ sltu $v1,$t0,$a2
+ or $at,$t2
+ .set noreorder
+ beqz $at,.L_bn_div_words_inner_loop2_done
+ $SUBU $t1,$v1
+ $SUBU $t0,$a2
+ b .L_bn_div_words_inner_loop2
+ $SUBU $QT,1
+ .set reorder
+.L_bn_div_words_inner_loop2_done:
+
+ $SUBU $a0,$t3,$t0
+ or $v0,$QT
+ $SRL $v1,$a0,$t9 # $v1 contains remainder if anybody wants it
+ $SRL $a2,$t9 # restore $a2
+
+ .set noreorder
+ move $a1,$v1
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ move $a0,$v0
+.end bn_div_words_internal
+___
+undef $HH; undef $QT; undef $DH;
+
+($a_0,$a_1,$a_2,$a_3)=($t0,$t1,$t2,$t3);
+($b_0,$b_1,$b_2,$b_3)=($ta0,$ta1,$ta2,$ta3);
+
+($a_4,$a_5,$a_6,$a_7)=($s0,$s2,$s4,$a1); # once we load a[7], no use for $a1
+($b_4,$b_5,$b_6,$b_7)=($s1,$s3,$s5,$a2); # once we load b[7], no use for $a2
+
+($t_1,$t_2,$c_1,$c_2,$c_3)=($t8,$t9,$v0,$v1,$a3);
+
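+# The mul_add_c(a,b,c1,c2,c3) annotations below refer to the primitive
+# used by the generic C comba code; roughly, in C-like pseudo-code
+# (a sketch, not part of the generated assembly):
+#
+#	hi:lo = a * b;			/* double-width product  */
+#	c1 += lo; if (c1 < lo) hi++;	/* carry into high word  */
+#	c2 += hi; if (c2 < hi) c3++;	/* carry into third word */
+#
+# which is exactly the mflo/mfhi + $ADDU/sltu pattern repeated below.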
+$code.=<<___;
+
+.align 5
+.globl bn_mul_comba8
+.ent bn_mul_comba8
+bn_mul_comba8:
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,12*$SZREG,$ra
+ .mask 0x803ff008,-$SZREG
+ $PTR_SUB $sp,12*$SZREG
+ $REG_S $ra,11*$SZREG($sp)
+ $REG_S $s5,10*$SZREG($sp)
+ $REG_S $s4,9*$SZREG($sp)
+ $REG_S $s3,8*$SZREG($sp)
+ $REG_S $s2,7*$SZREG($sp)
+ $REG_S $s1,6*$SZREG($sp)
+ $REG_S $s0,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x003f0000,-$SZREG
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $s5,5*$SZREG($sp)
+ $REG_S $s4,4*$SZREG($sp)
+ $REG_S $s3,3*$SZREG($sp)
+ $REG_S $s2,2*$SZREG($sp)
+ $REG_S $s1,1*$SZREG($sp)
+ $REG_S $s0,0*$SZREG($sp)
+___
+$code.=<<___;
+
+ .set reorder
+ $LD $a_0,0($a1) # If compiled with -mips3 option on
+ # R5000 box assembler barks on this
+					# line with "should not have mult/div
+ # as last instruction in bb (R10K
+ # bug)" warning. If anybody out there
+ # has a clue about how to circumvent
+ # this do send me a note.
+ # <appro\@fy.chalmers.se>
+
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ $LD $b_4,4*$BNSZ($a2)
+ $LD $b_5,5*$BNSZ($a2)
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ $LD $b_6,6*$BNSZ($a2)
+ $LD $b_7,7*$BNSZ($a2)
+ $ST $c_1,0($a0) # r[0]=c1;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0) # r[1]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0) # r[2]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_0 # mul_add_c(a[4],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0) # r[3]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_4 # mul_add_c(a[0],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$b_5 # mul_add_c(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0) # r[4]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_4 # mul_add_c(a[1],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_1 # mul_add_c(a[4],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_0 # mul_add_c(a[5],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_0 # mul_add_c(a[6],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0) # r[5]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_1 # mul_add_c(a[5],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_2 # mul_add_c(a[4],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_4 # mul_add_c(a[2],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_5 # mul_add_c(a[1],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_6 # mul_add_c(a[0],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$b_7 # mul_add_c(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0) # r[6]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_6 # mul_add_c(a[1],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_5 # mul_add_c(a[2],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_4 # mul_add_c(a[3],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_3 # mul_add_c(a[4],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_2 # mul_add_c(a[5],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_1 # mul_add_c(a[6],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_0 # mul_add_c(a[7],b[0],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_1 # mul_add_c(a[7],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,7*$BNSZ($a0) # r[7]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_2 # mul_add_c(a[6],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_3 # mul_add_c(a[5],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_4,$b_4 # mul_add_c(a[4],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_5 # mul_add_c(a[3],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_6 # mul_add_c(a[2],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_7 # mul_add_c(a[1],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$b_7 # mul_add_c(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0) # r[8]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_6 # mul_add_c(a[3],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_4,$b_5 # mul_add_c(a[4],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_4 # mul_add_c(a[5],b[4],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_3 # mul_add_c(a[6],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_2 # mul_add_c(a[7],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_7,$b_3 # mul_add_c(a[7],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,9*$BNSZ($a0) # r[9]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_6,$b_4 # mul_add_c(a[6],b[4],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_5,$b_5 # mul_add_c(a[5],b[5],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_6 # mul_add_c(a[4],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_3,$b_7 # mul_add_c(a[3],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$b_7 # mul_add_c(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0) # r[10]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_5,$b_6 # mul_add_c(a[5],b[6],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_6,$b_5 # mul_add_c(a[6],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_4 # mul_add_c(a[7],b[4],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_7,$b_5 # mul_add_c(a[7],b[5],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,11*$BNSZ($a0) # r[11]=c3;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_6 # mul_add_c(a[6],b[6],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_5,$b_7 # mul_add_c(a[5],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$b_7 # mul_add_c(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0) # r[12]=c1;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_6 # mul_add_c(a[7],b[6],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_7,$b_7 # mul_add_c(a[7],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,13*$BNSZ($a0) # r[13]=c2;
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0) # r[14]=c3;
+ $ST $c_1,15*$BNSZ($a0) # r[15]=c1;
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $s5,10*$SZREG($sp)
+ $REG_L $s4,9*$SZREG($sp)
+ $REG_L $s3,8*$SZREG($sp)
+ $REG_L $s2,7*$SZREG($sp)
+ $REG_L $s1,6*$SZREG($sp)
+ $REG_L $s0,5*$SZREG($sp)
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,12*$SZREG
+___
+$code.=<<___ if ($flavour !~ /nubi/i);
+ $REG_L $s5,5*$SZREG($sp)
+ $REG_L $s4,4*$SZREG($sp)
+ $REG_L $s3,3*$SZREG($sp)
+ $REG_L $s2,2*$SZREG($sp)
+ $REG_L $s1,1*$SZREG($sp)
+ $REG_L $s0,0*$SZREG($sp)
+ jr $ra
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+.end bn_mul_comba8
+
+.align 5
+.globl bn_mul_comba4
+.ent bn_mul_comba4
+bn_mul_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $b_0,0($a2)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $MULTU $a_0,$b_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_3,3*$BNSZ($a1)
+ $LD $b_1,$BNSZ($a2)
+ $LD $b_2,2*$BNSZ($a2)
+ $LD $b_3,3*$BNSZ($a2)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$b_1 # mul_add_c(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_0 # mul_add_c(a[1],b[0],c2,c3,c1);
+ $ADDU $c_3,$t_2,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_0 # mul_add_c(a[2],b[0],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ $ST $c_2,$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_1,$b_1 # mul_add_c(a[1],b[1],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_2 # mul_add_c(a[0],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$b_3 # mul_add_c(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_1,$b_2 # mul_add_c(a[1],b[2],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $c_3,$c_2,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_2,$b_1 # mul_add_c(a[2],b[1],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_0 # mul_add_c(a[3],b[0],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_3,$b_1 # mul_add_c(a[3],b[1],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,3*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_2 # mul_add_c(a[2],b[2],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $c_1,$c_3,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_1,$b_3 # mul_add_c(a[1],b[3],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$b_3 # mul_add_c(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_2 # mul_add_c(a[3],b[2],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $c_2,$c_1,$t_2
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_3,$b_3 # mul_add_c(a[3],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_mul_comba4
+___
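
The long runs of mflo/mfhi/$ADDU/sltu above are the expansion of the comba
primitive mul_add_c(a[i],b[j],c1,c2,c3): each block folds one double-width
product into a rotating triple of accumulator registers, with sltu standing
in for the carry flag that MIPS lacks. A minimal C sketch of that primitive,
assuming the 32-bit flavour of this generator (the 64-bit flavour is the same
arithmetic with dmultu and wider limbs); the function name is only for
illustration:

    #include <stdint.h>

    static void mul_add_c(uint32_t a, uint32_t b,
                          uint32_t *c0, uint32_t *c1, uint32_t *c2)
    {
        uint64_t t  = (uint64_t)a * b;      /* MULTU: HI:LO = a*b          */
        uint32_t lo = (uint32_t)t;          /* mflo                        */
        uint32_t hi = (uint32_t)(t >> 32);  /* mfhi                        */

        *c0 += lo;
        hi  += (*c0 < lo);                  /* sltu: carry out of c0       */
        *c1 += hi;                          /* hi + carry cannot wrap      */
        *c2 += (*c1 < hi);                  /* carry out of c1 lands in c2 */
    }

The comba routines walk the columns i+j = k, store the low accumulator as
r[k] after each column and rotate the three registers, which is why the
accumulator roles in the comments above cycle through (c1,c2,c3), (c2,c3,c1)
and (c3,c1,c2).
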
+
+($a_4,$a_5,$a_6,$a_7)=($b_0,$b_1,$b_2,$b_3);
+
+sub add_c2 () {
+my ($hi,$lo,$c0,$c1,$c2,
+ $warm, # !$warm denotes first call with specific sequence of
+ # $c_[XYZ] when there is no Z-carry to accumulate yet;
+	$an,$bn	# these two are arguments for the multiplication whose
+		# result is used in the *next* step [which is why it's
+ # commented as "forward multiplication" below];
+ )=@_;
+$code.=<<___;
+ mflo $lo
+ mfhi $hi
+ $ADDU $c0,$lo
+ sltu $at,$c0,$lo
+ $MULTU $an,$bn # forward multiplication
+ $ADDU $c0,$lo
+ $ADDU $at,$hi
+ sltu $lo,$c0,$lo
+ $ADDU $c1,$at
+ $ADDU $hi,$lo
+___
+$code.=<<___ if (!$warm);
+ sltu $c2,$c1,$at
+ $ADDU $c1,$hi
+ sltu $hi,$c1,$hi
+ $ADDU $c2,$hi
+___
+$code.=<<___ if ($warm);
+ sltu $at,$c1,$at
+ $ADDU $c1,$hi
+ $ADDU $c2,$at
+ sltu $hi,$c1,$hi
+ $ADDU $c2,$hi
+___
+}
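
add_c2() emits the doubled-product step used by bn_sqr_comba8 and
bn_sqr_comba4 below: the product already waiting in HI/LO is accumulated
twice, and the $MULTU for the next term is issued in the middle of the
sequence so its latency overlaps the carry bookkeeping; the !$warm variant
merely seeds the third accumulator instead of adding into it. What one
emitted sequence computes, sketched in C under the same 32-bit assumption as
above (the helper name is illustrative):

    #include <stdint.h>

    /* (c0,c1,c2) += 2*a*b, ignoring the overlapped forward MULTU */
    static void mul_add_c2(uint32_t a, uint32_t b,
                           uint32_t *c0, uint32_t *c1, uint32_t *c2)
    {
        uint64_t t  = (uint64_t)a * b;
        uint32_t lo = (uint32_t)t;
        uint32_t hi = (uint32_t)(t >> 32);
        uint32_t tt;

        *c0 += lo; tt = hi + (*c0 < lo);    /* first copy of the product  */
        *c1 += tt; *c2 += (*c1 < tt);
        *c0 += lo; hi += (*c0 < lo);        /* second copy (the "2")      */
        *c1 += hi; *c2 += (*c1 < hi);
    }
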
+
+$code.=<<___;
+
+.align 5
+.globl bn_sqr_comba8
+.ent bn_sqr_comba8
+bn_sqr_comba8:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_4,4*$BNSZ($a1)
+ $LD $a_5,5*$BNSZ($a1)
+ $LD $a_6,6*$BNSZ($a1)
+ $LD $a_7,7*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_1,$a_1); # mul_add_c(a[1],b[1],c3,c1,c2);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_1,$a_2); # mul_add_c2(a[1],b[2],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_4,$a_0); # mul_add_c2(a[4],b[0],c2,c3,c1);
+$code.=<<___;
+ $ST $c_1,3*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_3,$a_1); # mul_add_c2(a[3],b[1],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_2,$a_2); # mul_add_c(a[2],b[2],c2,c3,c1);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_0,$a_5 # mul_add_c2(a[0],b[5],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_1,$a_4); # mul_add_c2(a[1],b[4],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_2,$a_3); # mul_add_c2(a[2],b[3],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_6,$a_0); # mul_add_c2(a[6],b[0],c1,c2,c3);
+$code.=<<___;
+ $ST $c_3,5*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_5,$a_1); # mul_add_c2(a[5],b[1],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_4,$a_2); # mul_add_c2(a[4],b[2],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_3,$a_3); # mul_add_c(a[3],b[3],c1,c2,c3);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_0,$a_7 # mul_add_c2(a[0],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,6*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_1,$a_6); # mul_add_c2(a[1],b[6],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_2,$a_5); # mul_add_c2(a[2],b[5],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_3,$a_4); # mul_add_c2(a[3],b[4],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_7,$a_1); # mul_add_c2(a[7],b[1],c3,c1,c2);
+$code.=<<___;
+ $ST $c_2,7*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_6,$a_2); # mul_add_c2(a[6],b[2],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_5,$a_3); # mul_add_c2(a[5],b[3],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_4,$a_4); # mul_add_c(a[4],b[4],c3,c1,c2);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_2,$a_7 # mul_add_c2(a[2],b[7],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,8*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_3,$a_6); # mul_add_c2(a[3],b[6],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_4,$a_5); # mul_add_c2(a[4],b[5],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_7,$a_3); # mul_add_c2(a[7],b[3],c2,c3,c1);
+$code.=<<___;
+ $ST $c_1,9*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_6,$a_4); # mul_add_c2(a[6],b[4],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_5,$a_5); # mul_add_c(a[5],b[5],c2,c3,c1);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_4,$a_7 # mul_add_c2(a[4],b[7],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,10*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_5,$a_6); # mul_add_c2(a[5],b[6],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_7,$a_5); # mul_add_c2(a[7],b[5],c1,c2,c3);
+$code.=<<___;
+ $ST $c_3,11*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_6,$a_6); # mul_add_c(a[6],b[6],c1,c2,c3);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $MULTU $a_6,$a_7 # mul_add_c2(a[6],b[7],c2,c3,c1);
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ sltu $at,$c_2,$t_2
+ $ADDU $c_3,$at
+ $ST $c_1,12*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_7,$a_7); # mul_add_c(a[7],b[7],c3,c1,c2);
+$code.=<<___;
+ $ST $c_2,13*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ $ST $c_3,14*$BNSZ($a0)
+ $ST $c_1,15*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba8
+
+.align 5
+.globl bn_sqr_comba4
+.ent bn_sqr_comba4
+bn_sqr_comba4:
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ .frame $sp,6*$SZREG,$ra
+ .mask 0x8000f008,-$SZREG
+ .set noreorder
+ $PTR_SUB $sp,6*$SZREG
+ $REG_S $ra,5*$SZREG($sp)
+ $REG_S $t3,4*$SZREG($sp)
+ $REG_S $t2,3*$SZREG($sp)
+ $REG_S $t1,2*$SZREG($sp)
+ $REG_S $t0,1*$SZREG($sp)
+ $REG_S $gp,0*$SZREG($sp)
+___
+$code.=<<___;
+ .set reorder
+ $LD $a_0,0($a1)
+ $LD $a_1,$BNSZ($a1)
+ $MULTU $a_0,$a_0 # mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD $a_2,2*$BNSZ($a1)
+ $LD $a_3,3*$BNSZ($a1)
+ mflo $c_1
+ mfhi $c_2
+ $ST $c_1,0($a0)
+
+ $MULTU $a_0,$a_1 # mul_add_c2(a[0],b[1],c2,c3,c1);
+ mflo $t_1
+ mfhi $t_2
+ slt $c_1,$t_2,$zero
+ $SLL $t_2,1
+ $MULTU $a_2,$a_0 # mul_add_c2(a[2],b[0],c3,c1,c2);
+ slt $a2,$t_1,$zero
+ $ADDU $t_2,$a2
+ $SLL $t_1,1
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $ADDU $c_3,$t_2,$at
+ $ST $c_2,$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_1,$a_1); # mul_add_c(a[1],b[1],c3,c1,c2);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_3,$t_1
+ sltu $at,$c_3,$t_1
+ $MULTU $a_0,$a_3 # mul_add_c2(a[0],b[3],c1,c2,c3);
+ $ADDU $t_2,$at
+ $ADDU $c_1,$t_2
+ sltu $at,$c_1,$t_2
+ $ADDU $c_2,$at
+ $ST $c_3,2*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+	$a_1,$a_2);	# mul_add_c2(a[1],b[2],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_3,$a_1); # mul_add_c2(a[3],b[1],c2,c3,c1);
+$code.=<<___;
+ $ST $c_1,3*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_2,$a_2); # mul_add_c(a[2],b[2],c2,c3,c1);
+$code.=<<___;
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_2,$t_1
+ sltu $at,$c_2,$t_1
+ $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
+ $ADDU $t_2,$at
+ $ADDU $c_3,$t_2
+ sltu $at,$c_3,$t_2
+ $ADDU $c_1,$at
+ $ST $c_2,4*$BNSZ($a0)
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_3,$a_3); # mul_add_c(a[3],b[3],c1,c2,c3);
+$code.=<<___;
+ $ST $c_3,5*$BNSZ($a0)
+
+ mflo $t_1
+ mfhi $t_2
+ $ADDU $c_1,$t_1
+ sltu $at,$c_1,$t_1
+ $ADDU $t_2,$at
+ $ADDU $c_2,$t_2
+ $ST $c_1,6*$BNSZ($a0)
+ $ST $c_2,7*$BNSZ($a0)
+
+ .set noreorder
+___
+$code.=<<___ if ($flavour =~ /nubi/i);
+ $REG_L $t3,4*$SZREG($sp)
+ $REG_L $t2,3*$SZREG($sp)
+ $REG_L $t1,2*$SZREG($sp)
+ $REG_L $t0,1*$SZREG($sp)
+ $REG_L $gp,0*$SZREG($sp)
+ $PTR_ADD $sp,6*$SZREG
+___
+$code.=<<___;
+ jr $ra
+ nop
+.end bn_sqr_comba4
+___
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/pa-risc2.s b/openssl-1.1.0h/crypto/bn/asm/pa-risc2.s
new file mode 100644
index 0000000..413eac7
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/pa-risc2.s
@@ -0,0 +1,1624 @@
+; Copyright 1998-2016 The OpenSSL Project Authors. All Rights Reserved.
+;
+; Licensed under the OpenSSL license (the "License"). You may not use
+; this file except in compliance with the License. You can obtain a copy
+; in the file LICENSE in the source distribution or at
+; https://www.openssl.org/source/license.html
+;
+; PA-RISC 2.0 implementation of bn_asm code, based on the
+; 64-bit version of the code. This code is effectively the
+; same as the 64-bit version except the register model is
+; slightly different given all values must be 32-bit between
+; function calls. Thus the 64-bit return values are returned
+; in %ret0 and %ret1 vs just %ret0 as is done in the 64-bit version.
+;
+;
+; This code is approximately 2x faster than the C version
+; for RSA/DSA.
+;
+; See http://devresource.hp.com/ for more details on the PA-RISC
+; architecture. Also see the book "PA-RISC 2.0 Architecture"
+; by Gerry Kane for information on the instruction set architecture.
+;
+; Code written by Chris Ruemmler (with some help from the HP C
+; compiler).
+;
+; The code compiles with HP's assembler
+;
+
+ .level 2.0N
+ .space $TEXT$
+ .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
+
+;
+; Global Register definitions used for the routines.
+;
+; Some information about HP's runtime architecture for 32-bits.
+;
+; "Caller save" means the calling function must save the register
+; if it wants the register to be preserved.
+; "Callee save" means if a function uses the register, it must save
+; the value before using it.
+;
+; For the floating point registers
+;
+; "caller save" registers: fr4-fr11, fr22-fr31
+; "callee save" registers: fr12-fr21
+; "special" registers: fr0-fr3 (status and exception registers)
+;
+; For the integer registers
+; value zero : r0
+; "caller save" registers: r1,r19-r26
+; "callee save" registers: r3-r18
+; return register : r2 (rp)
+; return values ; r28,r29 (ret0,ret1)
+; Stack pointer ; r30 (sp)
+; millicode return ptr ; r31 (also a caller save register)
+
+
+;
+; Arguments to the routines
+;
+r_ptr .reg %r26
+a_ptr .reg %r25
+b_ptr .reg %r24
+num .reg %r24
+n .reg %r23
+
+;
+; Note that the "w" argument for bn_mul_add_words and bn_mul_words
+; is passed on the stack at a delta of -56 from the top of stack
+; as the routine is entered.
+;
+
+;
+; Globals used in some routines
+;
+
+top_overflow .reg %r23
+high_mask .reg %r22 ; value 0xffffffff80000000L
+
+
+;------------------------------------------------------------------------------
+;
+; bn_mul_add_words
+;
+;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
+; int num, BN_ULONG w)
+;
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg3 = num
+; -56(sp) = w
+;
+; Local register definitions
+;
+
+fm1 .reg %fr22
+fm .reg %fr23
+ht_temp .reg %fr24
+ht_temp_1 .reg %fr25
+lt_temp .reg %fr26
+lt_temp_1 .reg %fr27
+fm1_1 .reg %fr28
+fm_1 .reg %fr29
+
+fw_h .reg %fr7L
+fw_l .reg %fr7R
+fw .reg %fr7
+
+fht_0 .reg %fr8L
+flt_0 .reg %fr8R
+t_float_0 .reg %fr8
+
+fht_1 .reg %fr9L
+flt_1 .reg %fr9R
+t_float_1 .reg %fr9
+
+tmp_0 .reg %r31
+tmp_1 .reg %r21
+m_0 .reg %r20
+m_1 .reg %r19
+ht_0 .reg %r1
+ht_1 .reg %r3
+lt_0 .reg %r4
+lt_1 .reg %r5
+m1_0 .reg %r6
+m1_1 .reg %r7
+rp_val .reg %r8
+rp_val_1 .reg %r9
+
+bn_mul_add_words
+ .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
+ .proc
+ .callinfo frame=128
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP ; Needed to make the loop 16-byte aligned
+ NOP ; needed to make the loop 16-byte aligned
+
+ STD %r5,16(%sp) ; save r5
+ NOP
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+
+ STD %r8,40(%sp) ; save r8
+ STD %r9,48(%sp) ; save r9
+ COPY %r0,%ret1 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+
+ CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; The loop is unrolled twice, so if there is only 1 number
+ ; then go straight to the cleanup code.
+ ;
+ CMPIB,= 1,num,bn_mul_add_words_single_top
+ FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_add_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDD 8(r_ptr),rp_val_1 ; rp[1]
+
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
+
+ XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m[0]
+ FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
+
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
+
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+	XMPYU	flt_1,fw_l,lt_temp_1	; lt_temp_1 = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
+
+ LDD -8(%sp),m_0 ; m[0]
+ LDD -40(%sp),m_1 ; m[1]
+ LDD -16(%sp),m1_0 ; m1[0]
+ LDD -48(%sp),m1_1 ; m1[1]
+
+ LDD -24(%sp),ht_0 ; ht[0]
+ LDD -56(%sp),ht_1 ; ht[1]
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
+
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
+ ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
+
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
+ ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
+ EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
+
+ EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
+ ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
+
+ ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+
+ ADD %ret1,lt_0,lt_0 ; lt[0] = lt[0] + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ LDO -2(num),num ; num = num - 2;
+ ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+
+ ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
+ ADD,DC ht_1,%r0,%ret1 ; ht[1]++
+ LDO 16(a_ptr),a_ptr ; a_ptr += 2
+
+ STD lt_1,8(r_ptr) ; rp[1] = lt[1]
+ CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
+ LDO 16(r_ptr),r_ptr ; r_ptr += 2
+
+ CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_add_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDO 8(a_ptr),a_ptr ; a_ptr++
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0 ; m1 = temp1
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD %ret1,tmp_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
+ ADD,DC ht_0,%r0,%ret1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_add_words_exit
+ .EXIT
+
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ LDD -80(%sp),%r9 ; restore r9
+ LDD -88(%sp),%r8 ; restore r8
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
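
The XMPYU blocks above assemble each 64x64 product from four 32x32 halves,
exactly as the m1/m/ht_temp/lt_temp comments name them, and the CMPCLR/ADD,L
pair catches the wrap when the two middle products are summed. One
rp[i] += ap[i]*w + carry step, sketched in C on the assumption that BN_ULONG
is the 64-bit limb these routines operate on; it shows the arithmetic only,
not HP's register and stack scheduling, and the helper name is illustrative:

    #include <stdint.h>

    static uint64_t mul_add_word(uint64_t *rp, uint64_t a,
                                 uint64_t w, uint64_t c)
    {
        uint64_t ht = a >> 32, lt = a & 0xffffffffu;
        uint64_t wh = w >> 32, wl = w & 0xffffffffu;

        uint64_t m1 = ht * wl;              /* XMPYU fht,fw_l             */
        uint64_t m  = lt * wh;              /* XMPYU flt,fw_h             */
        uint64_t hi = ht * wh;              /* XMPYU fht,fw_h             */
        uint64_t lo = lt * wl;              /* XMPYU flt,fw_l             */

        m += m1;
        if (m < m1)                         /* middle sum wrapped:        */
            hi += (uint64_t)1 << 32;        /*   ht += top_overflow       */
        hi += m >> 32;                      /* upper half of middle sum   */
        m <<= 32;                           /* lower half of middle sum   */

        lo += m;   hi += (lo < m);          /* ADD / ADD,DC carry chain   */
        lo += c;   hi += (lo < c);          /* fold in the incoming carry */
        lo += *rp; hi += (lo < *rp);        /* add rp[i]                  */
        *rp = lo;
        return hi;                          /* new carry (%ret1)          */
    }

bn_mul_words below is the same step without the rp[i] term, and bn_sqr_words
squares each word instead of multiplying by w.
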
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg3 = num
+; w on stack at -56(sp)
+
+bn_mul_words
+ .proc
+ .callinfo frame=128
+ .entry
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+ COPY %r0,%ret1 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+
+ CMPIB,>= 0,num,bn_mul_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; See if only 1 word to do, thus just do cleanup
+ ;
+ CMPIB,= 1,num,bn_mul_words_single_top
+ FLDD -184(%sp),fw ; (-56-128) load up w into fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+	; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
+
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
+
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ FSTD fm_1,-40(%sp) ; -40(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
+
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
+ LDD -8(%sp),m_0
+ LDD -40(%sp),m_1
+
+ LDD -16(%sp),m1_0
+ LDD -48(%sp),m1_1
+ LDD -24(%sp),ht_0
+ LDD -56(%sp),ht_1
+
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
+ ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+ EXTRD,U tmp_1,31,32,m_1 ; m>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ ADD %ret1,lt_0,lt_0 ; lt = lt + c (ret1);
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD lt_1,8(r_ptr) ; rp[1] = lt
+
+ COPY ht_1,%ret1 ; carry = ht
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_mul_words_unroll2
+ LDO 16(r_ptr),r_ptr ; rp++
+
+ CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD %ret1,lt_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ COPY ht_0,%ret1 ; copy carry
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_words_exit
+ .EXIT
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
+
+bn_sqr_words
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ CMPIB,>= 0,num,bn_sqr_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+	; If only 1, then go straight to cleanup
+ ;
+ CMPIB,= 1,num,bn_sqr_words_single_top
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+
+bn_sqr_words_unroll2
+ FLDD 0(a_ptr),t_float_0 ; a[0]
+ FLDD 8(a_ptr),t_float_1 ; a[1]
+ XMPYU fht_0,flt_0,fm ; m[0]
+ XMPYU fht_1,flt_1,fm_1 ; m[1]
+
+ FSTD fm,-24(%sp) ; store m[0]
+ FSTD fm_1,-56(%sp) ; store m[1]
+ XMPYU flt_0,flt_0,lt_temp ; lt[0]
+ XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
+
+ FSTD lt_temp,-16(%sp) ; store lt[0]
+ FSTD lt_temp_1,-48(%sp) ; store lt[1]
+ XMPYU fht_0,fht_0,ht_temp ; ht[0]
+ XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
+
+ FSTD ht_temp,-8(%sp) ; store ht[0]
+ FSTD ht_temp_1,-40(%sp) ; store ht[1]
+ LDD -24(%sp),m_0
+ LDD -56(%sp),m_1
+
+ AND m_0,high_mask,tmp_0 ; m[0] & Mask
+ AND m_1,high_mask,tmp_1 ; m[1] & Mask
+ DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
+ DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
+
+ LDD -16(%sp),lt_0
+ LDD -48(%sp),lt_1
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
+ EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
+
+ LDD -8(%sp),ht_0
+ LDD -40(%sp),ht_1
+ ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
+ ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
+
+ ADD lt_0,m_0,lt_0 ; lt = lt+m
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+	STD	ht_0,8(r_ptr)	; rp[1] = ht[0]
+
+ ADD lt_1,m_1,lt_1 ; lt = lt+m
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_1,16(r_ptr) ; rp[2] = lt[1]
+ STD ht_1,24(r_ptr) ; rp[3] = ht[1]
+
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_sqr_words_unroll2
+ LDO 32(r_ptr),r_ptr ; rp += 4
+
+ CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_sqr_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,flt_0,fm ; m
+ FSTD fm,-24(%sp) ; store m
+
+ XMPYU flt_0,flt_0,lt_temp ; lt
+ FSTD lt_temp,-16(%sp) ; store lt
+
+ XMPYU fht_0,fht_0,ht_temp ; ht
+ FSTD ht_temp,-8(%sp) ; store ht
+
+ LDD -24(%sp),m_0 ; load m
+ AND m_0,high_mask,tmp_0 ; m & Mask
+ DEPD,Z m_0,30,31,m_0 ; m << 32+1
+ LDD -16(%sp),lt_0 ; lt
+
+ LDD -8(%sp),ht_0 ; ht
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
+ ADD m_0,lt_0,lt_0 ; lt = lt+m
+ ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD ht_0,8(r_ptr) ; rp[1] = ht
+
+bn_sqr_words_exit
+ .EXIT
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
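
bn_sqr_words squares each input word into a two-word result; the
"m << 32+1" and "m&Mask >> 32-1" comments are the split of the doubled cross
product 2*ht*lt*2^32 across the two output words. A per-word sketch under
the same 64-bit limb assumption (the helper name is illustrative):

    #include <stdint.h>

    static void sqr_word(uint64_t a, uint64_t *lo, uint64_t *hi)
    {
        uint64_t ht = a >> 32, lt = a & 0xffffffffu;
        uint64_t m = ht * lt;               /* cross product, counted twice */
        uint64_t l = lt * lt;
        uint64_t h = ht * ht;

        h += (m & 0xffffffff80000000ULL) >> 31; /* top 33 bits of 2*m       */
        m <<= 33;                               /* remaining bits, shifted  */
        l += m;
        h += (l < m);                           /* ADD,DC-style carry       */
        *lo = l;
        *hi = h;
    }
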
+
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
+
+t .reg %r22
+b .reg %r21
+l .reg %r20
+
+bn_add_words
+ .proc
+ .entry
+ .callinfo
+ .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ CMPIB,>= 0,n,bn_add_words_exit
+ COPY %r0,%ret1 ; return 0 by default
+
+ ;
+ ; If 2 or more numbers do the loop
+ ;
+ CMPIB,= 1,n,bn_add_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_add_words_unroll2
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,0(r_ptr)
+
+ LDD 8(a_ptr),t
+ LDD 8(b_ptr),b
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_add_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
+
+bn_add_words_single_top
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+
+ ADD t,%ret1,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret1 ; set c to carry (could use CMPCLR??)
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret1,%r0,%ret1 ; c+= carry
+ STD l,0(r_ptr)
+
+bn_add_words_exit
+ .EXIT
+ BVE (%rp)
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ .PROCEND ;in=23,24,25,26,29;out=28;
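
Each unrolled step of bn_add_words folds the running carry into a[i] first
and then adds b[i]; the two ADD,DC instructions collect the carries into
%ret1, and at most one of them can fire per limb. One limb in C (helper name
illustrative):

    #include <stdint.h>

    static uint64_t add_word(uint64_t *r, uint64_t a, uint64_t b, uint64_t c)
    {
        uint64_t t = a + c;
        c  = (t < a);           /* carry from folding the old carry in  */
        *r = t + b;
        c += (*r < t);          /* carry from adding b[i]               */
        return c;               /* still 0 or 1                         */
    }
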
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
+
+t1 .reg %r22
+t2 .reg %r21
+sub_tmp1 .reg %r20
+sub_tmp2 .reg %r19
+
+
+bn_sub_words
+ .proc
+ .callinfo
+ .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ CMPIB,>= 0,n,bn_sub_words_exit
+ COPY %r0,%ret1 ; return 0 by default
+
+ ;
+ ; If 2 or more numbers do the loop
+ ;
+ CMPIB,= 1,n,bn_sub_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_sub_words_unroll2
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+ STD sub_tmp1,0(r_ptr)
+
+ LDD 8(a_ptr),t1
+ LDD 8(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+ STD sub_tmp1,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_sub_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
+
+bn_sub_words_single_top
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret1,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret1
+
+ STD sub_tmp1,0(r_ptr)
+
+bn_sub_words_exit
+ .EXIT
+ BVE (%rp)
+ EXTRD,U %ret1,31,32,%ret0 ; for 32-bit, return in ret0/ret1
+ .PROCEND ;in=23,24,25,26,29;out=28;
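
The CMPCLR pair after each SUB computes the outgoing borrow without a flags
register: the first leaves sub_tmp2 = 1 unless a[i] > b[i], and the second
keeps the previous borrow when a[i] == b[i]. The same rule in C (helper name
illustrative):

    #include <stdint.h>

    static uint64_t sub_word(uint64_t *r, uint64_t a, uint64_t b, uint64_t c)
    {
        *r = a - b - c;
        if (a != b)             /* on equality the old borrow survives */
            c = (a < b);
        return c;
    }
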
+
+;------------------------------------------------------------------------------
+;
+; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
+;
+; arg0 = h
+; arg1 = l
+; arg2 = d
+;
+; This is mainly just output from the HP C compiler.
+;
+;------------------------------------------------------------------------------
+bn_div_words
+ .PROC
+ .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR,RTNVAL=GR,LONG_RETURN
+ .IMPORT BN_num_bits_word,CODE
+ ;--- not PIC .IMPORT __iob,DATA
+ ;--- not PIC .IMPORT fprintf,CODE
+ .IMPORT abort,CODE
+ .IMPORT $$div2U,MILLICODE
+ .CALLINFO CALLER,FRAME=144,ENTRY_GR=%r9,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
+ .ENTRY
+ STW %r2,-20(%r30) ;offset 0x8ec
+ STW,MA %r3,192(%r30) ;offset 0x8f0
+ STW %r4,-188(%r30) ;offset 0x8f4
+ DEPD %r5,31,32,%r6 ;offset 0x8f8
+ STD %r6,-184(%r30) ;offset 0x8fc
+ DEPD %r7,31,32,%r8 ;offset 0x900
+ STD %r8,-176(%r30) ;offset 0x904
+ STW %r9,-168(%r30) ;offset 0x908
+ LDD -248(%r30),%r3 ;offset 0x90c
+ COPY %r26,%r4 ;offset 0x910
+ COPY %r24,%r5 ;offset 0x914
+ DEPD %r25,31,32,%r4 ;offset 0x918
+ CMPB,*<> %r3,%r0,$0006000C ;offset 0x91c
+ DEPD %r23,31,32,%r5 ;offset 0x920
+ MOVIB,TR -1,%r29,$00060002 ;offset 0x924
+ EXTRD,U %r29,31,32,%r28 ;offset 0x928
+$0006002A
+ LDO -1(%r29),%r29 ;offset 0x92c
+ SUB %r23,%r7,%r23 ;offset 0x930
+$00060024
+ SUB %r4,%r31,%r25 ;offset 0x934
+ AND %r25,%r19,%r26 ;offset 0x938
+ CMPB,*<>,N %r0,%r26,$00060046 ;offset 0x93c
+ DEPD,Z %r25,31,32,%r20 ;offset 0x940
+ OR %r20,%r24,%r21 ;offset 0x944
+ CMPB,*<<,N %r21,%r23,$0006002A ;offset 0x948
+ SUB %r31,%r2,%r31 ;offset 0x94c
+$00060046
+$0006002E
+ DEPD,Z %r23,31,32,%r25 ;offset 0x950
+ EXTRD,U %r23,31,32,%r26 ;offset 0x954
+ AND %r25,%r19,%r24 ;offset 0x958
+ ADD,L %r31,%r26,%r31 ;offset 0x95c
+ CMPCLR,*>>= %r5,%r24,%r0 ;offset 0x960
+ LDO 1(%r31),%r31 ;offset 0x964
+$00060032
+ CMPB,*<<=,N %r31,%r4,$00060036 ;offset 0x968
+ LDO -1(%r29),%r29 ;offset 0x96c
+ ADD,L %r4,%r3,%r4 ;offset 0x970
+$00060036
+ ADDIB,=,N -1,%r8,$D0 ;offset 0x974
+ SUB %r5,%r24,%r28 ;offset 0x978
+$0006003A
+ SUB %r4,%r31,%r24 ;offset 0x97c
+ SHRPD %r24,%r28,32,%r4 ;offset 0x980
+ DEPD,Z %r29,31,32,%r9 ;offset 0x984
+ DEPD,Z %r28,31,32,%r5 ;offset 0x988
+$0006001C
+ EXTRD,U %r4,31,32,%r31 ;offset 0x98c
+ CMPB,*<>,N %r31,%r2,$00060020 ;offset 0x990
+ MOVB,TR %r6,%r29,$D1 ;offset 0x994
+ STD %r29,-152(%r30) ;offset 0x998
+$0006000C
+ EXTRD,U %r3,31,32,%r25 ;offset 0x99c
+ COPY %r3,%r26 ;offset 0x9a0
+ EXTRD,U %r3,31,32,%r9 ;offset 0x9a4
+ EXTRD,U %r4,31,32,%r8 ;offset 0x9a8
+ .CALL ARGW0=GR,ARGW1=GR,RTNVAL=GR ;in=25,26;out=28;
+ B,L BN_num_bits_word,%r2 ;offset 0x9ac
+ EXTRD,U %r5,31,32,%r7 ;offset 0x9b0
+ LDI 64,%r20 ;offset 0x9b4
+ DEPD %r7,31,32,%r5 ;offset 0x9b8
+ DEPD %r8,31,32,%r4 ;offset 0x9bc
+ DEPD %r9,31,32,%r3 ;offset 0x9c0
+ CMPB,= %r28,%r20,$00060012 ;offset 0x9c4
+ COPY %r28,%r24 ;offset 0x9c8
+ MTSARCM %r24 ;offset 0x9cc
+ DEPDI,Z -1,%sar,1,%r19 ;offset 0x9d0
+ CMPB,*>>,N %r4,%r19,$D2 ;offset 0x9d4
+$00060012
+ SUBI 64,%r24,%r31 ;offset 0x9d8
+ CMPCLR,*<< %r4,%r3,%r0 ;offset 0x9dc
+ SUB %r4,%r3,%r4 ;offset 0x9e0
+$00060016
+ CMPB,= %r31,%r0,$0006001A ;offset 0x9e4
+ COPY %r0,%r9 ;offset 0x9e8
+ MTSARCM %r31 ;offset 0x9ec
+ DEPD,Z %r3,%sar,64,%r3 ;offset 0x9f0
+ SUBI 64,%r31,%r26 ;offset 0x9f4
+ MTSAR %r26 ;offset 0x9f8
+ SHRPD %r4,%r5,%sar,%r4 ;offset 0x9fc
+ MTSARCM %r31 ;offset 0xa00
+ DEPD,Z %r5,%sar,64,%r5 ;offset 0xa04
+$0006001A
+ DEPDI,Z -1,31,32,%r19 ;offset 0xa08
+ AND %r3,%r19,%r29 ;offset 0xa0c
+ EXTRD,U %r29,31,32,%r2 ;offset 0xa10
+ DEPDI,Z -1,63,32,%r6 ;offset 0xa14
+ MOVIB,TR 2,%r8,$0006001C ;offset 0xa18
+ EXTRD,U %r3,63,32,%r7 ;offset 0xa1c
+$D2
+ ;--- not PIC ADDIL LR'__iob-$global$,%r27,%r1 ;offset 0xa20
+ ;--- not PIC LDIL LR'C$7,%r21 ;offset 0xa24
+ ;--- not PIC LDO RR'__iob-$global$+32(%r1),%r26 ;offset 0xa28
+ ;--- not PIC .CALL ARGW0=GR,ARGW1=GR,ARGW2=GR,RTNVAL=GR ;in=24,25,26;out=28;
+ ;--- not PIC B,L fprintf,%r2 ;offset 0xa2c
+ ;--- not PIC LDO RR'C$7(%r21),%r25 ;offset 0xa30
+ .CALL ;
+ B,L abort,%r2 ;offset 0xa34
+ NOP ;offset 0xa38
+ B $D3 ;offset 0xa3c
+ LDW -212(%r30),%r2 ;offset 0xa40
+$00060020
+ COPY %r4,%r26 ;offset 0xa44
+ EXTRD,U %r4,31,32,%r25 ;offset 0xa48
+ COPY %r2,%r24 ;offset 0xa4c
+ .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
+ B,L $$div2U,%r31 ;offset 0xa50
+ EXTRD,U %r2,31,32,%r23 ;offset 0xa54
+ DEPD %r28,31,32,%r29 ;offset 0xa58
+$00060022
+ STD %r29,-152(%r30) ;offset 0xa5c
+$D1
+ AND %r5,%r19,%r24 ;offset 0xa60
+ EXTRD,U %r24,31,32,%r24 ;offset 0xa64
+ STW %r2,-160(%r30) ;offset 0xa68
+ STW %r7,-128(%r30) ;offset 0xa6c
+ FLDD -152(%r30),%fr4 ;offset 0xa70
+ FLDD -152(%r30),%fr7 ;offset 0xa74
+ FLDW -160(%r30),%fr8L ;offset 0xa78
+ FLDW -128(%r30),%fr5L ;offset 0xa7c
+ XMPYU %fr8L,%fr7L,%fr10 ;offset 0xa80
+ FSTD %fr10,-136(%r30) ;offset 0xa84
+ XMPYU %fr8L,%fr7R,%fr22 ;offset 0xa88
+ FSTD %fr22,-144(%r30) ;offset 0xa8c
+ XMPYU %fr5L,%fr4L,%fr11 ;offset 0xa90
+ XMPYU %fr5L,%fr4R,%fr23 ;offset 0xa94
+ FSTD %fr11,-112(%r30) ;offset 0xa98
+ FSTD %fr23,-120(%r30) ;offset 0xa9c
+ LDD -136(%r30),%r28 ;offset 0xaa0
+ DEPD,Z %r28,31,32,%r31 ;offset 0xaa4
+ LDD -144(%r30),%r20 ;offset 0xaa8
+ ADD,L %r20,%r31,%r31 ;offset 0xaac
+ LDD -112(%r30),%r22 ;offset 0xab0
+ DEPD,Z %r22,31,32,%r22 ;offset 0xab4
+ LDD -120(%r30),%r21 ;offset 0xab8
+ B $00060024 ;offset 0xabc
+ ADD,L %r21,%r22,%r23 ;offset 0xac0
+$D0
+ OR %r9,%r29,%r29 ;offset 0xac4
+$00060040
+ EXTRD,U %r29,31,32,%r28 ;offset 0xac8
+$00060002
+$L2
+ LDW -212(%r30),%r2 ;offset 0xacc
+$D3
+ LDW -168(%r30),%r9 ;offset 0xad0
+ LDD -176(%r30),%r8 ;offset 0xad4
+ EXTRD,U %r8,31,32,%r7 ;offset 0xad8
+ LDD -184(%r30),%r6 ;offset 0xadc
+ EXTRD,U %r6,31,32,%r5 ;offset 0xae0
+ LDW -188(%r30),%r4 ;offset 0xae4
+ BVE (%r2) ;offset 0xae8
+ .EXIT
+ LDW,MB -192(%r30),%r3 ;offset 0xaec
+ .PROCEND ;in=23,25;out=28,29;fpin=105,107;
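
bn_div_words returns the one-word quotient of the double word h:l by d; the
compiler output above gets there by normalizing d (via BN_num_bits_word) and
leaning on the $$div2U millicode. Reference semantics only, sketched with a
compiler-provided unsigned __int128 and the usual caller-side assumption that
h < d so the quotient fits in one word:

    #include <stdint.h>

    static uint64_t div_words_ref(uint64_t h, uint64_t l, uint64_t d)
    {
        unsigned __int128 n = ((unsigned __int128)h << 64) | l;

        return (uint64_t)(n / d);       /* quotient only; h < d assumed */
    }
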
+
+
+
+
+;----------------------------------------------------------------------------
+;
+; Registers to hold 64-bit values to manipulate. The "L" part
+; of the register corresponds to the upper 32-bits, while the "R"
+; part corresponds to the lower 32-bits
+;
+; Note, that when using b6 and b7, the code must save these before
+; using them because they are callee save registers
+;
+;
+; Floating point registers to use to save values that
+; are manipulated. These don't collide with ftemp1-6 and
+; are all caller save registers
+;
+a0 .reg %fr22
+a0L .reg %fr22L
+a0R .reg %fr22R
+
+a1 .reg %fr23
+a1L .reg %fr23L
+a1R .reg %fr23R
+
+a2 .reg %fr24
+a2L .reg %fr24L
+a2R .reg %fr24R
+
+a3 .reg %fr25
+a3L .reg %fr25L
+a3R .reg %fr25R
+
+a4 .reg %fr26
+a4L .reg %fr26L
+a4R .reg %fr26R
+
+a5 .reg %fr27
+a5L .reg %fr27L
+a5R .reg %fr27R
+
+a6 .reg %fr28
+a6L .reg %fr28L
+a6R .reg %fr28R
+
+a7 .reg %fr29
+a7L .reg %fr29L
+a7R .reg %fr29R
+
+b0 .reg %fr30
+b0L .reg %fr30L
+b0R .reg %fr30R
+
+b1 .reg %fr31
+b1L .reg %fr31L
+b1R .reg %fr31R
+
+;
+; Temporary floating point variables, these are all caller save
+; registers
+;
+ftemp1 .reg %fr4
+ftemp2 .reg %fr5
+ftemp3 .reg %fr6
+ftemp4 .reg %fr7
+
+;
+; The B set of registers when used.
+;
+
+b2 .reg %fr8
+b2L .reg %fr8L
+b2R .reg %fr8R
+
+b3 .reg %fr9
+b3L .reg %fr9L
+b3R .reg %fr9R
+
+b4 .reg %fr10
+b4L .reg %fr10L
+b4R .reg %fr10R
+
+b5 .reg %fr11
+b5L .reg %fr11L
+b5R .reg %fr11R
+
+b6 .reg %fr12
+b6L .reg %fr12L
+b6R .reg %fr12R
+
+b7 .reg %fr13
+b7L .reg %fr13L
+b7R .reg %fr13R
+
+c1 .reg %r21 ; only reg
+temp1 .reg %r20 ; only reg
+temp2 .reg %r19 ; only reg
+temp3 .reg %r31 ; only reg
+
+m1 .reg %r28
+c2 .reg %r23
+high_one .reg %r1
+ht .reg %r6
+lt .reg %r5
+m .reg %r4
+c3 .reg %r3
+
+SQR_ADD_C .macro A0L,A0R,C1,C2,C3
+ XMPYU A0L,A0R,ftemp1 ; m
+ FSTD ftemp1,-24(%sp) ; store m
+
+ XMPYU A0R,A0R,ftemp2 ; lt
+ FSTD ftemp2,-16(%sp) ; store lt
+
+ XMPYU A0L,A0L,ftemp3 ; ht
+ FSTD ftemp3,-8(%sp) ; store ht
+
+ LDD -24(%sp),m ; load m
+ AND m,high_mask,temp2 ; m & Mask
+ DEPD,Z m,30,31,temp3 ; m << 32+1
+ LDD -16(%sp),lt ; lt
+
+ LDD -8(%sp),ht ; ht
+ EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
+ ADD temp3,lt,lt ; lt = lt+m
+ ADD,L ht,temp1,ht ; ht += temp1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C2,ht,C2 ; c2=c2+ht
+ ADD,DC C3,%r0,C3 ; c3++
+.endm
+
+SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
+ XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,A1L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD ht,ht,ht ; ht=ht+ht;
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+
+ ADD lt,lt,lt ; lt=lt+lt;
+ ADD,DC ht,%r0,ht ; add in carry (ht++)
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
+ LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
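
SQR_ADD_C folds one diagonal term a[i]^2 into the accumulator triple and
SQR_ADD_C2 folds one doubled off-diagonal term 2*a[i]*a[j]; bn_sqr_comba8
and bn_sqr_comba4 below are just the column walk over i+j = k built from
these two macros. A C sketch of that pattern for the 4-word case, using a
compiler-provided unsigned __int128 for the raw products (the macro doubles
the product before a single add, which is arithmetically the same as adding
it twice as done here); all helper names are illustrative:

    #include <stdint.h>

    /* SQR_ADD_C: (c0,c1,c2) += a*a */
    static void sqr_add_c(uint64_t a,
                          uint64_t *c0, uint64_t *c1, uint64_t *c2)
    {
        unsigned __int128 t = (unsigned __int128)a * a;
        uint64_t lo = (uint64_t)t, hi = (uint64_t)(t >> 64);

        *c0 += lo; hi  += (*c0 < lo);
        *c1 += hi; *c2 += (*c1 < hi);
    }

    /* SQR_ADD_C2: (c0,c1,c2) += 2*a*b */
    static void sqr_add_c2(uint64_t a, uint64_t b,
                           uint64_t *c0, uint64_t *c1, uint64_t *c2)
    {
        unsigned __int128 t = (unsigned __int128)a * b;
        uint64_t lo = (uint64_t)t, hi = (uint64_t)(t >> 64);
        int i;

        for (i = 0; i < 2; i++) {       /* the cross term counts twice */
            uint64_t h = hi;
            *c0 += lo; h   += (*c0 < lo);
            *c1 += h;  *c2 += (*c1 < h);
        }
    }

    /* The column walk bn_sqr_comba4 performs with those macros. */
    static void sqr_comba4_ref(uint64_t r[8], const uint64_t a[4])
    {
        uint64_t c1 = 0, c2 = 0, c3 = 0;

        sqr_add_c (a[0],       &c1, &c2, &c3); r[0] = c1; c1 = 0;
        sqr_add_c2(a[1], a[0], &c2, &c3, &c1); r[1] = c2; c2 = 0;
        sqr_add_c (a[1],       &c3, &c1, &c2);
        sqr_add_c2(a[2], a[0], &c3, &c1, &c2); r[2] = c3; c3 = 0;
        sqr_add_c2(a[3], a[0], &c1, &c2, &c3);
        sqr_add_c2(a[2], a[1], &c1, &c2, &c3); r[3] = c1; c1 = 0;
        sqr_add_c (a[2],       &c2, &c3, &c1);
        sqr_add_c2(a[3], a[1], &c2, &c3, &c1); r[4] = c2; c2 = 0;
        sqr_add_c2(a[3], a[2], &c3, &c1, &c2); r[5] = c3; c3 = 0;
        sqr_add_c (a[3],       &c1, &c2, &c3); r[6] = c1;
        r[7] = c2;
    }
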
+
+;
+;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba8
+ .PROC
+ .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .ENTRY
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
+ SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
+ STD c2,56(r_ptr) ; r[7] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
+ SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
+ STD c3,64(r_ptr) ; r[8] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
+ STD c1,72(r_ptr) ; r[9] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a5L,a5R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
+ SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
+ STD c2,80(r_ptr) ; r[10] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
+ STD c3,88(r_ptr) ; r[11] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a6L,a6R,c1,c2,c3
+ SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
+ STD c1,96(r_ptr) ; r[12] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
+ STD c2,104(r_ptr) ; r[13] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a7L,a7R,c3,c1,c2
+ STD c3, 112(r_ptr) ; r[14] = c3
+ STD c1, 120(r_ptr) ; r[15] = c1
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ STD c2,56(r_ptr) ; r[7] = c2;
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;---------------------------------------------------------------------------
+
+MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
+ XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,B0L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+	ADD,DC	ht,%r0,ht	; add in carry (ht++)
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
+
+;
+;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba8
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+ FLDD 32(b_ptr),b4
+ FLDD 40(b_ptr),b5
+ FLDD 48(b_ptr),b6
+ FLDD 56(b_ptr),b7
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
+ STD c1,48(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
+ STD c2,56(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
+ STD c3,64(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
+ STD c1,72(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
+ STD c2,80(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
+ STD c3,88(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
+ STD c1,96(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
+ STD c2,104(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
+ STD c3,112(r_ptr)
+ STD c1,120(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+	FSTD	%fr12,32(%sp)	; save fr12
+	FSTD	%fr13,40(%sp)	; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ STD c1,48(r_ptr)
+ STD c2,56(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SPACE $PRIVATE$,SORT=16
+;--- not PIC .IMPORT $global$,DATA
+;--- not PIC .SPACE $TEXT$
+;--- not PIC .SUBSPA $CODE$
+;--- not PIC .SUBSPA $LIT$,ACCESS=0x2c
+;--- not PIC C$7
+;--- not PIC .ALIGN 8
+;--- not PIC .STRINGZ "Division would overflow (%d)\n"
+ .END
diff --git a/openssl-1.1.0h/crypto/bn/asm/pa-risc2W.s b/openssl-1.1.0h/crypto/bn/asm/pa-risc2W.s
new file mode 100644
index 0000000..9738117
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/pa-risc2W.s
@@ -0,0 +1,1612 @@
+; Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
+;
+; Licensed under the OpenSSL license (the "License"). You may not use
+; this file except in compliance with the License. You can obtain a copy
+; in the file LICENSE in the source distribution or at
+; https://www.openssl.org/source/license.html
+
+;
+; PA-RISC 64-bit implementation of bn_asm code
+;
+; This code is approximately 2x faster than the C version
+; for RSA/DSA.
+;
+; See http://devresource.hp.com/ for more details on the PA-RISC
+; architecture. Also see the book "PA-RISC 2.0 Architecture"
+; by Gerry Kane for information on the instruction set architecture.
+;
+; Code written by Chris Ruemmler (with some help from the HP C
+; compiler).
+;
+; The code compiles with HP's assembler
+;
+
+ .level 2.0W
+ .space $TEXT$
+ .subspa $CODE$,QUAD=0,ALIGN=8,ACCESS=0x2c,CODE_ONLY
+
+;
+; Global Register definitions used for the routines.
+;
+; Some information about HP's runtime architecture for 64-bits.
+;
+; "Caller save" means the calling function must save the register
+; if it wants the register to be preserved.
+; "Callee save" means if a function uses the register, it must save
+; the value before using it.
+;
+; For the floating point registers
+;
+; "caller save" registers: fr4-fr11, fr22-fr31
+; "callee save" registers: fr12-fr21
+; "special" registers: fr0-fr3 (status and exception registers)
+;
+; For the integer registers
+; value zero : r0
+; "caller save" registers: r1,r19-r26
+; "callee save" registers: r3-r18
+; return register : r2 (rp)
+; return values        : r28 (ret0,ret1)
+; Stack pointer        : r30 (sp)
+; global data pointer  : r27 (dp)
+; argument pointer     : r29 (ap)
+; millicode return ptr : r31 (also a caller save register)
+
+
+;
+; Arguments to the routines
+;
+r_ptr .reg %r26
+a_ptr .reg %r25
+b_ptr .reg %r24
+num .reg %r24
+w .reg %r23
+n .reg %r23
+
+
+;
+; Globals used in some routines
+;
+
+top_overflow .reg %r29
+high_mask .reg %r22 ; value 0xffffffff80000000L
+
+
+;------------------------------------------------------------------------------
+;
+; bn_mul_add_words
+;
+;BN_ULONG bn_mul_add_words(BN_ULONG *r_ptr, BN_ULONG *a_ptr,
+; int num, BN_ULONG w)
+;
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = num
+; arg3 = w
+;
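+; A hedged C sketch of the semantics this routine implements (the sketch
+; is illustrative and not part of this file; the *_ref name is made up
+; and unsigned __int128 merely stands in for the 64x64->128 product):
+;
+;   BN_ULONG bn_mul_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+;                                 int num, BN_ULONG w)
+;   {
+;       BN_ULONG carry = 0;
+;       for (int i = 0; i < num; i++) {
+;           unsigned __int128 t = (unsigned __int128)ap[i] * w + rp[i] + carry;
+;           rp[i] = (BN_ULONG)t;          /* low 64 bits back into r    */
+;           carry = (BN_ULONG)(t >> 64);  /* high 64 bits carry forward */
+;       }
+;       return carry;                     /* final carry word           */
+;   }
+;
+; The unrolled loop below computes this two words per iteration, with
+; each 64x64 product built from four 32-bit xmpyu multiplies.
+;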
+; Local register definitions
+;
+
+fm1 .reg %fr22
+fm .reg %fr23
+ht_temp .reg %fr24
+ht_temp_1 .reg %fr25
+lt_temp .reg %fr26
+lt_temp_1 .reg %fr27
+fm1_1 .reg %fr28
+fm_1 .reg %fr29
+
+fw_h .reg %fr7L
+fw_l .reg %fr7R
+fw .reg %fr7
+
+fht_0 .reg %fr8L
+flt_0 .reg %fr8R
+t_float_0 .reg %fr8
+
+fht_1 .reg %fr9L
+flt_1 .reg %fr9R
+t_float_1 .reg %fr9
+
+tmp_0 .reg %r31
+tmp_1 .reg %r21
+m_0 .reg %r20
+m_1 .reg %r19
+ht_0 .reg %r1
+ht_1 .reg %r3
+lt_0 .reg %r4
+lt_1 .reg %r5
+m1_0 .reg %r6
+m1_1 .reg %r7
+rp_val .reg %r8
+rp_val_1 .reg %r9
+
+bn_mul_add_words
+ .export bn_mul_add_words,entry,NO_RELOCATION,LONG_RETURN
+ .proc
+ .callinfo frame=128
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP ; Needed to make the loop 16-byte aligned
+ NOP ; Needed to make the loop 16-byte aligned
+
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+ STD %r7,32(%sp) ; save r7
+ STD %r8,40(%sp) ; save r8
+
+ STD %r9,48(%sp) ; save r9
+ COPY %r0,%ret0 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+ STD w,56(%sp) ; store w on stack
+
+ CMPIB,>= 0,num,bn_mul_add_words_exit ; if (num <= 0) then exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; The loop is unrolled twice, so if there is only 1 number
+ ; then go straight to the cleanup code.
+ ;
+ CMPIB,= 1,num,bn_mul_add_words_single_top
+ FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+        ; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_add_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDD 8(r_ptr),rp_val_1 ; rp[1]
+
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = fht_1*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1[0]
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1[1]
+
+ XMPYU flt_0,fw_h,fm ; m[0] = flt_0*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m[1] = flt_1*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m[0]
+ FSTD fm_1,-40(%sp) ; -40(sp) = m[1]
+
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp_1 = fht_1*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht_temp
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht_temp_1
+
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt_temp
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt_temp_1
+
+ LDD -8(%sp),m_0 ; m[0]
+ LDD -40(%sp),m_1 ; m[1]
+ LDD -16(%sp),m1_0 ; m1[0]
+ LDD -48(%sp),m1_1 ; m1[1]
+
+ LDD -24(%sp),ht_0 ; ht[0]
+ LDD -56(%sp),ht_1 ; ht[1]
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m[0] + m1[0];
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m[1] + m1[1];
+
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m[0] < m1[0])
+ ADD,L ht_0,top_overflow,ht_0 ; ht[0] += (1<<32)
+
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m[1] < m1[1])
+ ADD,L ht_1,top_overflow,ht_1 ; ht[1] += (1<<32)
+ EXTRD,U tmp_0,31,32,m_0 ; m[0]>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1[0] = m[0]<<32
+
+ EXTRD,U tmp_1,31,32,m_1 ; m[1]>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1[1] = m[1]<<32
+ ADD,L ht_0,m_0,ht_0 ; ht[0]+= (m[0]>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht[1]+= (m[1]>>32)
+
+ ADD lt_0,m1_0,lt_0 ; lt[0] = lt[0]+m1[0];
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_1,m1_1,lt_1 ; lt[1] = lt[1]+m1[1];
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+
+ ADD %ret0,lt_0,lt_0 ; lt[0] = lt[0] + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ ADD lt_0,rp_val,lt_0 ; lt[0] = lt[0]+rp[0]
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+
+ LDO -2(num),num ; num = num - 2;
+ ADD ht_0,lt_1,lt_1 ; lt[1] = lt[1] + ht_0 (c);
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+
+ ADD lt_1,rp_val_1,lt_1 ; lt[1] = lt[1]+rp[1]
+ ADD,DC ht_1,%r0,%ret0 ; ht[1]++
+ LDO 16(a_ptr),a_ptr ; a_ptr += 2
+
+ STD lt_1,8(r_ptr) ; rp[1] = lt[1]
+ CMPIB,<= 2,num,bn_mul_add_words_unroll2 ; go again if more to do
+ LDO 16(r_ptr),r_ptr ; r_ptr += 2
+
+ CMPIB,=,N 0,num,bn_mul_add_words_exit ; are we done, or cleanup last one
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_add_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ LDD 0(r_ptr),rp_val ; rp[0]
+ LDO 8(a_ptr),a_ptr ; a_ptr++
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0 ; m1 = temp1
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,tmp_0 ; tmp_0 = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD %ret0,tmp_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+ ADD lt_0,rp_val,lt_0 ; lt = lt+rp[0]
+ ADD,DC ht_0,%r0,%ret0 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_add_words_exit
+ .EXIT
+ LDD -80(%sp),%r9 ; restore r9
+ LDD -88(%sp),%r8 ; restore r8
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+; arg3 = w
+
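+; A hedged C sketch of the intended semantics (illustrative only; the
+; *_ref name is made up and unsigned __int128 just models the
+; 64x64->128 product):
+;
+;   BN_ULONG bn_mul_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+;                             int num, BN_ULONG w)
+;   {
+;       BN_ULONG carry = 0;
+;       for (int i = 0; i < num; i++) {
+;           unsigned __int128 t = (unsigned __int128)ap[i] * w + carry;
+;           rp[i] = (BN_ULONG)t;          /* low 64 bits of the product */
+;           carry = (BN_ULONG)(t >> 64);  /* high 64 bits carry forward */
+;       }
+;       return carry;
+;   }
+;
+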
+bn_mul_words
+ .proc
+ .callinfo frame=128
+ .entry
+ .EXPORT bn_mul_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ STD %r7,32(%sp) ; save r7
+ COPY %r0,%ret0 ; return 0 by default
+ DEPDI,Z 1,31,1,top_overflow ; top_overflow = 1 << 32
+ STD w,56(%sp) ; w on stack
+
+ CMPIB,>= 0,num,bn_mul_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+ ; See if only 1 word to do, thus just do cleanup
+ ;
+ CMPIB,= 1,num,bn_mul_words_single_top
+ FLDD -72(%sp),fw ; load up w into fp register fw (fw_h/fw_l)
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+ ; PA-RISC 2.0 chips have two fully pipelined multipliers, thus
+        ; two 32-bit multiplies can be issued per cycle.
+ ;
+bn_mul_words_unroll2
+
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ FLDD 8(a_ptr),t_float_1 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+ XMPYU fht_0,fw_l,fm1 ; m1[0] = fht_0*fw_l
+ XMPYU fht_1,fw_l,fm1_1 ; m1[1] = ht*fw_l
+
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ FSTD fm1_1,-48(%sp) ; -48(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ XMPYU flt_1,fw_h,fm_1 ; m = lt*fw_h
+
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ FSTD fm_1,-40(%sp) ; -40(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = fht_0*fw_h
+ XMPYU fht_1,fw_h,ht_temp_1 ; ht_temp = ht*fw_h
+
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ FSTD ht_temp_1,-56(%sp) ; -56(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ XMPYU flt_1,fw_l,lt_temp_1 ; lt_temp = lt*fw_l
+
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+ FSTD lt_temp_1,-64(%sp) ; -64(sp) = lt
+ LDD -8(%sp),m_0
+ LDD -40(%sp),m_1
+
+ LDD -16(%sp),m1_0
+ LDD -48(%sp),m1_1
+ LDD -24(%sp),ht_0
+ LDD -56(%sp),ht_1
+
+ ADD,L m1_0,m_0,tmp_0 ; tmp_0 = m + m1;
+ ADD,L m1_1,m_1,tmp_1 ; tmp_1 = m + m1;
+ LDD -32(%sp),lt_0
+ LDD -64(%sp),lt_1
+
+ CMPCLR,*>>= tmp_0,m1_0, %r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+ CMPCLR,*>>= tmp_1,m1_1,%r0 ; if (m < m1)
+ ADD,L ht_1,top_overflow,ht_1 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+ EXTRD,U tmp_1,31,32,m_1 ; m>>32
+ DEPD,Z tmp_1,31,32,m1_1 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD,L ht_1,m_1,ht_1 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt = lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD lt_1,m1_1,lt_1 ; lt = lt+m1;
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ ADD %ret0,lt_0,lt_0 ; lt = lt + c (ret0);
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD ht_0,lt_1,lt_1 ; lt = lt + c (ht_0)
+ ADD,DC ht_1,%r0,ht_1 ; ht++
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD lt_1,8(r_ptr) ; rp[1] = lt
+
+ COPY ht_1,%ret0 ; carry = ht
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_mul_words_unroll2
+        LDO     16(r_ptr),r_ptr ; rp += 2
+
+ CMPIB,=,N 0,num,bn_mul_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_mul_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,fw_l,fm1 ; m1 = ht*fw_l
+ FSTD fm1,-16(%sp) ; -16(sp) = m1
+ XMPYU flt_0,fw_h,fm ; m = lt*fw_h
+ FSTD fm,-8(%sp) ; -8(sp) = m
+ XMPYU fht_0,fw_h,ht_temp ; ht_temp = ht*fw_h
+ FSTD ht_temp,-24(%sp) ; -24(sp) = ht
+ XMPYU flt_0,fw_l,lt_temp ; lt_temp = lt*fw_l
+ FSTD lt_temp,-32(%sp) ; -32(sp) = lt
+
+ LDD -8(%sp),m_0
+ LDD -16(%sp),m1_0
+ ADD,L m_0,m1_0,tmp_0 ; tmp_0 = m + m1;
+ LDD -24(%sp),ht_0
+ LDD -32(%sp),lt_0
+
+ CMPCLR,*>>= tmp_0,m1_0,%r0 ; if (m < m1)
+ ADD,L ht_0,top_overflow,ht_0 ; ht += (1<<32)
+
+ EXTRD,U tmp_0,31,32,m_0 ; m>>32
+ DEPD,Z tmp_0,31,32,m1_0 ; m1 = m<<32
+
+ ADD,L ht_0,m_0,ht_0 ; ht+= (m>>32)
+ ADD lt_0,m1_0,lt_0 ; lt= lt+m1;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ ADD %ret0,lt_0,lt_0 ; lt = lt + c;
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ COPY ht_0,%ret0 ; copy carry
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+
+bn_mul_words_exit
+ .EXIT
+ LDD -96(%sp),%r7 ; restore r7
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3 ; restore r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;void bn_sqr_words(BN_ULONG *rp, BN_ULONG *ap, int num)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = num
+;
+
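+; A hedged C sketch of the intended semantics (illustrative only;
+; each input word yields two result words):
+;
+;   void bn_sqr_words_ref(BN_ULONG *rp, const BN_ULONG *ap, int num)
+;   {
+;       for (int i = 0; i < num; i++) {
+;           unsigned __int128 t = (unsigned __int128)ap[i] * ap[i];
+;           rp[2*i]     = (BN_ULONG)t;         /* low half of the square  */
+;           rp[2*i + 1] = (BN_ULONG)(t >> 64); /* high half of the square */
+;       }
+;   }
+;
+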
+bn_sqr_words
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ NOP
+ STD %r5,16(%sp) ; save r5
+
+ CMPIB,>= 0,num,bn_sqr_words_exit
+ LDO 128(%sp),%sp ; bump stack
+
+ ;
+        ; If only 1, then go straight to cleanup
+ ;
+ CMPIB,= 1,num,bn_sqr_words_single_top
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+
+bn_sqr_words_unroll2
+ FLDD 0(a_ptr),t_float_0 ; a[0]
+ FLDD 8(a_ptr),t_float_1 ; a[1]
+ XMPYU fht_0,flt_0,fm ; m[0]
+ XMPYU fht_1,flt_1,fm_1 ; m[1]
+
+ FSTD fm,-24(%sp) ; store m[0]
+ FSTD fm_1,-56(%sp) ; store m[1]
+ XMPYU flt_0,flt_0,lt_temp ; lt[0]
+ XMPYU flt_1,flt_1,lt_temp_1 ; lt[1]
+
+ FSTD lt_temp,-16(%sp) ; store lt[0]
+ FSTD lt_temp_1,-48(%sp) ; store lt[1]
+ XMPYU fht_0,fht_0,ht_temp ; ht[0]
+ XMPYU fht_1,fht_1,ht_temp_1 ; ht[1]
+
+ FSTD ht_temp,-8(%sp) ; store ht[0]
+ FSTD ht_temp_1,-40(%sp) ; store ht[1]
+ LDD -24(%sp),m_0
+ LDD -56(%sp),m_1
+
+ AND m_0,high_mask,tmp_0 ; m[0] & Mask
+ AND m_1,high_mask,tmp_1 ; m[1] & Mask
+ DEPD,Z m_0,30,31,m_0 ; m[0] << 32+1
+ DEPD,Z m_1,30,31,m_1 ; m[1] << 32+1
+
+ LDD -16(%sp),lt_0
+ LDD -48(%sp),lt_1
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m[0]&Mask >> 32-1
+ EXTRD,U tmp_1,32,33,tmp_1 ; tmp_1 = m[1]&Mask >> 32-1
+
+ LDD -8(%sp),ht_0
+ LDD -40(%sp),ht_1
+ ADD,L ht_0,tmp_0,ht_0 ; ht[0] += tmp_0
+ ADD,L ht_1,tmp_1,ht_1 ; ht[1] += tmp_1
+
+ ADD lt_0,m_0,lt_0 ; lt = lt+m
+ ADD,DC ht_0,%r0,ht_0 ; ht[0]++
+ STD lt_0,0(r_ptr) ; rp[0] = lt[0]
+        STD     ht_0,8(r_ptr)   ; rp[1] = ht[0]
+
+ ADD lt_1,m_1,lt_1 ; lt = lt+m
+ ADD,DC ht_1,%r0,ht_1 ; ht[1]++
+ STD lt_1,16(r_ptr) ; rp[2] = lt[1]
+ STD ht_1,24(r_ptr) ; rp[3] = ht[1]
+
+ LDO -2(num),num ; num = num - 2;
+ LDO 16(a_ptr),a_ptr ; ap += 2
+ CMPIB,<= 2,num,bn_sqr_words_unroll2
+ LDO 32(r_ptr),r_ptr ; rp += 4
+
+ CMPIB,=,N 0,num,bn_sqr_words_exit ; are we done?
+
+ ;
+ ; Top of loop aligned on 64-byte boundary
+ ;
+bn_sqr_words_single_top
+ FLDD 0(a_ptr),t_float_0 ; load up 64-bit value (fr8L) ht(L)/lt(R)
+
+ XMPYU fht_0,flt_0,fm ; m
+ FSTD fm,-24(%sp) ; store m
+
+ XMPYU flt_0,flt_0,lt_temp ; lt
+ FSTD lt_temp,-16(%sp) ; store lt
+
+ XMPYU fht_0,fht_0,ht_temp ; ht
+ FSTD ht_temp,-8(%sp) ; store ht
+
+ LDD -24(%sp),m_0 ; load m
+ AND m_0,high_mask,tmp_0 ; m & Mask
+ DEPD,Z m_0,30,31,m_0 ; m << 32+1
+ LDD -16(%sp),lt_0 ; lt
+
+ LDD -8(%sp),ht_0 ; ht
+ EXTRD,U tmp_0,32,33,tmp_0 ; tmp_0 = m&Mask >> 32-1
+ ADD m_0,lt_0,lt_0 ; lt = lt+m
+ ADD,L ht_0,tmp_0,ht_0 ; ht += tmp_0
+ ADD,DC ht_0,%r0,ht_0 ; ht++
+
+ STD lt_0,0(r_ptr) ; rp[0] = lt
+ STD ht_0,8(r_ptr) ; rp[1] = ht
+
+bn_sqr_words_exit
+ .EXIT
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
+
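+; A hedged C sketch of the intended semantics (illustrative only):
+;
+;   BN_ULONG bn_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+;                             const BN_ULONG *bp, int n)
+;   {
+;       BN_ULONG carry = 0;                    /* always 0 or 1 */
+;       for (int i = 0; i < n; i++) {
+;           unsigned __int128 t = (unsigned __int128)ap[i] + bp[i] + carry;
+;           rp[i] = (BN_ULONG)t;
+;           carry = (BN_ULONG)(t >> 64);
+;       }
+;       return carry;
+;   }
+;
+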
+t .reg %r22
+b .reg %r21
+l .reg %r20
+
+bn_add_words
+ .proc
+ .entry
+ .callinfo
+ .EXPORT bn_add_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .align 64
+
+ CMPIB,>= 0,n,bn_add_words_exit
+ COPY %r0,%ret0 ; return 0 by default
+
+ ;
+ ; If 2 or more numbers do the loop
+ ;
+ CMPIB,= 1,n,bn_add_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_add_words_unroll2
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,0(r_ptr)
+
+ LDD 8(a_ptr),t
+ LDD 8(b_ptr),b
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_add_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_add_words_exit ; are we done?
+
+bn_add_words_single_top
+ LDD 0(a_ptr),t
+ LDD 0(b_ptr),b
+
+ ADD t,%ret0,t ; t = t+c;
+ ADD,DC %r0,%r0,%ret0 ; set c to carry (could use CMPCLR??)
+ ADD t,b,l ; l = t + b[0]
+ ADD,DC %ret0,%r0,%ret0 ; c+= carry
+ STD l,0(r_ptr)
+
+bn_add_words_exit
+ .EXIT
+ BVE (%rp)
+ NOP
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+;BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+;
+; arg0 = rp
+; arg1 = ap
+; arg2 = bp
+; arg3 = n
+
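+; A hedged C sketch of the intended semantics (illustrative only):
+;
+;   BN_ULONG bn_sub_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
+;                             const BN_ULONG *bp, int n)
+;   {
+;       BN_ULONG borrow = 0;                   /* always 0 or 1 */
+;       for (int i = 0; i < n; i++) {
+;           BN_ULONG a = ap[i], b = bp[i];
+;           rp[i]  = a - b - borrow;
+;           borrow = (a < b) || (a == b && borrow);
+;       }
+;       return borrow;
+;   }
+;
+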
+t1 .reg %r22
+t2 .reg %r21
+sub_tmp1 .reg %r20
+sub_tmp2 .reg %r19
+
+
+bn_sub_words
+ .proc
+ .callinfo
+ .EXPORT bn_sub_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ CMPIB,>= 0,n,bn_sub_words_exit
+ COPY %r0,%ret0 ; return 0 by default
+
+ ;
+ ; If 2 or more numbers do the loop
+ ;
+ CMPIB,= 1,n,bn_sub_words_single_top
+ NOP
+
+ ;
+ ; This loop is unrolled 2 times (64-byte aligned as well)
+ ;
+bn_sub_words_unroll2
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+ STD sub_tmp1,0(r_ptr)
+
+ LDD 8(a_ptr),t1
+ LDD 8(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+ STD sub_tmp1,8(r_ptr)
+
+ LDO -2(n),n
+ LDO 16(a_ptr),a_ptr
+ LDO 16(b_ptr),b_ptr
+
+ CMPIB,<= 2,n,bn_sub_words_unroll2
+ LDO 16(r_ptr),r_ptr
+
+ CMPIB,=,N 0,n,bn_sub_words_exit ; are we done?
+
+bn_sub_words_single_top
+ LDD 0(a_ptr),t1
+ LDD 0(b_ptr),t2
+ SUB t1,t2,sub_tmp1 ; t3 = t1-t2;
+ SUB sub_tmp1,%ret0,sub_tmp1 ; t3 = t3- c;
+ CMPCLR,*>> t1,t2,sub_tmp2 ; clear if t1 > t2
+ LDO 1(%r0),sub_tmp2
+
+ CMPCLR,*= t1,t2,%r0
+ COPY sub_tmp2,%ret0
+
+ STD sub_tmp1,0(r_ptr)
+
+bn_sub_words_exit
+ .EXIT
+ BVE (%rp)
+ NOP
+ .PROCEND ;in=23,24,25,26,29;out=28;
+
+;------------------------------------------------------------------------------
+;
+; unsigned long bn_div_words(unsigned long h, unsigned long l, unsigned long d)
+;
+; arg0 = h
+; arg1 = l
+; arg2 = d
+;
+; This is mainly just modified assembly from the compiler, thus the
+; lack of variable names.
+;
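+; A hedged C sketch of the intended semantics (illustrative only; the
+; real routine also normalizes the divisor and handles d == 0 and the
+; "Division would overflow" case reported at the end of this file):
+;
+;   BN_ULONG bn_div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+;   {
+;       /* quotient of the 128-bit value h:l by d; assumes h < d so
+;        * that the quotient fits in a single 64-bit word */
+;       unsigned __int128 n = ((unsigned __int128)h << 64) | l;
+;       return (BN_ULONG)(n / d);
+;   }
+;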
+;------------------------------------------------------------------------------
+bn_div_words
+ .proc
+ .callinfo CALLER,FRAME=272,ENTRY_GR=%r10,SAVE_RP,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_div_words,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .IMPORT BN_num_bits_word,CODE,NO_RELOCATION
+ .IMPORT __iob,DATA
+ .IMPORT fprintf,CODE,NO_RELOCATION
+ .IMPORT abort,CODE,NO_RELOCATION
+ .IMPORT $$div2U,MILLICODE
+ .entry
+ STD %r2,-16(%r30)
+ STD,MA %r3,352(%r30)
+ STD %r4,-344(%r30)
+ STD %r5,-336(%r30)
+ STD %r6,-328(%r30)
+ STD %r7,-320(%r30)
+ STD %r8,-312(%r30)
+ STD %r9,-304(%r30)
+ STD %r10,-296(%r30)
+
+ STD %r27,-288(%r30) ; save gp
+
+ COPY %r24,%r3 ; save d
+ COPY %r26,%r4 ; save h (high 64-bits)
+ LDO -1(%r0),%ret0 ; return -1 by default
+
+ CMPB,*= %r0,%arg2,$D3 ; if (d == 0)
+ COPY %r25,%r5 ; save l (low 64-bits)
+
+ LDO -48(%r30),%r29 ; create ap
+ .CALL ;in=26,29;out=28;
+ B,L BN_num_bits_word,%r2
+ COPY %r3,%r26
+ LDD -288(%r30),%r27 ; restore gp
+ LDI 64,%r21
+
+ CMPB,= %r21,%ret0,$00000012 ;if (i == 64) (forward)
+ COPY %ret0,%r24 ; i
+ MTSARCM %r24
+ DEPDI,Z -1,%sar,1,%r29
+ CMPB,*<<,N %r29,%r4,bn_div_err_case ; if (h > 1<<i) (forward)
+
+$00000012
+ SUBI 64,%r24,%r31 ; i = 64 - i;
+ CMPCLR,*<< %r4,%r3,%r0 ; if (h >= d)
+ SUB %r4,%r3,%r4 ; h -= d
+ CMPB,= %r31,%r0,$0000001A ; if (i)
+ COPY %r0,%r10 ; ret = 0
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r3,%sar,64,%r3 ; d <<= i;
+        SUBI    64,%r31,%r19    ; 64 - i; redundant
+ MTSAR %r19 ; (64 -i) to shift
+ SHRPD %r4,%r5,%sar,%r4 ; l>> (64-i)
+ MTSARCM %r31 ; i to shift
+ DEPD,Z %r5,%sar,64,%r5 ; l <<= i;
+
+$0000001A
+ DEPDI,Z -1,31,32,%r19
+        EXTRD,U %r3,31,32,%r6   ; dh = d >> 32
+        EXTRD,U %r3,63,32,%r8   ; dl = d & 0xffffffff
+ LDO 2(%r0),%r9
+ STD %r3,-280(%r30) ; "d" to stack
+
+$0000001C
+ DEPDI,Z -1,63,32,%r29 ;
+ EXTRD,U %r4,31,32,%r31 ; h >> 32
+ CMPB,*=,N %r31,%r6,$D2 ; if ((h>>32) != dh)(forward) div
+ COPY %r4,%r26
+ EXTRD,U %r4,31,32,%r25
+ COPY %r6,%r24
+ .CALL ;in=23,24,25,26;out=20,21,22,28,29; (MILLICALL)
+ B,L $$div2U,%r2
+ EXTRD,U %r6,31,32,%r23
+ DEPD %r28,31,32,%r29
+$D2
+ STD %r29,-272(%r30) ; q
+ AND %r5,%r19,%r24 ; t & 0xffffffff00000000;
+        EXTRD,U %r24,31,32,%r24         ; l >> 32
+ FLDD -272(%r30),%fr7 ; q
+ FLDD -280(%r30),%fr8 ; d
+ XMPYU %fr8L,%fr7L,%fr10
+ FSTD %fr10,-256(%r30)
+ XMPYU %fr8L,%fr7R,%fr22
+ FSTD %fr22,-264(%r30)
+ XMPYU %fr8R,%fr7L,%fr11
+ XMPYU %fr8R,%fr7R,%fr23
+ FSTD %fr11,-232(%r30)
+ FSTD %fr23,-240(%r30)
+ LDD -256(%r30),%r28
+ DEPD,Z %r28,31,32,%r2
+ LDD -264(%r30),%r20
+ ADD,L %r20,%r2,%r31
+ LDD -232(%r30),%r22
+ DEPD,Z %r22,31,32,%r22
+ LDD -240(%r30),%r21
+ B $00000024 ; enter loop
+ ADD,L %r21,%r22,%r23
+
+$0000002A
+ LDO -1(%r29),%r29
+ SUB %r23,%r8,%r23
+$00000024
+ SUB %r4,%r31,%r25
+ AND %r25,%r19,%r26
+ CMPB,*<>,N %r0,%r26,$00000046 ; (forward)
+ DEPD,Z %r25,31,32,%r20
+ OR %r20,%r24,%r21
+ CMPB,*<<,N %r21,%r23,$0000002A ;(backward)
+ SUB %r31,%r6,%r31
+;-------------Break path---------------------
+
+$00000046
+ DEPD,Z %r23,31,32,%r25 ;tl
+ EXTRD,U %r23,31,32,%r26 ;t
+        AND     %r25,%r19,%r24          ;tl = (tl<<32)&0xffffffff00000000L
+ ADD,L %r31,%r26,%r31 ;th += t;
+ CMPCLR,*>>= %r5,%r24,%r0 ;if (l<tl)
+ LDO 1(%r31),%r31 ; th++;
+        CMPB,*<<=,N     %r31,%r4,$00000036      ;if (h < th) (forward)
+ LDO -1(%r29),%r29 ;q--;
+ ADD,L %r4,%r3,%r4 ;h += d;
+$00000036
+ ADDIB,=,N -1,%r9,$D1 ;if (--count == 0) break (forward)
+ SUB %r5,%r24,%r28 ; l -= tl;
+ SUB %r4,%r31,%r24 ; h -= th;
+ SHRPD %r24,%r28,32,%r4 ; h = ((h<<32)|(l>>32));
+ DEPD,Z %r29,31,32,%r10 ; ret = q<<32
+ b $0000001C
+ DEPD,Z %r28,31,32,%r5 ; l = l << 32
+
+$D1
+ OR %r10,%r29,%r28 ; ret |= q
+$D3
+ LDD -368(%r30),%r2
+$D0
+ LDD -296(%r30),%r10
+ LDD -304(%r30),%r9
+ LDD -312(%r30),%r8
+ LDD -320(%r30),%r7
+ LDD -328(%r30),%r6
+ LDD -336(%r30),%r5
+ LDD -344(%r30),%r4
+ BVE (%r2)
+ .EXIT
+ LDD,MB -352(%r30),%r3
+
+bn_div_err_case
+ MFIA %r6
+ ADDIL L'bn_div_words-bn_div_err_case,%r6,%r1
+ LDO R'bn_div_words-bn_div_err_case(%r1),%r6
+ ADDIL LT'__iob,%r27,%r1
+ LDD RT'__iob(%r1),%r26
+ ADDIL L'C$4-bn_div_words,%r6,%r1
+ LDO R'C$4-bn_div_words(%r1),%r25
+ LDO 64(%r26),%r26
+ .CALL ;in=24,25,26,29;out=28;
+ B,L fprintf,%r2
+ LDO -48(%r30),%r29
+ LDD -288(%r30),%r27
+ .CALL ;in=29;
+ B,L abort,%r2
+ LDO -48(%r30),%r29
+ LDD -288(%r30),%r27
+ B $D0
+ LDD -368(%r30),%r2
+ .PROCEND ;in=24,25,26,29;out=28;
+
+;----------------------------------------------------------------------------
+;
+; Registers to hold 64-bit values to manipulate. The "L" part
+; of the register corresponds to the upper 32-bits, while the "R"
+; part corresponds to the lower 32-bits
+;
+; Note that when using b6 and b7, the code must save them before
+; use because they are callee save registers
+;
+;
+; Floating point registers to use to save values that
+; are manipulated. These don't collide with ftemp1-6 and
+; are all caller save registers
+;
+a0 .reg %fr22
+a0L .reg %fr22L
+a0R .reg %fr22R
+
+a1 .reg %fr23
+a1L .reg %fr23L
+a1R .reg %fr23R
+
+a2 .reg %fr24
+a2L .reg %fr24L
+a2R .reg %fr24R
+
+a3 .reg %fr25
+a3L .reg %fr25L
+a3R .reg %fr25R
+
+a4 .reg %fr26
+a4L .reg %fr26L
+a4R .reg %fr26R
+
+a5 .reg %fr27
+a5L .reg %fr27L
+a5R .reg %fr27R
+
+a6 .reg %fr28
+a6L .reg %fr28L
+a6R .reg %fr28R
+
+a7 .reg %fr29
+a7L .reg %fr29L
+a7R .reg %fr29R
+
+b0 .reg %fr30
+b0L .reg %fr30L
+b0R .reg %fr30R
+
+b1 .reg %fr31
+b1L .reg %fr31L
+b1R .reg %fr31R
+
+;
+; Temporary floating point variables, these are all caller save
+; registers
+;
+ftemp1 .reg %fr4
+ftemp2 .reg %fr5
+ftemp3 .reg %fr6
+ftemp4 .reg %fr7
+
+;
+; The B set of registers when used.
+;
+
+b2 .reg %fr8
+b2L .reg %fr8L
+b2R .reg %fr8R
+
+b3 .reg %fr9
+b3L .reg %fr9L
+b3R .reg %fr9R
+
+b4 .reg %fr10
+b4L .reg %fr10L
+b4R .reg %fr10R
+
+b5 .reg %fr11
+b5L .reg %fr11L
+b5R .reg %fr11R
+
+b6 .reg %fr12
+b6L .reg %fr12L
+b6R .reg %fr12R
+
+b7 .reg %fr13
+b7L .reg %fr13L
+b7R .reg %fr13R
+
+c1 .reg %r21 ; only reg
+temp1 .reg %r20 ; only reg
+temp2 .reg %r19 ; only reg
+temp3 .reg %r31 ; only reg
+
+m1 .reg %r28
+c2 .reg %r23
+high_one .reg %r1
+ht .reg %r6
+lt .reg %r5
+m .reg %r4
+c3 .reg %r3
+
+SQR_ADD_C .macro A0L,A0R,C1,C2,C3
+ XMPYU A0L,A0R,ftemp1 ; m
+ FSTD ftemp1,-24(%sp) ; store m
+
+ XMPYU A0R,A0R,ftemp2 ; lt
+ FSTD ftemp2,-16(%sp) ; store lt
+
+ XMPYU A0L,A0L,ftemp3 ; ht
+ FSTD ftemp3,-8(%sp) ; store ht
+
+ LDD -24(%sp),m ; load m
+ AND m,high_mask,temp2 ; m & Mask
+ DEPD,Z m,30,31,temp3 ; m << 32+1
+ LDD -16(%sp),lt ; lt
+
+ LDD -8(%sp),ht ; ht
+ EXTRD,U temp2,32,33,temp1 ; temp1 = m&Mask >> 32-1
+ ADD temp3,lt,lt ; lt = lt+m
+ ADD,L ht,temp1,ht ; ht += temp1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C2,ht,C2 ; c2=c2+ht
+ ADD,DC C3,%r0,C3 ; c3++
+.endm
+
+SQR_ADD_C2 .macro A0L,A0R,A1L,A1R,C1,C2,C3
+ XMPYU A0L,A1R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,A1L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,A1R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,A1L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD ht,ht,ht ; ht=ht+ht;
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+
+ ADD lt,lt,lt ; lt=lt+lt;
+ ADD,DC ht,%r0,ht ; add in carry (ht++)
+
+ ADD C1,lt,C1 ; c1=c1+lt
+ ADD,DC,*NUV ht,%r0,ht ; add in carry (ht++)
+ LDO 1(C3),C3 ; bump c3 if overflow,nullify otherwise
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
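+; A hedged C sketch of what the two macros above accumulate into the
+; three-word column accumulator c3:c2:c1 (illustrative only; a0 and a1
+; stand for the 64-bit values whose halves are passed as the L/R pairs):
+;
+;   /* SQR_ADD_C:  (c3:c2:c1) += a0*a0 */
+;   static void sqr_add_c_ref(BN_ULONG a0,
+;                             BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
+;   {
+;       unsigned __int128 p = (unsigned __int128)a0 * a0;
+;       unsigned __int128 t = (unsigned __int128)*c1 + (BN_ULONG)p;
+;       *c1 = (BN_ULONG)t;
+;       t = (unsigned __int128)*c2 + (BN_ULONG)(p >> 64) + (BN_ULONG)(t >> 64);
+;       *c2 = (BN_ULONG)t;
+;       *c3 += (BN_ULONG)(t >> 64);
+;   }
+;
+;   /* SQR_ADD_C2 does the same with the cross product a0*a1 added twice,
+;    * i.e. (c3:c2:c1) += 2*a0*a1, folding the extra top bit into c3. */
+;
+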
+;
+;void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba8
+ .PROC
+ .CALLINFO FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .ENTRY
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a0L,a0R,c2,c3,c1
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a5L,a5R,a0L,a0R,c3,c1,c2
+ SQR_ADD_C2 a4L,a4R,a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a4L,a4R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a1L,a1R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a0L,a0R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a0L,a0R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a1L,a1R,c2,c3,c1
+ SQR_ADD_C2 a5L,a5R,a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a4L,a4R,a3L,a3R,c2,c3,c1
+ STD c2,56(r_ptr) ; r[7] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a5L,a5R,a3L,a3R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a2L,a2R,c3,c1,c2
+ SQR_ADD_C2 a7L,a7R,a1L,a1R,c3,c1,c2
+ STD c3,64(r_ptr) ; r[8] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a7L,a7R,a2L,a2R,c1,c2,c3
+ SQR_ADD_C2 a6L,a6R,a3L,a3R,c1,c2,c3
+ SQR_ADD_C2 a5L,a5R,a4L,a4R,c1,c2,c3
+ STD c1,72(r_ptr) ; r[9] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a5L,a5R,c2,c3,c1
+ SQR_ADD_C2 a6L,a6R,a4L,a4R,c2,c3,c1
+ SQR_ADD_C2 a7L,a7R,a3L,a3R,c2,c3,c1
+ STD c2,80(r_ptr) ; r[10] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a7L,a7R,a4L,a4R,c3,c1,c2
+ SQR_ADD_C2 a6L,a6R,a5L,a5R,c3,c1,c2
+ STD c3,88(r_ptr) ; r[11] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a6L,a6R,c1,c2,c3
+ SQR_ADD_C2 a7L,a7R,a5L,a5R,c1,c2,c3
+ STD c1,96(r_ptr) ; r[12] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a7L,a7R,a6L,a6R,c2,c3,c1
+ STD c2,104(r_ptr) ; r[13] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a7L,a7R,c3,c1,c2
+ STD c3, 112(r_ptr) ; r[14] = c3
+ STD c1, 120(r_ptr) ; r[15] = c1
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+; arg0 = r_ptr
+; arg1 = a_ptr
+;
+
+bn_sqr_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_sqr_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z -1,32,33,high_mask ; Create Mask 0xffffffff80000000L
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ SQR_ADD_C a0L,a0R,c1,c2,c3
+
+ STD c1,0(r_ptr) ; r[0] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C2 a1L,a1R,a0L,a0R,c2,c3,c1
+
+ STD c2,8(r_ptr) ; r[1] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C a1L,a1R,c3,c1,c2
+ SQR_ADD_C2 a2L,a2R,a0L,a0R,c3,c1,c2
+
+ STD c3,16(r_ptr) ; r[2] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C2 a3L,a3R,a0L,a0R,c1,c2,c3
+ SQR_ADD_C2 a2L,a2R,a1L,a1R,c1,c2,c3
+
+ STD c1,24(r_ptr) ; r[3] = c1;
+ COPY %r0,c1
+
+ SQR_ADD_C a2L,a2R,c2,c3,c1
+ SQR_ADD_C2 a3L,a3R,a1L,a1R,c2,c3,c1
+
+ STD c2,32(r_ptr) ; r[4] = c2;
+ COPY %r0,c2
+
+ SQR_ADD_C2 a3L,a3R,a2L,a2R,c3,c1,c2
+ STD c3,40(r_ptr) ; r[5] = c3;
+ COPY %r0,c3
+
+ SQR_ADD_C a3L,a3R,c1,c2,c3
+ STD c1,48(r_ptr) ; r[6] = c1;
+ STD c2,56(r_ptr) ; r[7] = c2;
+
+ .EXIT
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+;---------------------------------------------------------------------------
+
+MUL_ADD_C .macro A0L,A0R,B0L,B0R,C1,C2,C3
+ XMPYU A0L,B0R,ftemp1 ; m1 = bl*ht
+ FSTD ftemp1,-16(%sp) ;
+ XMPYU A0R,B0L,ftemp2 ; m = bh*lt
+ FSTD ftemp2,-8(%sp) ;
+ XMPYU A0R,B0R,ftemp3 ; lt = bl*lt
+ FSTD ftemp3,-32(%sp)
+ XMPYU A0L,B0L,ftemp4 ; ht = bh*ht
+ FSTD ftemp4,-24(%sp) ;
+
+ LDD -8(%sp),m ; r21 = m
+ LDD -16(%sp),m1 ; r19 = m1
+ ADD,L m,m1,m ; m+m1
+
+ DEPD,Z m,31,32,temp3 ; (m+m1<<32)
+ LDD -24(%sp),ht ; r24 = ht
+
+ CMPCLR,*>>= m,m1,%r0 ; if (m < m1)
+ ADD,L ht,high_one,ht ; ht+=high_one
+
+ EXTRD,U m,31,32,temp1 ; m >> 32
+ LDD -32(%sp),lt ; lt
+ ADD,L ht,temp1,ht ; ht+= m>>32
+ ADD lt,temp3,lt ; lt = lt+m1
+ ADD,DC ht,%r0,ht ; ht++
+
+ ADD C1,lt,C1 ; c1=c1+lt
+        ADD,DC  ht,%r0,ht       ; ht++
+
+ ADD C2,ht,C2 ; c2 = c2 + ht
+ ADD,DC C3,%r0,C3 ; add in carry (c3++)
+.endm
+
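+; A hedged C sketch of what MUL_ADD_C above accumulates into the column
+; accumulator c3:c2:c1 (illustrative only; a and b stand for the 64-bit
+; values whose halves are passed as the L/R register pairs):
+;
+;   /* MUL_ADD_C: (c3:c2:c1) += a*b */
+;   static void mul_add_c_ref(BN_ULONG a, BN_ULONG b,
+;                             BN_ULONG *c1, BN_ULONG *c2, BN_ULONG *c3)
+;   {
+;       unsigned __int128 p = (unsigned __int128)a * b;
+;       unsigned __int128 t = (unsigned __int128)*c1 + (BN_ULONG)p;
+;       *c1 = (BN_ULONG)t;
+;       t = (unsigned __int128)*c2 + (BN_ULONG)(p >> 64) + (BN_ULONG)(t >> 64);
+;       *c2 = (BN_ULONG)t;
+;       *c3 += (BN_ULONG)(t >> 64);
+;   }
+;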
+
+;
+;void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba8
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba8,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+        FSTD    %fr12,32(%sp)   ; save fr12
+        FSTD    %fr13,40(%sp)   ; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+ FLDD 32(a_ptr),a4
+ FLDD 40(a_ptr),a5
+ FLDD 48(a_ptr),a6
+ FLDD 56(a_ptr),a7
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+ FLDD 32(b_ptr),b4
+ FLDD 40(b_ptr),b5
+ FLDD 48(b_ptr),b6
+ FLDD 56(b_ptr),b7
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a4L,a4R,b0L,b0R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a0L,a0R,b4L,b4R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a0L,a0R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b0L,b0R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a6L,a6R,b0L,b0R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a0L,a0R,b6L,b6R,c1,c2,c3
+ STD c1,48(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b0L,b0R,c2,c3,c1
+ STD c2,56(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b2L,b2R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a4L,a4R,b4L,b4R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a2L,a2R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b7L,b7R,c3,c1,c2
+ STD c3,64(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a2L,a2R,b7L,b7R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a4L,a4R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b4L,b4R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a7L,a7R,b2L,b2R,c1,c2,c3
+ STD c1,72(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a7L,a7R,b3L,b3R,c2,c3,c1
+ MUL_ADD_C a6L,a6R,b4L,b4R,c2,c3,c1
+ MUL_ADD_C a5L,a5R,b5L,b5R,c2,c3,c1
+ MUL_ADD_C a4L,a4R,b6L,b6R,c2,c3,c1
+ MUL_ADD_C a3L,a3R,b7L,b7R,c2,c3,c1
+ STD c2,80(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a4L,a4R,b7L,b7R,c3,c1,c2
+ MUL_ADD_C a5L,a5R,b6L,b6R,c3,c1,c2
+ MUL_ADD_C a6L,a6R,b5L,b5R,c3,c1,c2
+ MUL_ADD_C a7L,a7R,b4L,b4R,c3,c1,c2
+ STD c3,88(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a7L,a7R,b5L,b5R,c1,c2,c3
+ MUL_ADD_C a6L,a6R,b6L,b6R,c1,c2,c3
+ MUL_ADD_C a5L,a5R,b7L,b7R,c1,c2,c3
+ STD c1,96(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a6L,a6R,b7L,b7R,c2,c3,c1
+ MUL_ADD_C a7L,a7R,b6L,b6R,c2,c3,c1
+ STD c2,104(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a7L,a7R,b7L,b7R,c3,c1,c2
+ STD c3,112(r_ptr)
+ STD c1,120(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+;-----------------------------------------------------------------------------
+;
+;void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+; arg0 = r_ptr
+; arg1 = a_ptr
+; arg2 = b_ptr
+;
+
+bn_mul_comba4
+ .proc
+ .callinfo FRAME=128,ENTRY_GR=%r3,ARGS_SAVED,ORDERING_AWARE
+ .EXPORT bn_mul_comba4,ENTRY,PRIV_LEV=3,NO_RELOCATION,LONG_RETURN
+ .entry
+ .align 64
+
+ STD %r3,0(%sp) ; save r3
+ STD %r4,8(%sp) ; save r4
+ STD %r5,16(%sp) ; save r5
+ STD %r6,24(%sp) ; save r6
+        FSTD    %fr12,32(%sp)   ; save fr12
+        FSTD    %fr13,40(%sp)   ; save fr13
+
+ ;
+ ; Zero out carries
+ ;
+ COPY %r0,c1
+ COPY %r0,c2
+ COPY %r0,c3
+
+ LDO 128(%sp),%sp ; bump stack
+ DEPDI,Z 1,31,1,high_one ; Create Value 1 << 32
+
+ ;
+ ; Load up all of the values we are going to use
+ ;
+ FLDD 0(a_ptr),a0
+ FLDD 8(a_ptr),a1
+ FLDD 16(a_ptr),a2
+ FLDD 24(a_ptr),a3
+
+ FLDD 0(b_ptr),b0
+ FLDD 8(b_ptr),b1
+ FLDD 16(b_ptr),b2
+ FLDD 24(b_ptr),b3
+
+ MUL_ADD_C a0L,a0R,b0L,b0R,c1,c2,c3
+ STD c1,0(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a0L,a0R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b0L,b0R,c2,c3,c1
+ STD c2,8(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b0L,b0R,c3,c1,c2
+ MUL_ADD_C a1L,a1R,b1L,b1R,c3,c1,c2
+ MUL_ADD_C a0L,a0R,b2L,b2R,c3,c1,c2
+ STD c3,16(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a0L,a0R,b3L,b3R,c1,c2,c3
+ MUL_ADD_C a1L,a1R,b2L,b2R,c1,c2,c3
+ MUL_ADD_C a2L,a2R,b1L,b1R,c1,c2,c3
+ MUL_ADD_C a3L,a3R,b0L,b0R,c1,c2,c3
+ STD c1,24(r_ptr)
+ COPY %r0,c1
+
+ MUL_ADD_C a3L,a3R,b1L,b1R,c2,c3,c1
+ MUL_ADD_C a2L,a2R,b2L,b2R,c2,c3,c1
+ MUL_ADD_C a1L,a1R,b3L,b3R,c2,c3,c1
+ STD c2,32(r_ptr)
+ COPY %r0,c2
+
+ MUL_ADD_C a2L,a2R,b3L,b3R,c3,c1,c2
+ MUL_ADD_C a3L,a3R,b2L,b2R,c3,c1,c2
+ STD c3,40(r_ptr)
+ COPY %r0,c3
+
+ MUL_ADD_C a3L,a3R,b3L,b3R,c1,c2,c3
+ STD c1,48(r_ptr)
+ STD c2,56(r_ptr)
+
+ .EXIT
+ FLDD -88(%sp),%fr13
+ FLDD -96(%sp),%fr12
+ LDD -104(%sp),%r6 ; restore r6
+ LDD -112(%sp),%r5 ; restore r5
+ LDD -120(%sp),%r4 ; restore r4
+ BVE (%rp)
+ LDD,MB -128(%sp),%r3
+
+ .PROCEND
+
+
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+ .SPACE $PRIVATE$,SORT=16
+ .IMPORT $global$,DATA
+ .SPACE $TEXT$
+ .SUBSPA $CODE$
+ .SUBSPA $LIT$,ACCESS=0x2c
+C$4
+ .ALIGN 8
+ .STRINGZ "Division would overflow (%d)\n"
+ .END
diff --git a/openssl-1.1.0h/crypto/bn/asm/parisc-mont.pl b/openssl-1.1.0h/crypto/bn/asm/parisc-mont.pl
new file mode 100644
index 0000000..8aa94e8
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/parisc-mont.pl
@@ -0,0 +1,1002 @@
+#! /usr/bin/env perl
+# Copyright 2009-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# On PA-7100LC this module performs ~90-50% better, less for longer
+# keys, than code generated by gcc 3.2 for PA-RISC 1.1. The latter
+# means that the compiler utilized the xmpyu instruction to perform
+# 32x32=64-bit multiplication, which in turn means that "baseline"
+# performance was optimal with respect to instruction set capabilities.
+# Fair comparison with the vendor compiler is problematic, because
+# OpenSSL doesn't define BN_LLONG [presumably] for historical reasons,
+# which drives the compiler toward 4 times 16x16=32-bit multiplications
+# [plus complementary shifts and additions] instead. This means that
+# you should observe an improvement of several times over code
+# generated by the vendor compiler for PA-RISC 1.1, but that "baseline"
+# is far from optimal. The actual improvement coefficient was never
+# collected on PA-7100LC, or any other 1.1 CPU, because I don't have
+# access to such a machine with the vendor compiler. But to give you a
+# taste, the PA-RISC 1.1 code path reportedly outperformed code
+# generated by cc +DA1.1 +O3 by a factor of ~5x on PA-8600.
+#
+# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
+# reportedly ~2x faster than vendor-compiler-generated code [according
+# to the comment in pa-risc2[W].s]. Here comes a catch. The execution
+# core of this implementation is actually a 32-bit one, in the sense
+# that it operates on 32-bit values. But pa-risc2[W].s operates on
+# arrays of 64-bit BN_ULONGs... How do they interoperate then? No
+# problem. This module picks the halves of the 64-bit values in
+# reverse order and pretends they were 32-bit BN_ULONGs. But can a
+# 32-bit core compete with "pure" 64-bit code such as pa-risc2[W].s
+# then? Well, the thing is that 32x32=64-bit multiplication is the
+# best even PA-RISC 2.0 can do, i.e. there is no "wider" multiplication
+# as on most other 64-bit platforms. This means that, even being
+# effectively 32-bit, this implementation performs the "64-bit"
+# computational task in the same number of arithmetic operations, most
+# notably multiplications. It requires more memory references, most
+# notably to tp[num], but this doesn't seem to exhaust memory port
+# capacity. And indeed, the dedicated PA-RISC 2.0 code path provides
+# virtually the same performance as pa-risc2[W].s: it's ~10% better
+# for the shortest key length and ~10% worse for the longest one.
+#
+# In case it wasn't clear: the module has two distinct code paths, a
+# PA-RISC 1.1 one and a PA-RISC 2.0 one. The latter features carry-free
+# 64-bit additions and 64-bit integer loads, not to mention specific
+# instruction scheduling. In a 64-bit build naturally only the 2.0 code
+# path is assembled. In a 32-bit application context both code paths
+# are assembled, a PA-RISC 2.0 CPU is detected at run time, and the
+# proper path is taken automatically. Also, in a 32-bit build the
+# module imposes a couple of limitations: vector lengths have to be
+# even and vector addresses have to be 64-bit aligned. Normally neither
+# is a problem: the most common key lengths are even and vectors are
+# commonly malloc-ed, which ensures alignment.
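+#
+# For reference, a hedged sketch of the contract and of the per-iteration
+# word recurrence this kind of Montgomery multiplication implements
+# (tp[] is the temporary vector mentioned above; the core works on
+# 32-bit words, hence the 2^32 radix, with num counted in 32-bit words):
+#
+#     rp[] = ap[] * bp[] * R^-1 mod np[],  with R = 2^(32*num)
+#
+#     for each word bp[i]:
+#         m    = (tp[0] + ap[0]*bp[i]) * n0  mod 2^32
+#         tp[] = (tp[] + ap[]*bp[i] + m*np[]) >> 32
+#
+# followed by a final conditional subtraction of np[] from tp[].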
+#
+# Special thanks to polarhome.com for providing HP-UX account on
+# PA-RISC 1.1 machine, and to correspondent who chose to remain
+# anonymous for testing the code on PA-RISC 2.0 machine.
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+
+$flavour = shift;
+$output = shift;
+
+open STDOUT,">$output";
+
+if ($flavour =~ /64/) {
+ $LEVEL ="2.0W";
+ $SIZE_T =8;
+ $FRAME_MARKER =80;
+ $SAVED_RP =16;
+ $PUSH ="std";
+ $PUSHMA ="std,ma";
+ $POP ="ldd";
+ $POPMB ="ldd,mb";
+ $BN_SZ =$SIZE_T;
+} else {
+ $LEVEL ="1.1"; #$LEVEL.="\n\t.ALLOW\t2.0";
+ $SIZE_T =4;
+ $FRAME_MARKER =48;
+ $SAVED_RP =20;
+ $PUSH ="stw";
+ $PUSHMA ="stwm";
+ $POP ="ldw";
+ $POPMB ="ldwm";
+ $BN_SZ =$SIZE_T;
+ if (open CONF,"<${dir}../../opensslconf.h") {
+ while(<CONF>) {
+ if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
+ $BN_SZ=8;
+ $LEVEL="2.0";
+ last;
+ }
+ }
+ close CONF;
+ }
+}
+
+$FRAME=8*$SIZE_T+$FRAME_MARKER; # 8 saved regs + frame marker
+ # [+ argument transfer]
+$LOCALS=$FRAME-$FRAME_MARKER;
+$FRAME+=32; # local variables
+
+$tp="%r31";
+$ti1="%r29";
+$ti0="%r28";
+
+$rp="%r26";
+$ap="%r25";
+$bp="%r24";
+$np="%r23";
+$n0="%r22"; # passed through stack in 32-bit
+$num="%r21"; # passed through stack in 32-bit
+$idx="%r20";
+$arrsz="%r19";
+
+$nm1="%r7";
+$nm0="%r6";
+$ab1="%r5";
+$ab0="%r4";
+
+$fp="%r3";
+$hi1="%r2";
+$hi0="%r1";
+
+$xfer=$n0; # accommodates [-16..15] offset in fld[dw]s
+
+$fm0="%fr4"; $fti=$fm0;
+$fbi="%fr5L";
+$fn0="%fr5R";
+$fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
+$fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
+
+$code=<<___;
+ .LEVEL $LEVEL
+ .SPACE \$TEXT\$
+ .SUBSPA \$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY
+
+ .EXPORT bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
+ .ALIGN 64
+bn_mul_mont
+ .PROC
+ .CALLINFO FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
+ .ENTRY
+ $PUSH %r2,-$SAVED_RP(%sp) ; standard prologue
+ $PUSHMA %r3,$FRAME(%sp)
+ $PUSH %r4,`-$FRAME+1*$SIZE_T`(%sp)
+ $PUSH %r5,`-$FRAME+2*$SIZE_T`(%sp)
+ $PUSH %r6,`-$FRAME+3*$SIZE_T`(%sp)
+ $PUSH %r7,`-$FRAME+4*$SIZE_T`(%sp)
+ $PUSH %r8,`-$FRAME+5*$SIZE_T`(%sp)
+ $PUSH %r9,`-$FRAME+6*$SIZE_T`(%sp)
+ $PUSH %r10,`-$FRAME+7*$SIZE_T`(%sp)
+ ldo -$FRAME(%sp),$fp
+___
+$code.=<<___ if ($SIZE_T==4);
+ ldw `-$FRAME_MARKER-4`($fp),$n0
+ ldw `-$FRAME_MARKER-8`($fp),$num
+ nop
+ nop ; alignment
+___
+$code.=<<___ if ($BN_SZ==4);
+ comiclr,<= 6,$num,%r0 ; are vectors long enough?
+ b L\$abort
+ ldi 0,%r28 ; signal "unhandled"
+ add,ev %r0,$num,$num ; is $num even?
+ b L\$abort
+ nop
+ or $ap,$np,$ti1
+ extru,= $ti1,31,3,%r0 ; are ap and np 64-bit aligned?
+ b L\$abort
+ nop
+ nop ; alignment
+ nop
+
+ fldws 0($n0),${fn0}
+ fldws,ma 4($bp),${fbi} ; bp[0]
+___
+$code.=<<___ if ($BN_SZ==8);
+ comib,> 3,$num,L\$abort ; are vectors long enough?
+ ldi 0,%r28 ; signal "unhandled"
+ addl $num,$num,$num ; I operate on 32-bit values
+
+ fldws 4($n0),${fn0} ; only low part of n0
+ fldws 4($bp),${fbi} ; bp[0] in flipped word order
+___
+$code.=<<___;
+ fldds 0($ap),${fai} ; ap[0,1]
+ fldds 0($np),${fni} ; np[0,1]
+
+ sh2addl $num,%r0,$arrsz
+ ldi 31,$hi0
+ ldo 36($arrsz),$hi1 ; space for tp[num+1]
+ andcm $hi1,$hi0,$hi1 ; align
+ addl $hi1,%sp,%sp
+ $PUSH $fp,-$SIZE_T(%sp)
+
+ ldo `$LOCALS+16`($fp),$xfer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[0]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[0]
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ addl $arrsz,$ap,$ap ; point at the end
+ addl $arrsz,$np,$np
+ subi 0,$arrsz,$idx ; j=0
+ ldo 8($idx),$idx ; j++++
+
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ fstds ${fab1},0($xfer)
+ fstds ${fnm1},8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+___
+$code.=<<___ if ($BN_SZ==4);
+ mtctl $hi0,%cr11 ; $hi0 still holds 31
+ extrd,u,*= $hi0,%sar,1,$hi0 ; executes on PA-RISC 1.0
+ b L\$parisc11
+ nop
+___
+$code.=<<___; # PA-RISC 2.0 code-path
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $ab0,63,32,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ ldo 8($idx),$idx ; j++++
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ extrd,u $nm0,31,32,$hi1
+
+L\$1st
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ stw $nm1,-4($tp) ; tp[j-1]
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,63,32,$ab1
+ addl $hi1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ addl $ab1,$nm1,$nm1
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ extrd,u $ab0,31,32,$hi0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[1]
+___
+$code.=<<___ if ($BN_SZ==8);
+ fldws 0($bp),${fbi} ; bp[1] in flipped word order
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $hi0,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ addl $hi1,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2]
+ flddx $idx($np),${fni} ; np[2]
+ ldo 8($idx),$idx ; j++++
+ ldd -16($xfer),$ab0 ; 33-bit value
+ ldd -8($xfer),$nm0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ extrd,u $ab0,31,32,$ti0 ; carry bit
+ extrd,u $ab0,63,32,$ab0
+ fstds ${fab1},0($xfer)
+ addl $ti0,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ addl $ab0,$nm0,$nm0 ; low part is discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ extrd,u $nm0,31,32,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+L\$inner
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ flddx $idx($np),${fni} ; np[j,j+1]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldw 4($tp),$ti0 ; tp[j]
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldd -16($xfer),$ab0
+ fstds ${fab0},-16($xfer)
+ addl $hi0,$ti0,$ti0
+ addl $ti0,$ab0,$ab0
+ ldd -8($xfer),$nm0
+ fstds ${fnm0},-8($xfer)
+ extrd,u $ab0,31,32,$hi0
+ extrd,u $nm1,31,32,$hi1
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ addl $ab0,$nm0,$nm0
+ stw,ma $nm0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner ; j++++
+ extrd,u $nm0,31,32,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldd 0($xfer),$ab1
+ fstds ${fab1},0($xfer)
+ addl $hi0,$ti1,$ti1
+ addl $ti1,$ab1,$ab1
+ ldd 8($xfer),$nm1
+ fstds ${fnm1},8($xfer)
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+ ldw 4($tp),$ti0 ; tp[j]
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ ldd -16($xfer),$ab0
+ ldd -8($xfer),$nm0
+ extrd,u $nm1,31,32,$hi1
+
+ addl $hi0,$ab0,$ab0
+ addl $ti0,$ab0,$ab0
+ stw $nm1,-4($tp) ; tp[j-1]
+ extrd,u $ab0,31,32,$hi0
+ ldw 8($tp),$ti1 ; tp[j]
+ extrd,u $ab0,63,32,$ab0
+ addl $hi1,$nm0,$nm0
+ ldd 0($xfer),$ab1
+ addl $ab0,$nm0,$nm0
+ ldd,mb 8($xfer),$nm1
+ extrd,u $nm0,31,32,$hi1
+ stw,ma $nm0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone ; i--
+ subi 0,$arrsz,$idx ; j=0
+___
+$code.=<<___ if ($BN_SZ==4);
+ fldws,ma 4($bp),${fbi} ; bp[i]
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldi 12,$ti0 ; bp[i] in flipped word order
+ addl,ev %r0,$num,$num
+ ldi -4,$ti0
+ addl $ti0,$bp,$bp
+ fldws 0($bp),${fbi}
+___
+$code.=<<___;
+ flddx $idx($ap),${fai} ; ap[0]
+ addl $hi0,$ab1,$ab1
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ addl $hi1,$hi0,$hi0
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone
+ addl $hi0,$ab1,$ab1
+ addl $ti1,$ab1,$ab1
+ extrd,u $ab1,31,32,$hi0
+ extrd,u $ab1,63,32,$ab1
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ addl $hi1,$nm1,$nm1
+ addl $ab1,$nm1,$nm1
+ extrd,u $nm1,31,32,$hi1
+ stw $nm1,-4($tp) ; tp[j-1]
+
+ addl $hi1,$hi0,$hi0
+ addl $ti0,$hi0,$hi0
+ extrd,u $hi0,31,32,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+___
+$code.=<<___ if ($BN_SZ==4);
+ ldws,ma 4($tp),$ti0
+ extru,= $rp,31,3,%r0 ; is rp 64-bit aligned?
+ b L\$sub_pa11
+ addl $tp,$arrsz,$tp
+L\$sub
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+___
+$code.=<<___ if ($BN_SZ==8);
+ ldd,ma 8($tp),$ti0
+L\$sub
+ ldd $idx($np),$hi0
+ shrpd $ti0,$ti0,32,$ti0 ; flip word order
+ std $ti0,-8($tp) ; save flipped value
+ sub,db $ti0,$hi0,$hi1
+ ldd,ma 8($tp),$ti0
+ addib,<> 8,$idx,L\$sub
+ std,ma $hi1,8($rp)
+
+ extrd,u $ti0,31,32,$ti0 ; carry in flipped word order
+ sub,db $ti0,%r0,$hi1
+ ldo -8($tp),$tp
+___
+$code.=<<___;
+ and $tp,$hi1,$ap
+ andcm $rp,$hi1,$bp
+ or $ap,$bp,$np
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy
+ ldd $idx($np),$hi0
+ std,ma %r0,8($tp)
+ addib,<> 8,$idx,.-8 ; L\$copy
+ std,ma $hi0,8($rp)
+___
+
+if ($BN_SZ==4) { # PA-RISC 1.1 code-path
+$ablo=$ab0;
+$abhi=$ab1;
+$nmlo0=$nm0;
+$nmhi0=$nm1;
+$nmlo1="%r9";
+$nmhi1="%r8";
+
+$code.=<<___;
+ b L\$done
+ nop
+
+ .ALIGN 8
+L\$parisc11
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -12($xfer),$ablo
+ ldw -16($xfer),$hi0
+ ldw -4($xfer),$nmlo0
+ ldw -8($xfer),$nmhi0
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+
+ ldo 8($idx),$idx ; j++++
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ addc %r0,$nmhi0,$hi1
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+ nop
+
+L\$1st_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[0]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 12($xfer),$nmlo1
+ addc %r0,$abhi,$hi0
+ ldw 8($xfer),$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fab1},0($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ fstds ${fnm1},8($xfer)
+ add $hi1,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$hi1
+ ldw -16($xfer),$abhi
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[0]
+ ldw -4($xfer),$nmlo0
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -8($xfer),$nmhi0
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ fstds ${fab0},-16($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ fstds ${fnm0},-8($xfer)
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$1st_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ ldw 8($xfer),$nmhi1
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[0]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ add $hi0,$ablo,$ablo
+ fstds ${fab1},0($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -8($xfer),$nmhi0
+ addc %r0,$nmhi1,$hi1
+ ldw -4($xfer),$nmlo0
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ ldo -1($num),$num ; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[1]
+ flddx $idx($ap),${fai} ; ap[0,1]
+ flddx $idx($np),${fni} ; np[0,1]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[1]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[1]
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ xmpyu ${fn0},${fab0}R,${fm0}
+ ldo `$LOCALS+32+4`($fp),$tp
+L\$outer_pa11
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[0]*m
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[1]*m
+ fstds ${fab0},-16($xfer) ; 33-bit value
+ fstds ${fnm0},-8($xfer)
+ flddx $idx($ap),${fai} ; ap[2,3]
+ flddx $idx($np),${fni} ; np[2,3]
+ ldw -16($xfer),$abhi ; carry bit actually
+ ldo 8($idx),$idx ; j++++
+ ldw -12($xfer),$ablo
+ ldw -8($xfer),$nmhi0
+ ldw -4($xfer),$nmlo0
+ ldw 0($xfer),$hi0 ; high part
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ fstds ${fab1},0($xfer)
+ addl $abhi,$hi0,$hi0 ; account carry bit
+ fstds ${fnm1},8($xfer)
+ add $ablo,$nmlo0,$nmlo0 ; discarded
+ ldw 0($tp),$ti1 ; tp[1]
+ addc %r0,$nmhi0,$hi1
+ fstds ${fab0},-16($xfer)
+ fstds ${fnm0},-8($xfer)
+ ldw 4($xfer),$ablo
+ ldw 0($xfer),$abhi
+
+L\$inner_pa11
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j+1]*bp[i]
+ flddx $idx($ap),${fai} ; ap[j,j+1]
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j+1]*m
+ flddx $idx($np),${fni} ; np[j,j+1]
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ ldw 12($xfer),$nmlo1
+ add $ti1,$ablo,$ablo
+ ldw 8($xfer),$nmhi1
+ addc %r0,$abhi,$hi0
+ fstds ${fab1},0($xfer)
+ add $ablo,$nmlo1,$nmlo1
+ fstds ${fnm1},8($xfer)
+ addc %r0,$nmhi1,$nmhi1
+ ldw -12($xfer),$ablo
+ add $hi1,$nmlo1,$nmlo1
+ ldw -16($xfer),$abhi
+ addc %r0,$nmhi1,$hi1
+
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[j]*bp[i]
+ ldw 8($tp),$ti1 ; tp[j]
+ xmpyu ${fni}L,${fm0}R,${fnm0} ; np[j]*m
+ ldw -4($xfer),$nmlo0
+ add $hi0,$ablo,$ablo
+ ldw -8($xfer),$nmhi0
+ addc %r0,$abhi,$abhi
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ add $ti0,$ablo,$ablo
+ fstds ${fab0},-16($xfer)
+ addc %r0,$abhi,$hi0
+ fstds ${fnm0},-8($xfer)
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldw 0($xfer),$abhi
+ add $hi1,$nmlo0,$nmlo0
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+ addib,<> 8,$idx,L\$inner_pa11 ; j++++
+ addc %r0,$nmhi0,$hi1
+
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[j]*bp[i]
+ ldw 12($xfer),$nmlo1
+ xmpyu ${fni}R,${fm0}R,${fnm1} ; np[j]*m
+ ldw 8($xfer),$nmhi1
+ add $hi0,$ablo,$ablo
+ ldw 4($tp),$ti0 ; tp[j]
+ addc %r0,$abhi,$abhi
+ fstds ${fab1},0($xfer)
+ add $ti1,$ablo,$ablo
+ fstds ${fnm1},8($xfer)
+ addc %r0,$abhi,$hi0
+ ldw -16($xfer),$abhi
+ add $ablo,$nmlo1,$nmlo1
+ ldw -12($xfer),$ablo
+ addc %r0,$nmhi1,$nmhi1
+ ldw -8($xfer),$nmhi0
+ add $hi1,$nmlo1,$nmlo1
+ ldw -4($xfer),$nmlo0
+ addc %r0,$nmhi1,$hi1
+
+ add $hi0,$ablo,$ablo
+ stw $nmlo1,-4($tp) ; tp[j-1]
+ addc %r0,$abhi,$abhi
+ add $ti0,$ablo,$ablo
+ ldw 8($tp),$ti1 ; tp[j]
+ addc %r0,$abhi,$hi0
+ ldw 0($xfer),$abhi
+ add $ablo,$nmlo0,$nmlo0
+ ldw 4($xfer),$ablo
+ addc %r0,$nmhi0,$nmhi0
+ ldws,mb 8($xfer),$nmhi1
+ add $hi1,$nmlo0,$nmlo0
+ ldw 4($xfer),$nmlo1
+ addc %r0,$nmhi0,$hi1
+ stws,ma $nmlo0,8($tp) ; tp[j-1]
+
+ addib,= -1,$num,L\$outerdone_pa11; i--
+ subi 0,$arrsz,$idx ; j=0
+
+ fldws,ma 4($bp),${fbi} ; bp[i]
+ flddx $idx($ap),${fai} ; ap[0]
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ flddx $idx($np),${fni} ; np[0]
+ fldws 8($xfer),${fti}R ; tp[0]
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldo 8($idx),$idx ; j++++
+ xmpyu ${fai}L,${fbi},${fab0} ; ap[0]*bp[i]
+ xmpyu ${fai}R,${fbi},${fab1} ; ap[1]*bp[i]
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ fstws,mb ${fab0}L,-8($xfer) ; save high part
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ fcpy,sgl %fr0,${fti}L ; zero high part
+ fcpy,sgl %fr0,${fab0}L
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ fcnvxf,dbl,dbl ${fti},${fti} ; 32-bit unsigned int -> double
+ fcnvxf,dbl,dbl ${fab0},${fab0}
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ fadd,dbl ${fti},${fab0},${fab0} ; add tp[0]
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ fcnvfx,dbl,dbl ${fab0},${fab0} ; double -> 33-bit unsigned int
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+ xmpyu ${fn0},${fab0}R,${fm0}
+
+ b L\$outer_pa11
+ ldo `$LOCALS+32+4`($fp),$tp
+
+L\$outerdone_pa11
+ add $hi0,$ablo,$ablo
+ addc %r0,$abhi,$abhi
+ add $ti1,$ablo,$ablo
+ addc %r0,$abhi,$hi0
+
+ ldw 4($tp),$ti0 ; tp[j]
+
+ add $hi1,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$nmhi1
+ add $ablo,$nmlo1,$nmlo1
+ addc %r0,$nmhi1,$hi1
+ stw $nmlo1,-4($tp) ; tp[j-1]
+
+ add $hi1,$hi0,$hi0
+ addc %r0,%r0,$hi1
+ add $ti0,$hi0,$hi0
+ addc %r0,$hi1,$hi1
+ stw $hi0,0($tp)
+ stw $hi1,4($tp)
+
+ ldo `$LOCALS+32+4`($fp),$tp
+ sub %r0,%r0,%r0 ; clear borrow
+ ldw -4($tp),$ti0
+ addl $tp,$arrsz,$tp
+L\$sub_pa11
+ ldwx $idx($np),$hi0
+ subb $ti0,$hi0,$hi1
+ ldwx $idx($tp),$ti0
+ addib,<> 4,$idx,L\$sub_pa11
+ stws,ma $hi1,4($rp)
+
+ subb $ti0,%r0,$hi1
+ ldo -4($tp),$tp
+ and $tp,$hi1,$ap
+ andcm $rp,$hi1,$bp
+ or $ap,$bp,$np
+
+ sub $rp,$arrsz,$rp ; rewind rp
+ subi 0,$arrsz,$idx
+ ldo `$LOCALS+32`($fp),$tp
+L\$copy_pa11
+ ldwx $idx($np),$hi0
+ stws,ma %r0,4($tp)
+ addib,<> 4,$idx,L\$copy_pa11
+ stws,ma $hi0,4($rp)
+
+ nop ; alignment
+L\$done
+___
+}
+
+$code.=<<___;
+ ldi 1,%r28 ; signal "handled"
+ ldo $FRAME($fp),%sp ; destroy tp[num+1]
+
+ $POP `-$FRAME-$SAVED_RP`(%sp),%r2 ; standard epilogue
+ $POP `-$FRAME+1*$SIZE_T`(%sp),%r4
+ $POP `-$FRAME+2*$SIZE_T`(%sp),%r5
+ $POP `-$FRAME+3*$SIZE_T`(%sp),%r6
+ $POP `-$FRAME+4*$SIZE_T`(%sp),%r7
+ $POP `-$FRAME+5*$SIZE_T`(%sp),%r8
+ $POP `-$FRAME+6*$SIZE_T`(%sp),%r9
+ $POP `-$FRAME+7*$SIZE_T`(%sp),%r10
+L\$abort
+ bv (%r2)
+ .EXIT
+ $POPMB -$FRAME(%sp),%r3
+ .PROCEND
+ .STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# Explicitly encode PA-RISC 2.0 instructions used in this module, so
+# that it can be compiled with .LEVEL 1.0. It should be noted that I
+# wouldn't have to do this if the GNU assembler understood the .ALLOW 2.0
+# directive...
+
+my $ldd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "ldd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 4
+ { my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/) # format 5
+ { my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
+ $opcode|=(($1&0xF)<<17)|(($1&0x10)<<12); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $std = sub {
+ my ($mod,$args) = @_;
+ my $orig = "std$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/) # format 6
+ { my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
+ $opcode|=(($2&0xF)<<1)|(($2&0x10)>>4); # encode offset
+ $opcode|=(1<<5) if ($mod =~ /^,m/);
+ $opcode|=(1<<13) if ($mod =~ /^,mb/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $extrd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "extrd$mod\t$args";
+
+ # I only have ",u" completer, it's implicitly encoded...
+ if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/) # format 15
+ { my $opcode=(0x36<<26)|($1<<21)|($4<<16);
+ my $len=32-$3;
+ $opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5); # encode pos
+ $opcode |= (($len&0x20)<<7)|($len&0x1f); # encode len
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/) # format 12
+ { my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
+ my $len=32-$2;
+ $opcode |= (($len&0x20)<<3)|($len&0x1f); # encode len
+ $opcode |= (1<<13) if ($mod =~ /,\**=/);
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $shrpd = sub {
+ my ($mod,$args) = @_;
+ my $orig = "shrpd$mod\t$args";
+
+ if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/) # format 14
+ { my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
+ my $cpos=63-$3;
+ $opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5); # encode sa
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
+ }
+ else { "\t".$orig; }
+};
+
+my $sub = sub {
+ my ($mod,$args) = @_;
+ my $orig = "sub$mod\t$args";
+
+ if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
+ my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
+ $opcode|=(1<<10); # e1
+ $opcode|=(1<<8); # e2
+ $opcode|=(1<<5); # d
+ sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
+ }
+ else { "\t".$orig; }
+};
+
+sub assemble {
+ my ($mnemonic,$mod,$args)=@_;
+ my $opcode = eval("\$$mnemonic");
+
+ ref($opcode) eq 'CODE' ? &$opcode($mod,$args) : "\t$mnemonic$mod\t$args";
+}
+
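+# As an illustration of the mechanism (not an exact encoding), a line
+# such as
+#
+#	std,ma	%r1,8(%r3)
+#
+# is matched by the $std handler above and replaced by a raw
+# ".WORD 0x........ ; std,ma %r1,8(%r3)" directive, while mnemonics
+# without a handler are passed through to the assembler verbatim.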
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ # flip word order in 64-bit mode...
+ s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
+ # assemble 2.0 instructions in 32-bit mode...
+ s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);
+
+ s/\bbv\b/bve/gm if ($SIZE_T==8);
+
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/ppc-mont.pl b/openssl-1.1.0h/crypto/bn/asm/ppc-mont.pl
new file mode 100644
index 0000000..5802260
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/ppc-mont.pl
@@ -0,0 +1,342 @@
+#! /usr/bin/env perl
+# Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# April 2006
+
+# "Teaser" Montgomery multiplication module for PowerPC. It's possible
+# to gain a bit more by modulo-scheduling the outer loop; a dedicated
+# squaring procedure should give a further 20%, and the code can be
+# adapted for 32-bit applications running on a 64-bit CPU. As for the
+# latter, it won't be able to achieve "native" 64-bit performance,
+# because in a 32-bit application context every addc instruction would
+# have to be expanded into addc, two right shifts by 32 and finally
+# adde, etc.
+# So far RSA *sign* performance improvement over pre-bn_mul_mont asm
+# for 64-bit application running on PPC970/G5 is:
+#
+# 512-bit +65%
+# 1024-bit +35%
+# 2048-bit +18%
+# 4096-bit +4%
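+#
+# As a rough illustration of the addc expansion mentioned above (a
+# sketch, not part of the module), one 64-bit addition done with two
+# 32-bit limbs looks like:
+#
+#	lo    = (a_lo + b_lo) & 0xffffffff;		# addc
+#	carry = (a_lo + b_lo) >> 32;			# carry out
+#	hi    = (a_hi + b_hi + carry) & 0xffffffff;	# adde
+#
+# i.e. several instructions where the 64-bit build needs a single addc.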
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $SIZE_T=4;
+ $RZONE= 224;
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $LDX= "lwzx"; # load indexed
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $STX= "stwx"; # store indexed
+ $STUX= "stwux"; # store indexed and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UCMP= "cmplw"; # unsigned compare
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $PUSH= $ST;
+ $POP= $LD;
+} elsif ($flavour =~ /64/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $SIZE_T=8;
+ $RZONE= 288;
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load
+ $LDU= "ldu"; # load and update
+ $LDX= "ldx"; # load indexed
+ $ST= "std"; # store
+ $STU= "stdu"; # store and update
+ $STX= "stdx"; # store indexed
+ $STUX= "stdux"; # store indexed and update
+ $UMULL= "mulld"; # unsigned multiply low
+ $UMULH= "mulhdu"; # unsigned multiply high
+ $UCMP= "cmpld"; # unsigned compare
+ $SHRI= "srdi"; # unsigned shift right by immediate
+ $PUSH= $ST;
+ $POP= $LD;
+} else { die "nonsense $flavour"; }
+
+$FRAME=8*$SIZE_T+$RZONE;
+$LOCALS=8*$SIZE_T;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$sp="r1";
+$toc="r2";
+$rp="r3"; $ovf="r3";
+$ap="r4";
+$bp="r5";
+$np="r6";
+$n0="r7";
+$num="r8";
+$rp="r9"; # $rp is reassigned
+$aj="r10";
+$nj="r11";
+$tj="r12";
+# non-volatile registers
+$i="r20";
+$j="r21";
+$tp="r22";
+$m0="r23";
+$m1="r24";
+$lo0="r25";
+$hi0="r26";
+$lo1="r27";
+$hi1="r28";
+$alo="r29";
+$ahi="r30";
+$nlo="r31";
+#
+$nhi="r0";
+
+$code=<<___;
+.machine "any"
+.text
+
+.globl .bn_mul_mont_int
+.align 4
+.bn_mul_mont_int:
+ cmpwi $num,4
+ mr $rp,r3 ; $rp is reassigned
+ li r3,0
+ bltlr
+___
+$code.=<<___ if ($BNSZ==4);
+ cmpwi $num,32 ; longer key performance is not better
+ bgelr
+___
+$code.=<<___;
+ slwi $num,$num,`log($BNSZ)/log(2)`
+ li $tj,-4096
+ addi $ovf,$num,$FRAME
+ subf $ovf,$ovf,$sp ; $sp-$ovf
+ and $ovf,$ovf,$tj ; minimize TLB usage
+ subf $ovf,$sp,$ovf ; $ovf-$sp
+ mr $tj,$sp
+ srwi $num,$num,`log($BNSZ)/log(2)`
+ $STUX $sp,$sp,$ovf
+
+ $PUSH r20,`-12*$SIZE_T`($tj)
+ $PUSH r21,`-11*$SIZE_T`($tj)
+ $PUSH r22,`-10*$SIZE_T`($tj)
+ $PUSH r23,`-9*$SIZE_T`($tj)
+ $PUSH r24,`-8*$SIZE_T`($tj)
+ $PUSH r25,`-7*$SIZE_T`($tj)
+ $PUSH r26,`-6*$SIZE_T`($tj)
+ $PUSH r27,`-5*$SIZE_T`($tj)
+ $PUSH r28,`-4*$SIZE_T`($tj)
+ $PUSH r29,`-3*$SIZE_T`($tj)
+ $PUSH r30,`-2*$SIZE_T`($tj)
+ $PUSH r31,`-1*$SIZE_T`($tj)
+
+ $LD $n0,0($n0) ; pull n0[0] value
+ addi $num,$num,-2 ; adjust $num for counter register
+
+ $LD $m0,0($bp) ; m0=bp[0]
+ $LD $aj,0($ap) ; ap[0]
+ addi $tp,$sp,$LOCALS
+ $UMULL $lo0,$aj,$m0 ; ap[0]*bp[0]
+ $UMULH $hi0,$aj,$m0
+
+ $LD $aj,$BNSZ($ap) ; ap[1]
+ $LD $nj,0($np) ; np[0]
+
+ $UMULL $m1,$lo0,$n0 ; "tp[0]"*n0
+
+ $UMULL $alo,$aj,$m0 ; ap[1]*bp[0]
+ $UMULH $ahi,$aj,$m0
+
+ $UMULL $lo1,$nj,$m1 ; np[0]*m1
+ $UMULH $hi1,$nj,$m1
+ $LD $nj,$BNSZ($np) ; np[1]
+ addc $lo1,$lo1,$lo0
+ addze $hi1,$hi1
+
+ $UMULL $nlo,$nj,$m1 ; np[1]*m1
+ $UMULH $nhi,$nj,$m1
+
+ mtctr $num
+ li $j,`2*$BNSZ`
+.align 4
+L1st:
+ $LDX $aj,$ap,$j ; ap[j]
+ addc $lo0,$alo,$hi0
+ $LDX $nj,$np,$j ; np[j]
+ addze $hi0,$ahi
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[0]
+ addc $lo1,$nlo,$hi1
+ $UMULH $ahi,$aj,$m0
+ addze $hi1,$nhi
+ $UMULL $nlo,$nj,$m1 ; np[j]*m1
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
+ $UMULH $nhi,$nj,$m1
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ addi $j,$j,$BNSZ ; j++
+ addi $tp,$tp,$BNSZ ; tp++
+ bdnz L1st
+;L1st
+ addc $lo0,$alo,$hi0
+ addze $hi0,$ahi
+
+ addc $lo1,$nlo,$hi1
+ addze $hi1,$nhi
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[0]
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ li $ovf,0
+ addc $hi1,$hi1,$hi0
+ addze $ovf,$ovf ; upmost overflow bit
+ $ST $hi1,$BNSZ($tp)
+
+ li $i,$BNSZ
+.align 4
+Louter:
+ $LDX $m0,$bp,$i ; m0=bp[i]
+ $LD $aj,0($ap) ; ap[0]
+ addi $tp,$sp,$LOCALS
+ $LD $tj,$LOCALS($sp); tp[0]
+ $UMULL $lo0,$aj,$m0 ; ap[0]*bp[i]
+ $UMULH $hi0,$aj,$m0
+ $LD $aj,$BNSZ($ap) ; ap[1]
+ $LD $nj,0($np) ; np[0]
+ addc $lo0,$lo0,$tj ; ap[0]*bp[i]+tp[0]
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
+ addze $hi0,$hi0
+ $UMULL $m1,$lo0,$n0 ; tp[0]*n0
+ $UMULH $ahi,$aj,$m0
+ $UMULL $lo1,$nj,$m1 ; np[0]*m1
+ $UMULH $hi1,$nj,$m1
+ $LD $nj,$BNSZ($np) ; np[1]
+ addc $lo1,$lo1,$lo0
+ $UMULL $nlo,$nj,$m1 ; np[1]*m1
+ addze $hi1,$hi1
+ $UMULH $nhi,$nj,$m1
+
+ mtctr $num
+ li $j,`2*$BNSZ`
+.align 4
+Linner:
+ $LDX $aj,$ap,$j ; ap[j]
+ addc $lo0,$alo,$hi0
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addze $hi0,$ahi
+ $LDX $nj,$np,$j ; np[j]
+ addc $lo1,$nlo,$hi1
+ $UMULL $alo,$aj,$m0 ; ap[j]*bp[i]
+ addze $hi1,$nhi
+ $UMULH $ahi,$aj,$m0
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ $UMULL $nlo,$nj,$m1 ; np[j]*m1
+ addze $hi0,$hi0
+ $UMULH $nhi,$nj,$m1
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addi $j,$j,$BNSZ ; j++
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+ addi $tp,$tp,$BNSZ ; tp++
+ bdnz Linner
+;Linner
+ $LD $tj,$BNSZ($tp) ; tp[j]
+ addc $lo0,$alo,$hi0
+ addze $hi0,$ahi
+ addc $lo0,$lo0,$tj ; ap[j]*bp[i]+tp[j]
+ addze $hi0,$hi0
+
+ addc $lo1,$nlo,$hi1
+ addze $hi1,$nhi
+ addc $lo1,$lo1,$lo0 ; np[j]*m1+ap[j]*bp[i]+tp[j]
+ addze $hi1,$hi1
+ $ST $lo1,0($tp) ; tp[j-1]
+
+ addic $ovf,$ovf,-1 ; move upmost overflow to XER[CA]
+ li $ovf,0
+ adde $hi1,$hi1,$hi0
+ addze $ovf,$ovf
+ $ST $hi1,$BNSZ($tp)
+;
+ slwi $tj,$num,`log($BNSZ)/log(2)`
+ $UCMP $i,$tj
+ addi $i,$i,$BNSZ
+ ble Louter
+
+ addi $num,$num,2 ; restore $num
+ subfc $j,$j,$j ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,$LOCALS
+ mtctr $num
+
+.align 4
+Lsub: $LDX $tj,$tp,$j
+ $LDX $nj,$np,$j
+ subfe $aj,$nj,$tj ; tp[j]-np[j]
+ $STX $aj,$rp,$j
+ addi $j,$j,$BNSZ
+ bdnz Lsub
+
+ li $j,0
+ mtctr $num
+ subfe $ovf,$j,$ovf ; handle upmost overflow bit
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ $LDX $tj,$ap,$j
+ $STX $tj,$rp,$j
+ $STX $j,$tp,$j ; zap at once
+ addi $j,$j,$BNSZ
+ bdnz Lcopy
+
+ $POP $tj,0($sp)
+ li r3,1
+ $POP r20,`-12*$SIZE_T`($tj)
+ $POP r21,`-11*$SIZE_T`($tj)
+ $POP r22,`-10*$SIZE_T`($tj)
+ $POP r23,`-9*$SIZE_T`($tj)
+ $POP r24,`-8*$SIZE_T`($tj)
+ $POP r25,`-7*$SIZE_T`($tj)
+ $POP r26,`-6*$SIZE_T`($tj)
+ $POP r27,`-5*$SIZE_T`($tj)
+ $POP r28,`-4*$SIZE_T`($tj)
+ $POP r29,`-3*$SIZE_T`($tj)
+ $POP r30,`-2*$SIZE_T`($tj)
+ $POP r31,`-1*$SIZE_T`($tj)
+ mr $sp,$tj
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,12,6,0
+ .long 0
+.size .bn_mul_mont_int,.-.bn_mul_mont_int
+
+.asciz "Montgomery Multiplication for PPC, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/ppc.pl b/openssl-1.1.0h/crypto/bn/asm/ppc.pl
new file mode 100644
index 0000000..4ea534a
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/ppc.pl
@@ -0,0 +1,2014 @@
+#! /usr/bin/env perl
+# Copyright 2004-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+# Implemented as a Perl wrapper as we want to support several different
+# architectures with a single file. We pick up the target based on the
+# file name we are asked to generate.
+#
+# It should be noted though that this perl code is nothing like
+# <openssl>/crypto/perlasm/x86*. In this case perl is used pretty much
+# as a pre-processor to cover for platform differences in name decoration,
+# linker tables, 32-/64-bit instruction sets...
+#
+# As you might know, there are several PowerPC ABIs in use. Most notably,
+# Linux and AIX use different 32-bit ABIs. The good news is that these ABIs
+# are similar enough to implement leaf(!) functions, which would be ABI
+# neutral. And that's what you find here: ABI neutral leaf functions.
+# In case you wonder what that is...
+#
+# AIX performance
+#
+# MEASUREMENTS WITH cc ON a 200 MHz PowerPC 604e.
+#
+# The following is the performance of 32-bit compiler
+# generated code:
+#
+# OpenSSL 0.9.6c 21 dec 2001
+# built on: Tue Jun 11 11:06:51 EDT 2002
+# options:bn(64,32) ...
+#compiler: cc -DTHREADS -DAIX -DB_ENDIAN -DBN_LLONG -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0098s 0.0009s 102.0 1170.6
+#rsa 1024 bits 0.0507s 0.0026s 19.7 387.5
+#rsa 2048 bits 0.3036s 0.0085s 3.3 117.1
+#rsa 4096 bits 2.0040s 0.0299s 0.5 33.4
+#dsa 512 bits 0.0087s 0.0106s 114.3 94.5
+#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0056s 0.0005s 178.6 2049.2
+#rsa 1024 bits 0.0283s 0.0015s 35.3 674.1
+#rsa 2048 bits 0.1744s 0.0050s 5.7 201.2
+#rsa 4096 bits 1.1644s 0.0179s 0.9 55.7
+#dsa 512 bits 0.0052s 0.0062s 191.6 162.0
+#dsa 1024 bits 0.0149s 0.0180s 67.0 55.5
+#
+# Number of operations increases by almost 75%
+#
+# Here are performance numbers for 64-bit compiler
+# generated code:
+#
+# OpenSSL 0.9.6g [engine] 9 Aug 2002
+# built on: Fri Apr 18 16:59:20 EDT 2003
+# options:bn(64,64) ...
+# compiler: cc -DTHREADS -D_REENTRANT -q64 -DB_ENDIAN -O3
+# sign verify sign/s verify/s
+#rsa 512 bits 0.0028s 0.0003s 357.1 3844.4
+#rsa 1024 bits 0.0148s 0.0008s 67.5 1239.7
+#rsa 2048 bits 0.0963s 0.0028s 10.4 353.0
+#rsa 4096 bits 0.6538s 0.0102s 1.5 98.1
+#dsa 512 bits 0.0026s 0.0032s 382.5 313.7
+#dsa 1024 bits 0.0081s 0.0099s 122.8 100.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0020s 0.0002s 510.4 6273.7
+#rsa 1024 bits 0.0088s 0.0005s 114.1 2128.3
+#rsa 2048 bits 0.0540s 0.0016s 18.5 622.5
+#rsa 4096 bits 0.3700s 0.0058s 2.7 171.0
+#dsa 512 bits 0.0016s 0.0020s 610.7 507.1
+#dsa 1024 bits 0.0047s 0.0058s 212.5 173.2
+#
+# Again, performance increases by about 75%
+#
+# Mac OS X, Apple G5 1.8GHz (Note this is 32 bit code)
+# OpenSSL 0.9.7c 30 Sep 2003
+#
+# Original code.
+#
+#rsa 512 bits 0.0011s 0.0001s 906.1 11012.5
+#rsa 1024 bits 0.0060s 0.0003s 166.6 3363.1
+#rsa 2048 bits 0.0370s 0.0010s 27.1 982.4
+#rsa 4096 bits 0.2426s 0.0036s 4.1 280.4
+#dsa 512 bits 0.0010s 0.0012s 1038.1 841.5
+#dsa 1024 bits 0.0030s 0.0037s 329.6 269.7
+#dsa 2048 bits 0.0101s 0.0127s 98.9 78.6
+#
+# Same benchmark with this assembler code:
+#
+#rsa 512 bits 0.0007s 0.0001s 1416.2 16645.9
+#rsa 1024 bits 0.0036s 0.0002s 274.4 5380.6
+#rsa 2048 bits 0.0222s 0.0006s 45.1 1589.5
+#rsa 4096 bits 0.1469s 0.0022s 6.8 449.6
+#dsa 512 bits 0.0006s 0.0007s 1664.2 1376.2
+#dsa 1024 bits 0.0018s 0.0023s 545.0 442.2
+#dsa 2048 bits 0.0061s 0.0075s 163.5 132.8
+#
+# Performance increase of ~60%
+#
+# If you have comments or suggestions to improve code send
+# me a note at schari@us.ibm.com
+#
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $BITS= 32;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc\"";
+
+ $LD= "lwz"; # load
+ $LDU= "lwzu"; # load and update
+ $ST= "stw"; # store
+ $STU= "stwu"; # store and update
+ $UMULL= "mullw"; # unsigned multiply low
+ $UMULH= "mulhwu"; # unsigned multiply high
+ $UDIV= "divwu"; # unsigned divide
+ $UCMPI= "cmplwi"; # unsigned compare with immediate
+ $UCMP= "cmplw"; # unsigned compare
+ $CNTLZ= "cntlzw"; # count leading zeros
+ $SHL= "slw"; # shift left
+ $SHR= "srw"; # unsigned shift right
+ $SHRI= "srwi"; # unsigned shift right by immediate
+ $SHLI= "slwi"; # shift left by immediate
+ $CLRU= "clrlwi"; # clear upper bits
+ $INSR= "insrwi"; # insert right
+ $ROTL= "rotlwi"; # rotate left by immediate
+ $TR= "tw"; # conditional trap
+} elsif ($flavour =~ /64/) {
+ $BITS= 64;
+ $BNSZ= $BITS/8;
+ $ISA= "\"ppc64\"";
+
+ # same as above, but 64-bit mnemonics...
+ $LD= "ld"; # load
+ $LDU= "ldu"; # load and update
+ $ST= "std"; # store
+ $STU= "stdu"; # store and update
+ $UMULL= "mulld"; # unsigned multiply low
+ $UMULH= "mulhdu"; # unsigned multiply high
+ $UDIV= "divdu"; # unsigned divide
+ $UCMPI= "cmpldi"; # unsigned compare with immediate
+ $UCMP= "cmpld"; # unsigned compare
+ $CNTLZ= "cntlzd"; # count leading zeros
+ $SHL= "sld"; # shift left
+ $SHR= "srd"; # unsigned shift right
+ $SHRI= "srdi"; # unsigned shift right by immediate
+ $SHLI= "sldi"; # shift left by immediate
+ $CLRU= "clrldi"; # clear upper bits
+ $INSR= "insrdi"; # insert right
+ $ROTL= "rotldi"; # rotate left by immediate
+ $TR= "td"; # conditional trap
+} else { die "nonsense $flavour"; }
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$data=<<EOF;
+#--------------------------------------------------------------------
+#
+#
+#
+#
+# File: ppc32.s
+#
+# Created by: Suresh Chari
+# IBM Thomas J. Watson Research Library
+# Hawthorne, NY
+#
+#
+# Description: Optimized assembly routines for OpenSSL crypto
+#		on the 32-bit PowerPC platform.
+#
+#
+# Version History
+#
+# 2. Fixed bn_add,bn_sub and bn_div_words, added comments,
+# cleaned up code. Also made a single version which can
+# be used for both the AIX and Linux compilers. See NOTE
+# below.
+# 12/05/03 Suresh Chari
+# (with lots of help from) Andy Polyakov
+##
+# 1. Initial version 10/20/02 Suresh Chari
+#
+#
+# The following file works for the xlc,cc
+# and gcc compilers.
+#
+# NOTE: To get the file to link correctly with the gcc compiler
+# you have to change the names of the routines and remove
+# the first .(dot) character. This should automatically
+# be done in the build process.
+#
+# Hand optimized assembly code for the following routines
+#
+# bn_sqr_comba4
+# bn_sqr_comba8
+# bn_mul_comba4
+# bn_mul_comba8
+# bn_sub_words
+# bn_add_words
+# bn_div_words
+# bn_sqr_words
+# bn_mul_words
+# bn_mul_add_words
+#
+# NOTE: It is possible to optimize this code more for
+# specific PowerPC or Power architectures. On the Northstar
+# architecture the optimizations in this file do
+# NOT provide much improvement.
+#
+# If you have comments or suggestions to improve code send
+# me a note at schari\@us.ibm.com
+#
+#--------------------------------------------------------------------------
+#
+# Defines to be used in the assembly code.
+#
+#.set r0,0 # we use it as storage for value of 0
+#.set SP,1 # preserved
+#.set RTOC,2 # preserved
+#.set r3,3 # 1st argument/return value
+#.set r4,4 # 2nd argument/volatile register
+#.set r5,5 # 3rd argument/volatile register
+#.set r6,6 # ...
+#.set r7,7
+#.set r8,8
+#.set r9,9
+#.set r10,10
+#.set r11,11
+#.set r12,12
+#.set r13,13 # not used, nor any other "below" it...
+
+# Declare function names to be global
+# NOTE: For gcc these names MUST be changed to remove
+# the first . i.e. for example change ".bn_sqr_comba4"
+# to "bn_sqr_comba4". This should be automatically done
+# in the build.
+
+ .globl .bn_sqr_comba4
+ .globl .bn_sqr_comba8
+ .globl .bn_mul_comba4
+ .globl .bn_mul_comba8
+ .globl .bn_sub_words
+ .globl .bn_add_words
+ .globl .bn_div_words
+ .globl .bn_sqr_words
+ .globl .bn_mul_words
+ .globl .bn_mul_add_words
+
+# .text section
+
+ .machine "any"
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_comba4" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_sqr_comba4:
+#
+# Optimized version of bn_sqr_comba4.
+#
+# void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
+# r3 contains r
+# r4 contains a
+#
+# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
+#
+# r5,r6 are the two BN_ULONGs being multiplied.
+# r7,r8 are the results of the 32x32 giving 64 bit multiply.
+# r9,r10, r11 are the equivalents of c1,c2, c3.
+# Here's the assembly
+#
+#
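+# Roughly speaking, each sqr_add_c(a,i,...) step below accumulates
+# a[i]*a[i] into the running three-word column sum (c1,c2,c3), and each
+# sqr_add_c2(a,i,j,...) accumulates 2*a[i]*a[j]; e.g. r[1] ends up as
+# the low word of 2*a[0]*a[1] plus the high word carried over from the
+# a[0]*a[0] column.
+#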
+ xor r0,r0,r0 # set r0 = 0. Used in the addze
+ # instructions below
+
+ #sqr_add_c(a,0,c1,c2,c3)
+ $LD r5,`0*$BNSZ`(r4)
+ $UMULL r9,r5,r5
+ $UMULH r10,r5,r5 #in first iteration. No need
+ #to add since c1=c2=c3=0.
+ # Note c3(r11) is NOT set to 0
+ # but will be.
+
+ $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
+ # sqr_add_c2(a,1,0,c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7 # compute (r7,r8)=2*(r7,r8)
+ adde r8,r8,r8
+ addze r9,r0 # catch carry if any.
+ # r9= r0(=0) and carry
+
+ addc r10,r7,r10 # now add to temp result.
+ addze r11,r8 # r8 added to r11 which is 0
+ addze r9,r9
+
+ $ST r10,`1*$BNSZ`(r3) #r[1]=c2;
+ #sqr_add_c(a,1,c3,c1,c2)
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,2,0,c3,c1,c2)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`2*$BNSZ`(r3) #r[2]=c3
+ #sqr_add_c2(a,3,0,c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r11,r0
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,2,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`3*$BNSZ`(r3) #r[3]=c1
+ #sqr_add_c(a,2,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,3,1,c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`4*$BNSZ`(r3) #r[4]=c2
+ #sqr_add_c2(a,3,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r7,r7,r7
+ adde r8,r8,r8
+ addze r10,r0
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`5*$BNSZ`(r3) #r[5] = c3
+ #sqr_add_c(a,3,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+
+ $ST r9,`6*$BNSZ`(r3) #r[6]=c1
+ $ST r10,`7*$BNSZ`(r3) #r[7]=c2
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .bn_sqr_comba4,.-.bn_sqr_comba4
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_comba8" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_sqr_comba8:
+#
+# This is an optimized version of the bn_sqr_comba8 routine.
+# Tightly uses the adde instruction
+#
+#
+# void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
+# r3 contains r
+# r4 contains a
+#
+# Freely use registers r5,r6,r7,r8,r9,r10,r11 as follows:
+#
+# r5,r6 are the two BN_ULONGs being multiplied.
+# r7,r8 are the results of the 32x32 giving 64 bit multiply.
+# r9,r10, r11 are the equivalents of c1,c2, c3.
+#
+# Possible optimization of loading all 8 longs of a into registers
+# doesn't provide any speedup
+#
+
+ xor r0,r0,r0 #set r0 = 0.Used in addze
+ #instructions below.
+
+ #sqr_add_c(a,0,c1,c2,c3);
+ $LD r5,`0*$BNSZ`(r4)
+ $UMULL r9,r5,r5 #1st iteration: no carries.
+ $UMULH r10,r5,r5
+ $ST r9,`0*$BNSZ`(r3) # r[0]=c1;
+ #sqr_add_c2(a,1,0,c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10 #add the two register number
+ adde r11,r8,r0 # (r8,r7) to the three register
+ addze r9,r0 # number (r9,r11,r10).NOTE:r0=0
+
+ addc r10,r7,r10 #add the two register number
+ adde r11,r8,r11 # (r8,r7) to the three register
+ addze r9,r9 # number (r9,r11,r10).
+
+ $ST r10,`1*$BNSZ`(r3) # r[1]=c2
+
+ #sqr_add_c(a,1,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,2,0,c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ $ST r11,`2*$BNSZ`(r3) #r[2]=c3
+ #sqr_add_c2(a,3,0,c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4) #r6 = a[3]. r5 is already a[0].
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,2,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ $ST r9,`3*$BNSZ`(r3) #r[3]=c1;
+ #sqr_add_c(a,2,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,3,1,c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,4,0,c2,c3,c1);
+ $LD r5,`0*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`4*$BNSZ`(r3) #r[4]=c2;
+ #sqr_add_c2(a,5,0,c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,4,1,c3,c1,c2);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,3,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`3*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`5*$BNSZ`(r3) #r[5]=c3;
+ #sqr_add_c(a,3,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ #sqr_add_c2(a,4,2,c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,5,1,c1,c2,c3);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,6,0,c1,c2,c3);
+ $LD r5,`0*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`6*$BNSZ`(r3) #r[6]=c1;
+ #sqr_add_c2(a,7,0,c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,6,1,c2,c3,c1);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,5,2,c2,c3,c1);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,4,3,c2,c3,c1);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`7*$BNSZ`(r3) #r[7]=c2;
+ #sqr_add_c(a,4,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ #sqr_add_c2(a,5,3,c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,6,2,c3,c1,c2);
+ $LD r5,`2*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,7,1,c3,c1,c2);
+ $LD r5,`1*$BNSZ`(r4)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`8*$BNSZ`(r3) #r[8]=c3;
+ #sqr_add_c2(a,7,2,c1,c2,c3);
+ $LD r5,`2*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,6,3,c1,c2,c3);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ #sqr_add_c2(a,5,4,c1,c2,c3);
+ $LD r5,`4*$BNSZ`(r4)
+ $LD r6,`5*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`9*$BNSZ`(r3) #r[9]=c1;
+ #sqr_add_c(a,5,c2,c3,c1);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ #sqr_add_c2(a,6,4,c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ #sqr_add_c2(a,7,3,c2,c3,c1);
+ $LD r5,`3*$BNSZ`(r4)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`10*$BNSZ`(r3) #r[10]=c2;
+ #sqr_add_c2(a,7,4,c3,c1,c2);
+ $LD r5,`4*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r0
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ #sqr_add_c2(a,6,5,c3,c1,c2);
+ $LD r5,`5*$BNSZ`(r4)
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ addc r11,r7,r11
+ adde r9,r8,r9
+ addze r10,r10
+ $ST r11,`11*$BNSZ`(r3) #r[11]=c3;
+ #sqr_add_c(a,6,c1,c2,c3);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r0
+ #sqr_add_c2(a,7,5,c1,c2,c3)
+ $LD r6,`7*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ addc r9,r7,r9
+ adde r10,r8,r10
+ addze r11,r11
+ $ST r9,`12*$BNSZ`(r3) #r[12]=c1;
+
+ #sqr_add_c2(a,7,6,c2,c3,c1)
+ $LD r5,`6*$BNSZ`(r4)
+ $UMULL r7,r5,r6
+ $UMULH r8,r5,r6
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r0
+ addc r10,r7,r10
+ adde r11,r8,r11
+ addze r9,r9
+ $ST r10,`13*$BNSZ`(r3) #r[13]=c2;
+ #sqr_add_c(a,7,c3,c1,c2);
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ addc r11,r7,r11
+ adde r9,r8,r9
+ $ST r11,`14*$BNSZ`(r3) #r[14]=c3;
+ $ST r9, `15*$BNSZ`(r3) #r[15]=c1;
+
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size .bn_sqr_comba8,.-.bn_sqr_comba8
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_comba4" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_comba4:
+#
+# This is an optimized version of the bn_mul_comba4 routine.
+#
+# void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+# r3 contains r
+# r4 contains a
+# r5 contains b
+# r6, r7 are the 2 BN_ULONGs being multiplied.
+# r8, r9 are the results of the 32x32 giving 64 multiply.
+# r10, r11, r12 are the equivalents of c1, c2, and c3.
+#
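+# Roughly speaking, each mul_add_c(a[i],b[j],...) step below adds the
+# double-word product a[i]*b[j] into the running three-word accumulator
+# (c1,c2,c3), so that r[k] collects the low word of the sum of all
+# products a[i]*b[j] with i+j==k plus the carry from the previous
+# column.
+#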
+ xor r0,r0,r0 #r0=0. Used in addze below.
+ #mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r10,r6,r7
+ $UMULH r11,r6,r7
+ $ST r10,`0*$BNSZ`(r3) #r[0]=c1
+ #mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r0
+ addze r10,r0
+ #mul_add_c(a[1],b[0],c2,c3,c1);
+ $LD r6, `1*$BNSZ`(r4)
+ $LD r7, `0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ $ST r11,`1*$BNSZ`(r3) #r[1]=c2
+ #mul_add_c(a[2],b[0],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r0
+ #mul_add_c(a[1],b[1],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ #mul_add_c(a[0],b[2],c3,c1,c2);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ $ST r12,`2*$BNSZ`(r3) #r[2]=c3
+ #mul_add_c(a[0],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r0
+ #mul_add_c(a[1],b[2],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ #mul_add_c(a[2],b[1],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ #mul_add_c(a[3],b[0],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+ addze r12,r12
+ $ST r10,`3*$BNSZ`(r3) #r[3]=c1
+ #mul_add_c(a[3],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r0
+ #mul_add_c(a[2],b[2],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ #mul_add_c(a[1],b[3],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r8,r11
+ adde r12,r9,r12
+ addze r10,r10
+ $ST r11,`4*$BNSZ`(r3) #r[4]=c2
+ #mul_add_c(a[2],b[3],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r0
+ #mul_add_c(a[3],b[2],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r8,r12
+ adde r10,r9,r10
+ addze r11,r11
+ $ST r12,`5*$BNSZ`(r3) #r[5]=c3
+ #mul_add_c(a[3],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r8,r10
+ adde r11,r9,r11
+
+ $ST r10,`6*$BNSZ`(r3) #r[6]=c1
+ $ST r11,`7*$BNSZ`(r3) #r[7]=c2
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size .bn_mul_comba4,.-.bn_mul_comba4
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_comba8" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_comba8:
+#
+# Optimized version of the bn_mul_comba8 routine.
+#
+# void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+# r3 contains r
+# r4 contains a
+# r5 contains b
+# r6, r7 are the 2 BN_ULONGs being multiplied.
+# r8, r9 are the results of the 32x32 giving 64 multiply.
+# r10, r11, r12 are the equivalents of c1, c2, and c3.
+#
+ xor r0,r0,r0 #r0=0. Used in addze below.
+
+ #mul_add_c(a[0],b[0],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4) #a[0]
+ $LD r7,`0*$BNSZ`(r5) #b[0]
+ $UMULL r10,r6,r7
+ $UMULH r11,r6,r7
+ $ST r10,`0*$BNSZ`(r3) #r[0]=c1;
+ #mul_add_c(a[0],b[1],c2,c3,c1);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ addze r12,r9 # since we didn't set r12 to zero before.
+ addze r10,r0
+ #mul_add_c(a[1],b[0],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`1*$BNSZ`(r3) #r[1]=c2;
+ #mul_add_c(a[2],b[0],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[1],b[1],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[0],b[2],c3,c1,c2);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`2*$BNSZ`(r3) #r[2]=c3;
+ #mul_add_c(a[0],b[3],c1,c2,c3);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[1],b[2],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+
+ #mul_add_c(a[2],b[1],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[3],b[0],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`3*$BNSZ`(r3) #r[3]=c1;
+ #mul_add_c(a[4],b[0],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[3],b[1],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[2],b[2],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[1],b[3],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[0],b[4],c2,c3,c1);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`4*$BNSZ`(r3) #r[4]=c2;
+ #mul_add_c(a[0],b[5],c3,c1,c2);
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[1],b[4],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[2],b[3],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[3],b[2],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[4],b[1],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[5],b[0],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`5*$BNSZ`(r3) #r[5]=c3;
+ #mul_add_c(a[6],b[0],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[5],b[1],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[4],b[2],c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[3],b[3],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[2],b[4],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[1],b[5],c1,c2,c3);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[0],b[6],c1,c2,c3);
+ $LD r6,`0*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`6*$BNSZ`(r3) #r[6]=c1;
+ #mul_add_c(a[0],b[7],c2,c3,c1);
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[1],b[6],c2,c3,c1);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[2],b[5],c2,c3,c1);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[3],b[4],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[4],b[3],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[5],b[2],c2,c3,c1);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[6],b[1],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[7],b[0],c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`0*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`7*$BNSZ`(r3) #r[7]=c2;
+ #mul_add_c(a[7],b[1],c3,c1,c2);
+ $LD r7,`1*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[6],b[2],c3,c1,c2);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[5],b[3],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[4],b[4],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[3],b[5],c3,c1,c2);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[2],b[6],c3,c1,c2);
+ $LD r6,`2*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[1],b[7],c3,c1,c2);
+ $LD r6,`1*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`8*$BNSZ`(r3) #r[8]=c3;
+ #mul_add_c(a[2],b[7],c1,c2,c3);
+ $LD r6,`2*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[3],b[6],c1,c2,c3);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[4],b[5],c1,c2,c3);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[5],b[4],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[6],b[3],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[7],b[2],c1,c2,c3);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`2*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`9*$BNSZ`(r3) #r[9]=c1;
+ #mul_add_c(a[7],b[3],c2,c3,c1);
+ $LD r7,`3*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[6],b[4],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[5],b[5],c2,c3,c1);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[4],b[6],c2,c3,c1);
+ $LD r6,`4*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ #mul_add_c(a[3],b[7],c2,c3,c1);
+ $LD r6,`3*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`10*$BNSZ`(r3) #r[10]=c2;
+ #mul_add_c(a[4],b[7],c3,c1,c2);
+ $LD r6,`4*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r0
+ #mul_add_c(a[5],b[6],c3,c1,c2);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[6],b[5],c3,c1,c2);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ #mul_add_c(a[7],b[4],c3,c1,c2);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`4*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ addze r11,r11
+ $ST r12,`11*$BNSZ`(r3) #r[11]=c3;
+ #mul_add_c(a[7],b[5],c1,c2,c3);
+ $LD r7,`5*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r0
+ #mul_add_c(a[6],b[6],c1,c2,c3);
+ $LD r6,`6*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ #mul_add_c(a[5],b[7],c1,c2,c3);
+ $LD r6,`5*$BNSZ`(r4)
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r10,r10,r8
+ adde r11,r11,r9
+ addze r12,r12
+ $ST r10,`12*$BNSZ`(r3) #r[12]=c1;
+ #mul_add_c(a[6],b[7],c2,c3,c1);
+ $LD r6,`6*$BNSZ`(r4)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r0
+ #mul_add_c(a[7],b[6],c2,c3,c1);
+ $LD r6,`7*$BNSZ`(r4)
+ $LD r7,`6*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r11,r11,r8
+ adde r12,r12,r9
+ addze r10,r10
+ $ST r11,`13*$BNSZ`(r3) #r[13]=c2;
+ #mul_add_c(a[7],b[7],c3,c1,c2);
+ $LD r7,`7*$BNSZ`(r5)
+ $UMULL r8,r6,r7
+ $UMULH r9,r6,r7
+ addc r12,r12,r8
+ adde r10,r10,r9
+ $ST r12,`14*$BNSZ`(r3) #r[14]=c3;
+ $ST r10,`15*$BNSZ`(r3) #r[15]=c1;
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size .bn_mul_comba8,.-.bn_mul_comba8
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sub_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+#
+.align 4
+.bn_sub_words:
+#
+# Handcoded version of bn_sub_words
+#
+#BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = b
+# r6 = n
+#
+# Note: No loop unrolling done since this is not a performance
+# critical loop.
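+#
+# In effect the loop below computes, in pseudo-code:
+#
+#	for (i = 0; i < n; i++) {
+#		t = a[i] - b[i] - borrow;	# t wide enough to go negative
+#		r[i] = t & BN_MASK2;
+#		borrow = (t < 0) ? 1 : 0;
+#	}
+#	return borrow;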
+
+ xor r0,r0,r0 #set r0 = 0
+#
+# check for r6 = 0 AND set carry bit.
+#
+ subfc. r7,r0,r6 # If r6 is 0 then result is 0.
+ # if r6 > 0 then result !=0
+ # In either case carry bit is set.
+ beq Lppcasm_sub_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ addi r5,r5,-$BNSZ
+ mtctr r6
+Lppcasm_sub_mainloop:
+ $LDU r7,$BNSZ(r4)
+ $LDU r8,$BNSZ(r5)
+ subfe r6,r8,r7 # r6 = r7+carry bit + onescomplement(r8)
+ # if carry = 1 this is r7-r8. Else it
+ # is r7-r8 -1 as we need.
+ $STU r6,$BNSZ(r3)
+ bdnz Lppcasm_sub_mainloop
+Lppcasm_sub_adios:
+ subfze r3,r0 # if carry bit is set then r3 = 0 else -1
+ andi. r3,r3,1 # keep only last bit.
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+.size .bn_sub_words,.-.bn_sub_words
+
+#
+# NOTE: The following label name should be changed to
+# "bn_add_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_add_words:
+#
+# Handcoded version of bn_add_words
+#
+#BN_ULONG bn_add_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = b
+# r6 = n
+#
+# Note: No loop unrolling done since this is not a performance
+# critical loop.
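+#
+# In effect, in pseudo-code:
+#
+#	for (i = 0; i < n; i++) {
+#		t = a[i] + b[i] + carry;	# wide intermediate
+#		r[i] = t & BN_MASK2;
+#		carry = t >> BN_BITS2;
+#	}
+#	return carry;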
+
+ xor r0,r0,r0
+#
+# check for r6 = 0. Is this needed?
+#
+ addic. r6,r6,0 #test r6 and clear carry bit.
+ beq Lppcasm_add_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ addi r5,r5,-$BNSZ
+ mtctr r6
+Lppcasm_add_mainloop:
+ $LDU r7,$BNSZ(r4)
+ $LDU r8,$BNSZ(r5)
+ adde r8,r7,r8
+ $STU r8,$BNSZ(r3)
+ bdnz Lppcasm_add_mainloop
+Lppcasm_add_adios:
+ addze r3,r0 #return carry bit.
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+.size .bn_add_words,.-.bn_add_words
+
+#
+# NOTE: The following label name should be changed to
+# "bn_div_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_div_words:
+#
+# This is a cleaned up version of code generated by
+# the AIX compiler. The only optimization is to use
+# the PPC instruction to count leading zeros instead
+# of a call to num_bits_word. Since this was compiled
+# only at level -O2, we can possibly squeeze it more?
+#
+# r3 = h
+# r4 = l
+# r5 = d
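+#
+# In effect it returns the one-word quotient of the two-word value
+# (h<<BN_BITS2)|l divided by d, assuming h < d so the quotient fits in
+# one word; for d == 0 it returns all-ones (-1), as coded below.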
+
+ $UCMPI 0,r5,0 # compare r5 and 0
+ bne Lppcasm_div1 # proceed if d!=0
+ li r3,-1 # d=0 return -1
+ blr
+Lppcasm_div1:
+ xor r0,r0,r0 #r0=0
+ li r8,$BITS
+ $CNTLZ. r7,r5 #r7 = num leading 0s in d.
+ beq Lppcasm_div2 #proceed if no leading zeros
+ subf r8,r7,r8 #r8 = BN_num_bits_word(d)
+ $SHR. r9,r3,r8 #are there any bits above r8'th?
+ $TR 16,r9,r0 #if there're, signal to dump core...
+Lppcasm_div2:
+ $UCMP 0,r3,r5 #h>=d?
+ blt Lppcasm_div3 #goto Lppcasm_div3 if not
+ subf r3,r5,r3 #h-=d ;
+Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i
+ cmpi 0,0,r7,0 # is (i == 0)?
+ beq Lppcasm_div4
+ $SHL r3,r3,r7 # h = (h<< i)
+ $SHR r8,r4,r8 # r8 = (l >> BN_BITS2 -i)
+ $SHL r5,r5,r7 # d<<=i
+ or r3,r3,r8 # h = (h<<i)|(l>>(BN_BITS2-i))
+ $SHL r4,r4,r7 # l <<=i
+Lppcasm_div4:
+ $SHRI r9,r5,`$BITS/2` # r9 = dh
+ # dl will be computed when needed
+ # as it saves registers.
+ li r6,2 #r6=2
+ mtctr r6 #counter will be in count.
+Lppcasm_divouterloop:
+ $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4)
+ $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4
+ # compute here for innerloop.
+ $UCMP 0,r8,r9 # is (h>>BN_BITS4)==dh
+ bne Lppcasm_div5 # goto Lppcasm_div5 if not
+
+ li r8,-1
+ $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l
+ b Lppcasm_div6
+Lppcasm_div5:
+ $UDIV r8,r3,r9 #q = h/dh
+Lppcasm_div6:
+ $UMULL r12,r9,r8 #th = q*dh
+ $CLRU r10,r5,`$BITS/2` #r10=dl
+ $UMULL r6,r8,r10 #tl = q*dl
+
+Lppcasm_divinnerloop:
+ subf r10,r12,r3 #t = h -th
+ $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of...
+ addic. r7,r7,0 #test if r7 == 0. used below.
+ # now want to compute
+ # r7 = (t<<BN_BITS4)|((l&BN_MASK2h)>>BN_BITS4)
+ # the following 2 instructions do that
+ $SHLI r7,r10,`$BITS/2` # r7 = (t<<BN_BITS4)
+ or r7,r7,r11 # r7|=((l&BN_MASK2h)>>BN_BITS4)
+ $UCMP cr1,r6,r7 # compare (tl <= r7)
+ bne Lppcasm_divinnerexit
+ ble cr1,Lppcasm_divinnerexit
+ addi r8,r8,-1 #q--
+ subf r12,r9,r12 #th -=dh
+ $CLRU r10,r5,`$BITS/2` #r10=dl. t is no longer needed in loop.
+ subf r6,r10,r6 #tl -=dl
+ b Lppcasm_divinnerloop
+Lppcasm_divinnerexit:
+ $SHRI r10,r6,`$BITS/2` #t=(tl>>BN_BITS4)
+ $SHLI r11,r6,`$BITS/2` #tl=(tl<<BN_BITS4)&BN_MASK2h;
+ $UCMP cr1,r4,r11 # compare l and tl
+ add r12,r12,r10 # th+=t
+ bge cr1,Lppcasm_div7 # if (l>=tl) goto Lppcasm_div7
+ addi r12,r12,1 # th++
+Lppcasm_div7:
+ subf r11,r11,r4 #r11=l-tl
+ $UCMP cr1,r3,r12 #compare h and th
+ bge cr1,Lppcasm_div8 #if (h>=th) goto Lppcasm_div8
+ addi r8,r8,-1 # q--
+ add r3,r5,r3 # h+=d
+Lppcasm_div8:
+ subf r12,r12,r3 #r12 = h-th
+ $SHLI r4,r11,`$BITS/2` #l=(l&BN_MASK2l)<<BN_BITS4
+ # want to compute
+ # h = ((h<<BN_BITS4)|(l>>BN_BITS4))&BN_MASK2
+ # the following 2 instructions will do this.
+ $INSR r11,r12,`$BITS/2`,`$BITS/2` # r11 is the value we want rotated $BITS/2.
+ $ROTL r3,r11,`$BITS/2` # rotate by $BITS/2 and store in r3
+ bdz Lppcasm_div9 #if (count==0) break ;
+ $SHLI r0,r8,`$BITS/2` #ret =q<<BN_BITS4
+ b Lppcasm_divouterloop
+Lppcasm_div9:
+ or r3,r8,r0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size .bn_div_words,.-.bn_div_words
+
+#
+# NOTE: The following label name should be changed to
+# "bn_sqr_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+.align 4
+.bn_sqr_words:
+#
+# Optimized version of bn_sqr_words
+#
+# void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
+#
+# r3 = r
+# r4 = a
+# r5 = n
+#
+# r6 = a[i].
+# r7,r8 = product.
+#
+# No unrolling done here. Not performance critical.
+
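+# As a reference point, the loop below is equivalent to this C sketch
+# (illustration only; shown for the 64-bit build and assuming a compiler
+# with unsigned __int128 for the double-width product):
+#
+#	void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
+#	{
+#		int i;
+#		for (i = 0; i < n; i++) {
+#			unsigned __int128 t = (unsigned __int128)a[i] * a[i];
+#			r[2*i]   = (BN_ULONG)t;		/* low half  */
+#			r[2*i+1] = (BN_ULONG)(t >> 64);	/* high half */
+#		}
+#	}
+#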
+ addic. r5,r5,0 #test r5.
+ beq Lppcasm_sqr_adios
+ addi r4,r4,-$BNSZ
+ addi r3,r3,-$BNSZ
+ mtctr r5
+Lppcasm_sqr_mainloop:
+ #sqr(r[0],r[1],a[0]);
+ $LDU r6,$BNSZ(r4)
+ $UMULL r7,r6,r6
+ $UMULH r8,r6,r6
+ $STU r7,$BNSZ(r3)
+ $STU r8,$BNSZ(r3)
+ bdnz Lppcasm_sqr_mainloop
+Lppcasm_sqr_adios:
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size .bn_sqr_words,.-.bn_sqr_words
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_words:
+#
+# BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#
+# r3 = rp
+# r4 = ap
+# r5 = num
+# r6 = w
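+#
+# The four-way unrolled loop below implements, word by word, the
+# equivalent of this C sketch (illustration only; 64-bit build, assuming
+# unsigned __int128 for the double-width product):
+#
+#	BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#	{
+#		BN_ULONG carry = 0;
+#		int i;
+#		for (i = 0; i < num; i++) {
+#			unsigned __int128 t = (unsigned __int128)ap[i] * w + carry;
+#			rp[i] = (BN_ULONG)t;
+#			carry = (BN_ULONG)(t >> 64);
+#		}
+#		return carry;	/* returned in r3, see Lppcasm_mw_OVER */
+#	}
+#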
+ xor r0,r0,r0
+ xor r12,r12,r12 # used for carry
+ rlwinm. r7,r5,30,2,31 # num >> 2
+ beq Lppcasm_mw_REM
+ mtctr r7
+Lppcasm_mw_LOOP:
+ #mul(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ #addze r10,r10 #carry is NOT ignored.
+ #will be taken care of
+ #in second spin below
+ #using adde.
+ $ST r9,`0*$BNSZ`(r3)
+ #mul(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ #addze r12,r12
+ $ST r11,`1*$BNSZ`(r3)
+ #mul(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ adde r9,r9,r12
+ #addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+ #mul_add(rp[3],ap[3],w,c1);
+ $LD r8,`3*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ addze r12,r12 #this spin we collect carry into
+ #r12
+ $ST r11,`3*$BNSZ`(r3)
+
+ addi r3,r3,`4*$BNSZ`
+ addi r4,r4,`4*$BNSZ`
+ bdnz Lppcasm_mw_LOOP
+
+Lppcasm_mw_REM:
+ andi. r5,r5,0x3
+ beq Lppcasm_mw_OVER
+ #mul(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`0*$BNSZ`(r3)
+ addi r12,r10,0
+
+ addi r5,r5,-1
+ cmpli 0,0,r5,0
+ beq Lppcasm_mw_OVER
+
+
+ #mul(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`1*$BNSZ`(r3)
+ addi r12,r10,0
+
+ addi r5,r5,-1
+ cmpli 0,0,r5,0
+ beq Lppcasm_mw_OVER
+
+ #mul_add(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12
+ addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+ addi r12,r10,0
+
+Lppcasm_mw_OVER:
+ addi r3,r12,0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+.size	.bn_mul_words,.-.bn_mul_words
+
+#
+# NOTE: The following label name should be changed to
+# "bn_mul_add_words" i.e. remove the first dot
+# for the gcc compiler. This should be automatically
+# done in the build
+#
+
+.align 4
+.bn_mul_add_words:
+#
+# BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#
+# r3 = rp
+# r4 = ap
+# r5 = num
+# r6 = w
+#
+# empirical evidence suggests that the unrolled version performs best!
+#
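+# For reference, the semantics being implemented, as a C sketch
+# (illustration only; 64-bit build, assuming unsigned __int128 for the
+# double-width product):
+#
+#	BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
+#	{
+#		BN_ULONG carry = 0;
+#		int i;
+#		for (i = 0; i < num; i++) {
+#			unsigned __int128 t = (unsigned __int128)ap[i] * w
+#			                    + rp[i] + carry;
+#			rp[i] = (BN_ULONG)t;
+#			carry = (BN_ULONG)(t >> 64);
+#		}
+#		return carry;
+#	}
+#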
+ xor r0,r0,r0 #r0 = 0
+ xor r12,r12,r12 #r12 = 0 . used for carry
+ rlwinm. r7,r5,30,2,31 # num >> 2
+	beq	Lppcasm_maw_leftover	# if (num < 4) goto Lppcasm_maw_leftover
+ mtctr r7
+Lppcasm_maw_mainloop:
+ #mul_add(rp[0],ap[0],w,c1);
+ $LD r8,`0*$BNSZ`(r4)
+ $LD r11,`0*$BNSZ`(r3)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ addc r9,r9,r12 #r12 is carry.
+ addze r10,r10
+ addc r9,r9,r11
+ #addze r10,r10
+ #the above instruction addze
+ #is NOT needed. Carry will NOT
+ #be ignored. It's not affected
+ #by multiply and will be collected
+ #in the next spin
+ $ST r9,`0*$BNSZ`(r3)
+
+ #mul_add(rp[1],ap[1],w,c1);
+ $LD r8,`1*$BNSZ`(r4)
+ $LD r9,`1*$BNSZ`(r3)
+ $UMULL r11,r6,r8
+ $UMULH r12,r6,r8
+ adde r11,r11,r10 #r10 is carry.
+ addze r12,r12
+ addc r11,r11,r9
+ #addze r12,r12
+ $ST r11,`1*$BNSZ`(r3)
+
+ #mul_add(rp[2],ap[2],w,c1);
+ $LD r8,`2*$BNSZ`(r4)
+ $UMULL r9,r6,r8
+ $LD r11,`2*$BNSZ`(r3)
+ $UMULH r10,r6,r8
+ adde r9,r9,r12
+ addze r10,r10
+ addc r9,r9,r11
+ #addze r10,r10
+ $ST r9,`2*$BNSZ`(r3)
+
+ #mul_add(rp[3],ap[3],w,c1);
+ $LD r8,`3*$BNSZ`(r4)
+ $UMULL r11,r6,r8
+ $LD r9,`3*$BNSZ`(r3)
+ $UMULH r12,r6,r8
+ adde r11,r11,r10
+ addze r12,r12
+ addc r11,r11,r9
+ addze r12,r12
+ $ST r11,`3*$BNSZ`(r3)
+ addi r3,r3,`4*$BNSZ`
+ addi r4,r4,`4*$BNSZ`
+ bdnz Lppcasm_maw_mainloop
+
+Lppcasm_maw_leftover:
+ andi. r5,r5,0x3
+ beq Lppcasm_maw_adios
+ addi r3,r3,-$BNSZ
+ addi r4,r4,-$BNSZ
+ #mul_add(rp[0],ap[0],w,c1);
+ mtctr r5
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+ bdz Lppcasm_maw_adios
+ #mul_add(rp[1],ap[1],w,c1);
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+ bdz Lppcasm_maw_adios
+ #mul_add(rp[2],ap[2],w,c1);
+ $LDU r8,$BNSZ(r4)
+ $UMULL r9,r6,r8
+ $UMULH r10,r6,r8
+ $LDU r11,$BNSZ(r3)
+ addc r9,r9,r11
+ addze r10,r10
+ addc r9,r9,r12
+ addze r12,r10
+ $ST r9,0(r3)
+
+Lppcasm_maw_adios:
+ addi r3,r12,0
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,4,0
+ .long 0
+.size .bn_mul_add_words,.-.bn_mul_add_words
+ .align 4
+EOF
+$data =~ s/\`([^\`]*)\`/eval $1/gem;
+print $data;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/ppc64-mont.pl b/openssl-1.1.0h/crypto/bn/asm/ppc64-mont.pl
new file mode 100644
index 0000000..1e19c95
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/ppc64-mont.pl
@@ -0,0 +1,1635 @@
+#! /usr/bin/env perl
+# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# December 2007
+
+# The reason for undertaking this effort is basically the following. Even
+# though the Power 6 CPU operates at an incredible 4.7GHz clock frequency,
+# its PKI performance was observed to be less than impressive, essentially
+# as fast as a 1.8GHz PPC970, or 2.6 times(!) slower than one would hope.
+# Well, it's not surprising that IBM had to make some sacrifices to
+# boost the clock frequency that much, but no overall improvement?
+# Having observed how much difference switching to the FPU made on
+# UltraSPARC, playing the same stunt on Power 6 appeared appropriate...
+# Unfortunately the resulting performance improvement is not as
+# impressive, ~30%, and in absolute terms it is still very far from what
+# one would expect from a 4.7GHz CPU. There is a chance that I'm doing
+# something wrong, but in the absence of assembler-level micro-profiling
+# data or at least a decent platform guide I can't tell... Better
+# results might also be achieved with VMX... Anyway, this module provides
+# *worse* performance on other PowerPC implementations: ~15-40% slower
+# on PPC970 depending on key length and ~40% slower on Power 5 for all
+# key lengths. As it's obviously inappropriate as a "best all-round"
+# alternative, it has to be complemented with run-time CPU family
+# detection. It should also be noted that, unlike on other PowerPC
+# implementations, the IALU-based ppc-mont.pl module performs
+# *suboptimally* on >=1024-bit key lengths on Power 6. Finally, note that
+# *everything* said so far applies to 64-bit builds! As far as 32-bit
+# applications executed on a 64-bit CPU go, this module is likely to
+# become the preferred choice, because it's easy to adapt for that
+# case and it *is* faster than 32-bit ppc-mont.pl on *all* processors.
+
+# February 2008
+
+# Micro-profiling-assisted optimization results in a ~15% improvement
+# over the original ppc64-mont.pl version, or an overall ~50% improvement
+# over the ppc.pl module on Power 6. Compared to ppc-mont.pl on the same
+# Power 6 CPU, this module is 5-150% faster depending on key length,
+# [hereafter] more for longer keys. But compared to ppc-mont.pl
+# on a 1.8GHz PPC970, it's only 5-55% faster. Still far from impressive
+# in absolute terms, but it's apparently just the way Power 6 is...
+
+# December 2009
+
+# Adapted for the 32-bit build, this module delivers a 25-120% performance
+# improvement (yes, more than *twice* as fast for longer keys) over
+# 32-bit ppc-mont.pl on a 1.8GHz PPC970. However! This implementation
+# still utilizes 64-bit integer operations, and the trouble is that most
+# PPC operating systems don't preserve the upper halves of general purpose
+# registers upon 32-bit signal delivery. They do preserve them upon
+# context switch, but not signalling:-( This means that asynchronous
+# signals have to be blocked upon entry to this subroutine. Signal
+# masking (and of course the complementary unmasking) has quite an impact
+# on performance, naturally larger for shorter keys. It's so severe
+# that 512-bit key performance can be as low as 1/3 of the expected
+# figure. This is why, on the affected OSes, this routine is engaged only
+# for longer key operations; see crypto/ppccap.c for further details.
+# MacOS X is an exception and doesn't require signal masking, and that's
+# where the above improvement coefficients were collected. For the others,
+# the alternative would be to break the dependence on the upper halves of
+# GPRs by sticking to 32-bit integer operations...
+
+# December 2012
+
+# Remove the above-mentioned dependence on the GPRs' upper halves in the
+# 32-bit build. There is no signal masking overhead, but the integer
+# instructions are *more* numerous... It's still "universally" faster
+# than 32-bit ppc-mont.pl, but the improvement coefficient is not as
+# impressive for longer keys...
+
+$flavour = shift;
+
+if ($flavour =~ /32/) {
+ $SIZE_T=4;
+ $RZONE= 224;
+ $fname= "bn_mul_mont_fpu64";
+
+ $STUX= "stwux"; # store indexed and update
+ $PUSH= "stw";
+ $POP= "lwz";
+} elsif ($flavour =~ /64/) {
+ $SIZE_T=8;
+ $RZONE= 288;
+ $fname= "bn_mul_mont_fpu64";
+
+ # same as above, but 64-bit mnemonics...
+ $STUX= "stdux"; # store indexed and update
+ $PUSH= "std";
+ $POP= "ld";
+} else { die "nonsense $flavour"; }
+
+$LITTLE_ENDIAN = ($flavour=~/le$/) ? 4 : 0;
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
+
+$FRAME=64; # padded frame header
+$TRANSFER=16*8;
+
+$carry="r0";
+$sp="r1";
+$toc="r2";
+$rp="r3"; $ovf="r3";
+$ap="r4";
+$bp="r5";
+$np="r6";
+$n0="r7";
+$num="r8";
+$rp="r9"; # $rp is reassigned
+$tp="r10";
+$j="r11";
+$i="r12";
+# non-volatile registers
+$c1="r19";
+$n1="r20";
+$a1="r21";
+$nap_d="r22"; # interleaved ap and np in double format
+$a0="r23"; # ap[0]
+$t0="r24"; # temporary registers
+$t1="r25";
+$t2="r26";
+$t3="r27";
+$t4="r28";
+$t5="r29";
+$t6="r30";
+$t7="r31";
+
+# PPC offers enough register bank capacity to unroll inner loops twice
+#
+# ..A3A2A1A0
+# dcba
+# -----------
+# A0a
+# A0b
+# A0c
+# A0d
+# A1a
+# A1b
+# A1c
+# A1d
+# A2a
+# A2b
+# A2c
+# A2d
+# A3a
+# A3b
+# A3c
+# A3d
+# ..a
+# ..b
+#
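+#
+# The FPU trick used throughout: the multiplier words are split into
+# 16-bit limbs and the multiplicand words into 32-bit halves, all
+# converted to doubles with fcfid; each partial product is then at most
+# 48 bits, so several can be accumulated with fmadd without exceeding
+# the 53-bit double mantissa, before fctid converts the sums back for
+# integer carry propagation. A minimal C sketch of the underlying idea
+# (exact products in doubles, recombined with shifts and adds) for a
+# single 32x32->64 multiplication; this is an illustration only, not
+# part of the generated code, and the function name is made up:
+#
+#	unsigned long long mul32x32_via_fp(unsigned int a, unsigned int b)
+#	{
+#		double al = (double)(a & 0xffff), ah = (double)(a >> 16);
+#		double bl = (double)(b & 0xffff), bh = (double)(b >> 16);
+#		double lo  = al*bl;		/* < 2^32, exact */
+#		double mid = al*bh + ah*bl;	/* < 2^33, exact */
+#		double hi  = ah*bh;		/* < 2^32, exact */
+#		return (unsigned long long)lo
+#		     + ((unsigned long long)mid << 16)
+#		     + ((unsigned long long)hi  << 32);
+#	}
+#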
+$ba="f0"; $bb="f1"; $bc="f2"; $bd="f3";
+$na="f4"; $nb="f5"; $nc="f6"; $nd="f7";
+$dota="f8"; $dotb="f9";
+$A0="f10"; $A1="f11"; $A2="f12"; $A3="f13";
+$N0="f20"; $N1="f21"; $N2="f22"; $N3="f23";
+$T0a="f24"; $T0b="f25";
+$T1a="f26"; $T1b="f27";
+$T2a="f28"; $T2b="f29";
+$T3a="f30"; $T3b="f31";
+
+# sp----------->+-------------------------------+
+# | saved sp |
+# +-------------------------------+
+# . .
+# +64 +-------------------------------+
+# | 16 gpr<->fpr transfer zone |
+# . .
+# . .
+# +16*8 +-------------------------------+
+# | __int64 tmp[-1] |
+# +-------------------------------+
+# | __int64 tmp[num] |
+# . .
+# . .
+# . .
+# +(num+1)*8 +-------------------------------+
+# | padding to 64 byte boundary |
+# . .
+# +X +-------------------------------+
+# | double nap_d[4*num] |
+# . .
+# . .
+# . .
+# +-------------------------------+
+# . .
+# -13*size_t +-------------------------------+
+# | 13 saved gpr, r19-r31 |
+# . .
+# . .
+# -12*8 +-------------------------------+
+# | 12 saved fpr, f20-f31 |
+# . .
+# . .
+# +-------------------------------+
+
+$code=<<___;
+.machine "any"
+.text
+
+.globl .$fname
+.align 5
+.$fname:
+ cmpwi $num,`3*8/$SIZE_T`
+ mr $rp,r3 ; $rp is reassigned
+ li r3,0 ; possible "not handled" return code
+ bltlr-
+ andi. r0,$num,`16/$SIZE_T-1` ; $num has to be "even"
+ bnelr-
+
+ slwi $num,$num,`log($SIZE_T)/log(2)` ; num*=sizeof(BN_LONG)
+ li $i,-4096
+ slwi $tp,$num,2 ; place for {an}p_{lh}[num], i.e. 4*num
+ add $tp,$tp,$num ; place for tp[num+1]
+ addi $tp,$tp,`$FRAME+$TRANSFER+8+64+$RZONE`
+ subf $tp,$tp,$sp ; $sp-$tp
+ and $tp,$tp,$i ; minimize TLB usage
+ subf $tp,$sp,$tp ; $tp-$sp
+ mr $i,$sp
+ $STUX $sp,$sp,$tp ; alloca
+
+ $PUSH r19,`-12*8-13*$SIZE_T`($i)
+ $PUSH r20,`-12*8-12*$SIZE_T`($i)
+ $PUSH r21,`-12*8-11*$SIZE_T`($i)
+ $PUSH r22,`-12*8-10*$SIZE_T`($i)
+ $PUSH r23,`-12*8-9*$SIZE_T`($i)
+ $PUSH r24,`-12*8-8*$SIZE_T`($i)
+ $PUSH r25,`-12*8-7*$SIZE_T`($i)
+ $PUSH r26,`-12*8-6*$SIZE_T`($i)
+ $PUSH r27,`-12*8-5*$SIZE_T`($i)
+ $PUSH r28,`-12*8-4*$SIZE_T`($i)
+ $PUSH r29,`-12*8-3*$SIZE_T`($i)
+ $PUSH r30,`-12*8-2*$SIZE_T`($i)
+ $PUSH r31,`-12*8-1*$SIZE_T`($i)
+ stfd f20,`-12*8`($i)
+ stfd f21,`-11*8`($i)
+ stfd f22,`-10*8`($i)
+ stfd f23,`-9*8`($i)
+ stfd f24,`-8*8`($i)
+ stfd f25,`-7*8`($i)
+ stfd f26,`-6*8`($i)
+ stfd f27,`-5*8`($i)
+ stfd f28,`-4*8`($i)
+ stfd f29,`-3*8`($i)
+ stfd f30,`-2*8`($i)
+ stfd f31,`-1*8`($i)
+
+ addi $tp,$sp,`$FRAME+$TRANSFER+8+64`
+ li $i,-64
+ add $nap_d,$tp,$num
+ and $nap_d,$nap_d,$i ; align to 64 bytes
+ ; nap_d is off by 1, because it's used with stfdu/lfdu
+ addi $nap_d,$nap_d,-8
+ srwi $j,$num,`3+1` ; counter register, num/2
+ addi $j,$j,-1
+ addi $tp,$sp,`$FRAME+$TRANSFER-8`
+ li $carry,0
+ mtctr $j
+___
+
+$code.=<<___ if ($SIZE_T==8);
+ ld $a0,0($ap) ; pull ap[0] value
+ ld $t3,0($bp) ; bp[0]
+ ld $n0,0($n0) ; pull n0[0] value
+
+ mulld $t7,$a0,$t3 ; ap[0]*bp[0]
+ ; transfer bp[0] to FPU as 4x16-bit values
+ extrdi $t0,$t3,16,48
+ extrdi $t1,$t3,16,32
+ extrdi $t2,$t3,16,16
+ extrdi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp)
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+
+ mulld $t7,$t7,$n0 ; tp[0]*n0
+ ; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
+ extrdi $t4,$t7,16,48
+ extrdi $t5,$t7,16,32
+ extrdi $t6,$t7,16,16
+ extrdi $t7,$t7,16,0
+ std $t4,`$FRAME+32`($sp)
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+
+ extrdi $t0,$a0,32,32 ; lwz $t0,4($ap)
+ extrdi $t1,$a0,32,0 ; lwz $t1,0($ap)
+ lwz $t2,`12^$LITTLE_ENDIAN`($ap) ; load a[1] as 32-bit word pair
+ lwz $t3,`8^$LITTLE_ENDIAN`($ap)
+ lwz $t4,`4^$LITTLE_ENDIAN`($np) ; load n[0] as 32-bit word pair
+ lwz $t5,`0^$LITTLE_ENDIAN`($np)
+ lwz $t6,`12^$LITTLE_ENDIAN`($np) ; load n[1] as 32-bit word pair
+ lwz $t7,`8^$LITTLE_ENDIAN`($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $a0,0($ap) ; pull ap[0,1] value
+ mr $n1,$n0
+ lwz $a1,4($ap)
+ li $c1,0
+ lwz $t1,0($bp) ; bp[0,1]
+ lwz $t3,4($bp)
+ lwz $n0,0($n1) ; pull n0[0,1] value
+ lwz $n1,4($n1)
+
+ mullw $t4,$a0,$t1 ; mulld ap[0]*bp[0]
+ mulhwu $t5,$a0,$t1
+ mullw $t6,$a1,$t1
+ mullw $t7,$a0,$t3
+ add $t5,$t5,$t6
+ add $t5,$t5,$t7
+ ; transfer bp[0] to FPU as 4x16-bit values
+ extrwi $t0,$t1,16,16
+ extrwi $t1,$t1,16,0
+ extrwi $t2,$t3,16,16
+ extrwi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp) ; yes, std in 32-bit build
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+
+ mullw $t0,$t4,$n0 ; mulld tp[0]*n0
+ mulhwu $t1,$t4,$n0
+ mullw $t2,$t5,$n0
+ mullw $t3,$t4,$n1
+ add $t1,$t1,$t2
+ add $t1,$t1,$t3
+ ; transfer (ap[0]*bp[0])*n0 to FPU as 4x16-bit values
+ extrwi $t4,$t0,16,16
+ extrwi $t5,$t0,16,0
+ extrwi $t6,$t1,16,16
+ extrwi $t7,$t1,16,0
+ std $t4,`$FRAME+32`($sp) ; yes, std in 32-bit build
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+
+ mr $t0,$a0 ; lwz $t0,0($ap)
+ mr $t1,$a1 ; lwz $t1,4($ap)
+ lwz $t2,8($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
+ lfd $ba,`$FRAME+0`($sp)
+ lfd $bb,`$FRAME+8`($sp)
+ lfd $bc,`$FRAME+16`($sp)
+ lfd $bd,`$FRAME+24`($sp)
+ lfd $na,`$FRAME+32`($sp)
+ lfd $nb,`$FRAME+40`($sp)
+ lfd $nc,`$FRAME+48`($sp)
+ lfd $nd,`$FRAME+56`($sp)
+ std $t0,`$FRAME+64`($sp) ; yes, std even in 32-bit build
+ std $t1,`$FRAME+72`($sp)
+ std $t2,`$FRAME+80`($sp)
+ std $t3,`$FRAME+88`($sp)
+ std $t4,`$FRAME+96`($sp)
+ std $t5,`$FRAME+104`($sp)
+ std $t6,`$FRAME+112`($sp)
+ std $t7,`$FRAME+120`($sp)
+ fcfid $ba,$ba
+ fcfid $bb,$bb
+ fcfid $bc,$bc
+ fcfid $bd,$bd
+ fcfid $na,$na
+ fcfid $nb,$nb
+ fcfid $nc,$nc
+ fcfid $nd,$nd
+
+ lfd $A0,`$FRAME+64`($sp)
+ lfd $A1,`$FRAME+72`($sp)
+ lfd $A2,`$FRAME+80`($sp)
+ lfd $A3,`$FRAME+88`($sp)
+ lfd $N0,`$FRAME+96`($sp)
+ lfd $N1,`$FRAME+104`($sp)
+ lfd $N2,`$FRAME+112`($sp)
+ lfd $N3,`$FRAME+120`($sp)
+ fcfid $A0,$A0
+ fcfid $A1,$A1
+ fcfid $A2,$A2
+ fcfid $A3,$A3
+ fcfid $N0,$N0
+ fcfid $N1,$N1
+ fcfid $N2,$N2
+ fcfid $N3,$N3
+ addi $ap,$ap,16
+ addi $np,$np,16
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ stfd $A0,8($nap_d) ; save a[j] in double format
+ stfd $A1,16($nap_d)
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ stfd $A2,24($nap_d) ; save a[j+1] in double format
+ stfd $A3,32($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ stfd $N0,40($nap_d) ; save n[j] in double format
+ stfd $N1,48($nap_d)
+ fmul $T0a,$A0,$ba
+ fmul $T0b,$A0,$bb
+ stfd $N2,56($nap_d) ; save n[j+1] in double format
+ stfdu $N3,64($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+
+.align 5
+L1st:
+___
+$code.=<<___ if ($SIZE_T==8);
+ lwz $t0,`4^$LITTLE_ENDIAN`($ap) ; load a[j] as 32-bit word pair
+ lwz $t1,`0^$LITTLE_ENDIAN`($ap)
+ lwz $t2,`12^$LITTLE_ENDIAN`($ap) ; load a[j+1] as 32-bit word pair
+ lwz $t3,`8^$LITTLE_ENDIAN`($ap)
+ lwz $t4,`4^$LITTLE_ENDIAN`($np) ; load n[j] as 32-bit word pair
+ lwz $t5,`0^$LITTLE_ENDIAN`($np)
+ lwz $t6,`12^$LITTLE_ENDIAN`($np) ; load n[j+1] as 32-bit word pair
+ lwz $t7,`8^$LITTLE_ENDIAN`($np)
+___
+$code.=<<___ if ($SIZE_T==4);
+ lwz $t0,0($ap) ; load a[j..j+3] as 32-bit word pairs
+ lwz $t1,4($ap)
+ lwz $t2,8($ap)
+ lwz $t3,12($ap)
+ lwz $t4,0($np) ; load n[j..j+3] as 32-bit word pairs
+ lwz $t5,4($np)
+ lwz $t6,8($np)
+ lwz $t7,12($np)
+___
+$code.=<<___;
+ std $t0,`$FRAME+64`($sp) ; yes, std even in 32-bit build
+ std $t1,`$FRAME+72`($sp)
+ std $t2,`$FRAME+80`($sp)
+ std $t3,`$FRAME+88`($sp)
+ std $t4,`$FRAME+96`($sp)
+ std $t5,`$FRAME+104`($sp)
+ std $t6,`$FRAME+112`($sp)
+ std $t7,`$FRAME+120`($sp)
+___
+if ($SIZE_T==8 or $flavour =~ /osx/) {
+$code.=<<___;
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+___
+} else {
+$code.=<<___;
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
+___
+}
+$code.=<<___;
+ lfd $A0,`$FRAME+64`($sp)
+ lfd $A1,`$FRAME+72`($sp)
+ lfd $A2,`$FRAME+80`($sp)
+ lfd $A3,`$FRAME+88`($sp)
+ lfd $N0,`$FRAME+96`($sp)
+ lfd $N1,`$FRAME+104`($sp)
+ lfd $N2,`$FRAME+112`($sp)
+ lfd $N3,`$FRAME+120`($sp)
+ fcfid $A0,$A0
+ fcfid $A1,$A1
+ fcfid $A2,$A2
+ fcfid $A3,$A3
+ fcfid $N0,$N0
+ fcfid $N1,$N1
+ fcfid $N2,$N2
+ fcfid $N3,$N3
+ addi $ap,$ap,16
+ addi $np,$np,16
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ stfd $A0,8($nap_d) ; save a[j] in double format
+ stfd $A1,16($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmadd $T0a,$A0,$ba,$dota
+ fmadd $T0b,$A0,$bb,$dotb
+ stfd $A2,24($nap_d) ; save a[j+1] in double format
+ stfd $A3,32($nap_d)
+___
+if ($SIZE_T==8 or $flavour =~ /osx/) {
+$code.=<<___;
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ stfd $N0,40($nap_d) ; save n[j] in double format
+ stfd $N1,48($nap_d)
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ add $t0,$t0,$carry ; can not overflow
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+ stfd $N2,56($nap_d) ; save n[j+1] in double format
+ stfdu $N3,64($nap_d)
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ insrdi $t0,$t1,16,32
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ add $t2,$t2,$carry
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ srdi $carry,$t2,16
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ srdi $carry,$t3,16
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ add $t4,$t4,$carry
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ srdi $carry,$t4,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ add $t6,$t6,$carry
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ srdi $carry,$t6,16
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ insrdi $t4,$t6,16,16
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+ std $t0,8($tp) ; tp[j-1]
+ stdu $t4,16($tp) ; tp[j]
+___
+} else {
+$code.=<<___;
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ addc $t0,$t0,$carry
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ stfd $N0,40($nap_d) ; save n[j] in double format
+ stfd $N1,48($nap_d)
+ srwi $c1,$t1,16
+ insrwi $carry,$t1,16,0
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+ stfd $N2,56($nap_d) ; save n[j+1] in double format
+ stfdu $N3,64($nap_d)
+ insrwi $t0,$t2,16,0 ; 0..31 bits
+ srwi $c1,$t3,16
+ insrwi $carry,$t3,16,0
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ srwi $c1,$t5,16
+ insrwi $carry,$t5,16,0
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ insrwi $t4,$t6,16,0 ; 32..63 bits
+ srwi $c1,$t7,16
+ insrwi $carry,$t7,16,0
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ stw $t0,12($tp) ; tp[j-1]
+ stw $t4,8($tp)
+ srwi $c1,$t3,16
+ insrwi $carry,$t3,16,0
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ insrwi $t2,$t6,16,0 ; 64..95 bits
+ srwi $c1,$t7,16
+ insrwi $carry,$t7,16,0
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
+ addc $t0,$t0,$carry
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ srwi $c1,$t1,16
+ insrwi $carry,$t1,16,0
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+ insrwi $t0,$t4,16,0 ; 96..127 bits
+ srwi $c1,$t5,16
+ insrwi $carry,$t5,16,0
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+ stw $t2,20($tp) ; tp[j]
+ stwu $t0,16($tp)
+___
+}
+$code.=<<___;
+ bdnz L1st
+
+ fctid $dota,$dota
+ fctid $dotb,$dotb
+___
+if ($SIZE_T==8 or $flavour =~ /osx/) {
+$code.=<<___;
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ add $t0,$t0,$carry ; can not overflow
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ insrdi $t0,$t1,16,32
+ add $t2,$t2,$carry
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+ srdi $carry,$t4,16
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ ld $t6,`$FRAME+64`($sp)
+ ld $t7,`$FRAME+72`($sp)
+
+ std $t0,8($tp) ; tp[j-1]
+ stdu $t4,16($tp) ; tp[j]
+
+ add $t6,$t6,$carry ; can not overflow
+ srdi $carry,$t6,16
+ add $t7,$t7,$carry
+ insrdi $t6,$t7,48,0
+ srdi $ovf,$t7,48
+ std $t6,8($tp) ; tp[num-1]
+___
+} else {
+$code.=<<___;
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ addc $t0,$t0,$carry
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ insrwi $carry,$t1,16,0
+ srwi $c1,$t1,16
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ insrwi $t0,$t2,16,0 ; 0..31 bits
+ insrwi $carry,$t3,16,0
+ srwi $c1,$t3,16
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ insrwi $carry,$t5,16,0
+ srwi $c1,$t5,16
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ insrwi $t4,$t6,16,0 ; 32..63 bits
+ insrwi $carry,$t7,16,0
+ srwi $c1,$t7,16
+ stw $t0,12($tp) ; tp[j-1]
+ stw $t4,8($tp)
+
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
+
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ insrwi $carry,$t3,16,0
+ srwi $c1,$t3,16
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ insrwi $t2,$t6,16,0 ; 64..95 bits
+ insrwi $carry,$t7,16,0
+ srwi $c1,$t7,16
+ addc $t0,$t0,$carry
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ insrwi $carry,$t1,16,0
+ srwi $c1,$t1,16
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ insrwi $t0,$t4,16,0 ; 96..127 bits
+ insrwi $carry,$t5,16,0
+ srwi $c1,$t5,16
+ stw $t2,20($tp) ; tp[j]
+ stwu $t0,16($tp)
+
+ lwz $t7,`$FRAME+64^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+68^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+72^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+76^$LITTLE_ENDIAN`($sp)
+
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ insrwi $carry,$t7,16,0
+ srwi $c1,$t7,16
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+
+ insrwi $t6,$t4,16,0
+ srwi $t4,$t4,16
+ insrwi $t4,$t5,16,0
+ srwi $ovf,$t5,16
+ stw $t6,12($tp) ; tp[num-1]
+ stw $t4,8($tp)
+___
+}
+$code.=<<___;
+ slwi $t7,$num,2
+ subf $nap_d,$t7,$nap_d ; rewind pointer
+
+ li $i,8 ; i=1
+.align 5
+Louter:
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ li $carry,0
+ mtctr $j
+___
+$code.=<<___ if ($SIZE_T==8);
+ ldx $t3,$bp,$i ; bp[i]
+
+ ld $t6,`$FRAME+$TRANSFER+8`($sp) ; tp[0]
+ mulld $t7,$a0,$t3 ; ap[0]*bp[i]
+ add $t7,$t7,$t6 ; ap[0]*bp[i]+tp[0]
+ ; transfer bp[i] to FPU as 4x16-bit values
+ extrdi $t0,$t3,16,48
+ extrdi $t1,$t3,16,32
+ extrdi $t2,$t3,16,16
+ extrdi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp)
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+
+ mulld $t7,$t7,$n0 ; tp[0]*n0
+ ; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
+ extrdi $t4,$t7,16,48
+ extrdi $t5,$t7,16,32
+ extrdi $t6,$t7,16,16
+ extrdi $t7,$t7,16,0
+ std $t4,`$FRAME+32`($sp)
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+___
+$code.=<<___ if ($SIZE_T==4);
+ add $t0,$bp,$i
+ li $c1,0
+ lwz $t1,0($t0) ; bp[i,i+1]
+ lwz $t3,4($t0)
+
+ mullw $t4,$a0,$t1 ; ap[0]*bp[i]
+ lwz $t0,`$FRAME+$TRANSFER+8+4`($sp) ; tp[0]
+ mulhwu $t5,$a0,$t1
+ lwz $t2,`$FRAME+$TRANSFER+8`($sp) ; tp[0]
+ mullw $t6,$a1,$t1
+ mullw $t7,$a0,$t3
+ add $t5,$t5,$t6
+ add $t5,$t5,$t7
+ addc $t4,$t4,$t0 ; ap[0]*bp[i]+tp[0]
+ adde $t5,$t5,$t2
+ ; transfer bp[i] to FPU as 4x16-bit values
+ extrwi $t0,$t1,16,16
+ extrwi $t1,$t1,16,0
+ extrwi $t2,$t3,16,16
+ extrwi $t3,$t3,16,0
+ std $t0,`$FRAME+0`($sp) ; yes, std in 32-bit build
+ std $t1,`$FRAME+8`($sp)
+ std $t2,`$FRAME+16`($sp)
+ std $t3,`$FRAME+24`($sp)
+
+ mullw $t0,$t4,$n0 ; mulld tp[0]*n0
+ mulhwu $t1,$t4,$n0
+ mullw $t2,$t5,$n0
+ mullw $t3,$t4,$n1
+ add $t1,$t1,$t2
+ add $t1,$t1,$t3
+ ; transfer (ap[0]*bp[i]+tp[0])*n0 to FPU as 4x16-bit values
+ extrwi $t4,$t0,16,16
+ extrwi $t5,$t0,16,0
+ extrwi $t6,$t1,16,16
+ extrwi $t7,$t1,16,0
+ std $t4,`$FRAME+32`($sp) ; yes, std in 32-bit build
+ std $t5,`$FRAME+40`($sp)
+ std $t6,`$FRAME+48`($sp)
+ std $t7,`$FRAME+56`($sp)
+___
+$code.=<<___;
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+ lfd $N0,40($nap_d) ; load n[j] in double format
+ lfd $N1,48($nap_d)
+ lfd $N2,56($nap_d) ; load n[j+1] in double format
+ lfdu $N3,64($nap_d)
+
+ lfd $ba,`$FRAME+0`($sp)
+ lfd $bb,`$FRAME+8`($sp)
+ lfd $bc,`$FRAME+16`($sp)
+ lfd $bd,`$FRAME+24`($sp)
+ lfd $na,`$FRAME+32`($sp)
+ lfd $nb,`$FRAME+40`($sp)
+ lfd $nc,`$FRAME+48`($sp)
+ lfd $nd,`$FRAME+56`($sp)
+
+ fcfid $ba,$ba
+ fcfid $bb,$bb
+ fcfid $bc,$bc
+ fcfid $bd,$bd
+ fcfid $na,$na
+ fcfid $nb,$nb
+ fcfid $nc,$nc
+ fcfid $nd,$nd
+
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmul $T0a,$A0,$ba
+ fmul $T0b,$A0,$bb
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+
+.align 5
+Linner:
+ fmul $T1a,$A1,$ba
+ fmul $T1b,$A1,$bb
+ fmul $T2a,$A2,$ba
+ fmul $T2b,$A2,$bb
+ lfd $N0,40($nap_d) ; load n[j] in double format
+ lfd $N1,48($nap_d)
+ fmul $T3a,$A3,$ba
+ fmul $T3b,$A3,$bb
+ fmadd $T0a,$A0,$ba,$dota
+ fmadd $T0b,$A0,$bb,$dotb
+ lfd $N2,56($nap_d) ; load n[j+1] in double format
+ lfdu $N3,64($nap_d)
+
+ fmadd $T1a,$A0,$bc,$T1a
+ fmadd $T1b,$A0,$bd,$T1b
+ fmadd $T2a,$A1,$bc,$T2a
+ fmadd $T2b,$A1,$bd,$T2b
+ lfd $A0,8($nap_d) ; load a[j] in double format
+ lfd $A1,16($nap_d)
+ fmadd $T3a,$A2,$bc,$T3a
+ fmadd $T3b,$A2,$bd,$T3b
+ fmul $dota,$A3,$bc
+ fmul $dotb,$A3,$bd
+ lfd $A2,24($nap_d) ; load a[j+1] in double format
+ lfd $A3,32($nap_d)
+___
+if ($SIZE_T==8 or $flavour =~ /osx/) {
+$code.=<<___;
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ add $t0,$t0,$carry ; can not overflow
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ insrdi $t0,$t1,16,32
+ ld $t1,8($tp) ; tp[j]
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ add $t2,$t2,$carry
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ add $t3,$t3,$carry
+ ldu $t2,16($tp) ; tp[j+1]
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+
+ fctid $T0a,$T0a
+ fctid $T0b,$T0b
+ srdi $carry,$t4,16
+ fctid $T1a,$T1a
+ fctid $T1b,$T1b
+ add $t5,$t5,$carry
+ fctid $T2a,$T2a
+ fctid $T2b,$T2b
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ fctid $T3a,$T3a
+ fctid $T3b,$T3b
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+
+ stfd $T0a,`$FRAME+0`($sp)
+ stfd $T0b,`$FRAME+8`($sp)
+ add $t7,$t7,$carry
+ addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
+ stfd $T1a,`$FRAME+16`($sp)
+ stfd $T1b,`$FRAME+24`($sp)
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ stfd $T2a,`$FRAME+32`($sp)
+ stfd $T2b,`$FRAME+40`($sp)
+ adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
+ stfd $T3a,`$FRAME+48`($sp)
+ stfd $T3b,`$FRAME+56`($sp)
+ addze $carry,$carry
+ std $t3,-16($tp) ; tp[j-1]
+ std $t5,-8($tp) ; tp[j]
+___
+} else {
+$code.=<<___;
+ fmadd $T1a,$N1,$na,$T1a
+ fmadd $T1b,$N1,$nb,$T1b
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
+ fmadd $T2a,$N2,$na,$T2a
+ fmadd $T2b,$N2,$nb,$T2b
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
+ fmadd $T3a,$N3,$na,$T3a
+ fmadd $T3b,$N3,$nb,$T3b
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
+ addc $t0,$t0,$carry
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ fmadd $T0a,$N0,$na,$T0a
+ fmadd $T0b,$N0,$nb,$T0b
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
+ srwi $c1,$t1,16
+ insrwi $carry,$t1,16,0
+
+ fmadd $T1a,$N0,$nc,$T1a
+ fmadd $T1b,$N0,$nd,$T1b
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ fmadd $T2a,$N1,$nc,$T2a
+ fmadd $T2b,$N1,$nd,$T2b
+ insrwi $t0,$t2,16,0 ; 0..31 bits
+ srwi $c1,$t3,16
+ insrwi $carry,$t3,16,0
+ fmadd $T3a,$N2,$nc,$T3a
+ fmadd $T3b,$N2,$nd,$T3b
+ lwz $t2,12($tp) ; tp[j]
+ lwz $t3,8($tp)
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ fmadd $dota,$N3,$nc,$dota
+ fmadd $dotb,$N3,$nd,$dotb
+ srwi $c1,$t5,16
+ insrwi $carry,$t5,16,0
+
+ fctid $T0a,$T0a
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ fctid $T0b,$T0b
+ insrwi $t4,$t6,16,0 ; 32..63 bits
+ srwi $c1,$t7,16
+ insrwi $carry,$t7,16,0
+ fctid $T1a,$T1a
+ addc $t0,$t0,$t2
+ adde $t4,$t4,$t3
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
+ fctid $T1b,$T1b
+ addze $carry,$carry
+ addze $c1,$c1
+ stw $t0,4($tp) ; tp[j-1]
+ stw $t4,0($tp)
+ fctid $T2a,$T2a
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
+ fctid $T2b,$T2b
+ srwi $c1,$t3,16
+ insrwi $carry,$t3,16,0
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
+ fctid $T3a,$T3a
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
+ fctid $T3b,$T3b
+
+ insrwi $t2,$t6,16,0 ; 64..95 bits
+ insrwi $carry,$t7,16,0
+ srwi $c1,$t7,16
+ lwz $t6,20($tp)
+ lwzu $t7,16($tp)
+ addc $t0,$t0,$carry
+ stfd $T0a,`$FRAME+0`($sp)
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ stfd $T0b,`$FRAME+8`($sp)
+ insrwi $carry,$t1,16,0
+ srwi $c1,$t1,16
+ addc $t4,$t4,$carry
+ stfd $T1a,`$FRAME+16`($sp)
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ insrwi $t0,$t4,16,0 ; 96..127 bits
+ stfd $T1b,`$FRAME+24`($sp)
+ insrwi $carry,$t5,16,0
+ srwi $c1,$t5,16
+
+ addc $t2,$t2,$t6
+ stfd $T2a,`$FRAME+32`($sp)
+ adde $t0,$t0,$t7
+ stfd $T2b,`$FRAME+40`($sp)
+ addze $carry,$carry
+ stfd $T3a,`$FRAME+48`($sp)
+ addze $c1,$c1
+ stfd $T3b,`$FRAME+56`($sp)
+ stw $t2,-4($tp) ; tp[j]
+ stw $t0,-8($tp)
+___
+}
+$code.=<<___;
+ bdnz Linner
+
+ fctid $dota,$dota
+ fctid $dotb,$dotb
+___
+if ($SIZE_T==8 or $flavour =~ /osx/) {
+$code.=<<___;
+ ld $t0,`$FRAME+0`($sp)
+ ld $t1,`$FRAME+8`($sp)
+ ld $t2,`$FRAME+16`($sp)
+ ld $t3,`$FRAME+24`($sp)
+ ld $t4,`$FRAME+32`($sp)
+ ld $t5,`$FRAME+40`($sp)
+ ld $t6,`$FRAME+48`($sp)
+ ld $t7,`$FRAME+56`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ add $t0,$t0,$carry ; can not overflow
+ srdi $carry,$t0,16
+ add $t1,$t1,$carry
+ srdi $carry,$t1,16
+ insrdi $t0,$t1,16,32
+ add $t2,$t2,$carry
+ ld $t1,8($tp) ; tp[j]
+ srdi $carry,$t2,16
+ insrdi $t0,$t2,16,16
+ add $t3,$t3,$carry
+ ldu $t2,16($tp) ; tp[j+1]
+ srdi $carry,$t3,16
+ insrdi $t0,$t3,16,0 ; 0..63 bits
+ add $t4,$t4,$carry
+ srdi $carry,$t4,16
+ add $t5,$t5,$carry
+ srdi $carry,$t5,16
+ insrdi $t4,$t5,16,32
+ add $t6,$t6,$carry
+ srdi $carry,$t6,16
+ insrdi $t4,$t6,16,16
+ add $t7,$t7,$carry
+ insrdi $t4,$t7,16,0 ; 64..127 bits
+ srdi $carry,$t7,16 ; upper 33 bits
+ ld $t6,`$FRAME+64`($sp)
+ ld $t7,`$FRAME+72`($sp)
+
+ addc $t3,$t0,$t1
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t0,$t0,32,0
+ extrdi $t1,$t1,32,0
+ adde $t0,$t0,$t1
+___
+$code.=<<___;
+ adde $t5,$t4,$t2
+___
+$code.=<<___ if ($SIZE_T==4); # adjust XER[CA]
+ extrdi $t4,$t4,32,0
+ extrdi $t2,$t2,32,0
+ adde $t4,$t4,$t2
+___
+$code.=<<___;
+ addze $carry,$carry
+
+ std $t3,-16($tp) ; tp[j-1]
+ std $t5,-8($tp) ; tp[j]
+
+	add	$carry,$carry,$ovf	; consume upmost overflow
+ add $t6,$t6,$carry ; can not overflow
+ srdi $carry,$t6,16
+ add $t7,$t7,$carry
+ insrdi $t6,$t7,48,0
+ srdi $ovf,$t7,48
+ std $t6,0($tp) ; tp[num-1]
+___
+} else {
+$code.=<<___;
+ lwz $t1,`$FRAME+0^$LITTLE_ENDIAN`($sp)
+ lwz $t0,`$FRAME+4^$LITTLE_ENDIAN`($sp)
+ lwz $t3,`$FRAME+8^$LITTLE_ENDIAN`($sp)
+ lwz $t2,`$FRAME+12^$LITTLE_ENDIAN`($sp)
+ lwz $t5,`$FRAME+16^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+20^$LITTLE_ENDIAN`($sp)
+ lwz $t7,`$FRAME+24^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+28^$LITTLE_ENDIAN`($sp)
+ stfd $dota,`$FRAME+64`($sp)
+ stfd $dotb,`$FRAME+72`($sp)
+
+ addc $t0,$t0,$carry
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ insrwi $carry,$t1,16,0
+ srwi $c1,$t1,16
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ insrwi $t0,$t2,16,0 ; 0..31 bits
+ lwz $t2,12($tp) ; tp[j]
+ insrwi $carry,$t3,16,0
+ srwi $c1,$t3,16
+ lwz $t3,8($tp)
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ insrwi $carry,$t5,16,0
+ srwi $c1,$t5,16
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ insrwi $t4,$t6,16,0 ; 32..63 bits
+ insrwi $carry,$t7,16,0
+ srwi $c1,$t7,16
+
+ addc $t0,$t0,$t2
+ adde $t4,$t4,$t3
+ addze $carry,$carry
+ addze $c1,$c1
+ stw $t0,4($tp) ; tp[j-1]
+ stw $t4,0($tp)
+
+ lwz $t3,`$FRAME+32^$LITTLE_ENDIAN`($sp) ; permuted $t1
+ lwz $t2,`$FRAME+36^$LITTLE_ENDIAN`($sp) ; permuted $t0
+ lwz $t7,`$FRAME+40^$LITTLE_ENDIAN`($sp) ; permuted $t3
+ lwz $t6,`$FRAME+44^$LITTLE_ENDIAN`($sp) ; permuted $t2
+ lwz $t1,`$FRAME+48^$LITTLE_ENDIAN`($sp) ; permuted $t5
+ lwz $t0,`$FRAME+52^$LITTLE_ENDIAN`($sp) ; permuted $t4
+ lwz $t5,`$FRAME+56^$LITTLE_ENDIAN`($sp) ; permuted $t7
+ lwz $t4,`$FRAME+60^$LITTLE_ENDIAN`($sp) ; permuted $t6
+
+ addc $t2,$t2,$carry
+ adde $t3,$t3,$c1
+ srwi $carry,$t2,16
+ insrwi $carry,$t3,16,0
+ srwi $c1,$t3,16
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ srwi $carry,$t6,16
+ insrwi $t2,$t6,16,0 ; 64..95 bits
+ lwz $t6,20($tp)
+ insrwi $carry,$t7,16,0
+ srwi $c1,$t7,16
+ lwzu $t7,16($tp)
+ addc $t0,$t0,$carry
+ adde $t1,$t1,$c1
+ srwi $carry,$t0,16
+ insrwi $carry,$t1,16,0
+ srwi $c1,$t1,16
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+ srwi $carry,$t4,16
+ insrwi $t0,$t4,16,0 ; 96..127 bits
+ insrwi $carry,$t5,16,0
+ srwi $c1,$t5,16
+
+ addc $t2,$t2,$t6
+ adde $t0,$t0,$t7
+ lwz $t7,`$FRAME+64^$LITTLE_ENDIAN`($sp)
+ lwz $t6,`$FRAME+68^$LITTLE_ENDIAN`($sp)
+ addze $carry,$carry
+ addze $c1,$c1
+ lwz $t5,`$FRAME+72^$LITTLE_ENDIAN`($sp)
+ lwz $t4,`$FRAME+76^$LITTLE_ENDIAN`($sp)
+
+ addc $t6,$t6,$carry
+ adde $t7,$t7,$c1
+ stw $t2,-4($tp) ; tp[j]
+ stw $t0,-8($tp)
+ addc $t6,$t6,$ovf
+ addze $t7,$t7
+ srwi $carry,$t6,16
+ insrwi $carry,$t7,16,0
+ srwi $c1,$t7,16
+ addc $t4,$t4,$carry
+ adde $t5,$t5,$c1
+
+ insrwi $t6,$t4,16,0
+ srwi $t4,$t4,16
+ insrwi $t4,$t5,16,0
+ srwi $ovf,$t5,16
+ stw $t6,4($tp) ; tp[num-1]
+ stw $t4,0($tp)
+___
+}
+$code.=<<___;
+ slwi $t7,$num,2
+ addi $i,$i,8
+ subf $nap_d,$t7,$nap_d ; rewind pointer
+ cmpw $i,$num
+ blt- Louter
+___
+
+$code.=<<___ if ($SIZE_T==8);
+ subf $np,$num,$np ; rewind np
+ addi $j,$j,1 ; restore counter
+ subfc $i,$i,$i ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,`$FRAME+$TRANSFER+8`
+ addi $t4,$sp,`$FRAME+$TRANSFER+16`
+ addi $t5,$np,8
+ addi $t6,$rp,8
+ mtctr $j
+
+.align 4
+Lsub: ldx $t0,$tp,$i
+ ldx $t1,$np,$i
+ ldx $t2,$t4,$i
+ ldx $t3,$t5,$i
+ subfe $t0,$t1,$t0 ; tp[j]-np[j]
+ subfe $t2,$t3,$t2 ; tp[j+1]-np[j+1]
+ stdx $t0,$rp,$i
+ stdx $t2,$t6,$i
+ addi $i,$i,16
+ bdnz Lsub
+
+ li $i,0
+ subfe $ovf,$i,$ovf ; handle upmost overflow bit
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+ addi $t7,$ap,8
+ mtctr $j
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ ldx $t0,$ap,$i
+ ldx $t1,$t7,$i
+ std $i,8($nap_d) ; zap nap_d
+ std $i,16($nap_d)
+ std $i,24($nap_d)
+ std $i,32($nap_d)
+ std $i,40($nap_d)
+ std $i,48($nap_d)
+ std $i,56($nap_d)
+ stdu $i,64($nap_d)
+ stdx $t0,$rp,$i
+ stdx $t1,$t6,$i
+ stdx $i,$tp,$i ; zap tp at once
+ stdx $i,$t4,$i
+ addi $i,$i,16
+ bdnz Lcopy
+___
+$code.=<<___ if ($SIZE_T==4);
+ subf $np,$num,$np ; rewind np
+ addi $j,$j,1 ; restore counter
+ subfc $i,$i,$i ; j=0 and "clear" XER[CA]
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ addi $np,$np,-4
+ addi $rp,$rp,-4
+ addi $ap,$sp,`$FRAME+$TRANSFER+4`
+ mtctr $j
+
+.align 4
+Lsub: lwz $t0,12($tp) ; load tp[j..j+3] in 64-bit word order
+ lwz $t1,8($tp)
+ lwz $t2,20($tp)
+ lwzu $t3,16($tp)
+ lwz $t4,4($np) ; load np[j..j+3] in 32-bit word order
+ lwz $t5,8($np)
+ lwz $t6,12($np)
+ lwzu $t7,16($np)
+ subfe $t4,$t4,$t0 ; tp[j]-np[j]
+ stw $t0,4($ap) ; save tp[j..j+3] in 32-bit word order
+ subfe $t5,$t5,$t1 ; tp[j+1]-np[j+1]
+ stw $t1,8($ap)
+ subfe $t6,$t6,$t2 ; tp[j+2]-np[j+2]
+ stw $t2,12($ap)
+ subfe $t7,$t7,$t3 ; tp[j+3]-np[j+3]
+ stwu $t3,16($ap)
+ stw $t4,4($rp)
+ stw $t5,8($rp)
+ stw $t6,12($rp)
+ stwu $t7,16($rp)
+ bdnz Lsub
+
+ li $i,0
+ subfe $ovf,$i,$ovf ; handle upmost overflow bit
+ addi $tp,$sp,`$FRAME+$TRANSFER+4`
+ subf $rp,$num,$rp ; rewind rp
+ and $ap,$tp,$ovf
+ andc $np,$rp,$ovf
+ or $ap,$ap,$np ; ap=borrow?tp:rp
+ addi $tp,$sp,`$FRAME+$TRANSFER`
+ mtctr $j
+
+.align 4
+Lcopy: ; copy or in-place refresh
+ lwz $t0,4($ap)
+ lwz $t1,8($ap)
+ lwz $t2,12($ap)
+ lwzu $t3,16($ap)
+ std $i,8($nap_d) ; zap nap_d
+ std $i,16($nap_d)
+ std $i,24($nap_d)
+ std $i,32($nap_d)
+ std $i,40($nap_d)
+ std $i,48($nap_d)
+ std $i,56($nap_d)
+ stdu $i,64($nap_d)
+ stw $t0,4($rp)
+ stw $t1,8($rp)
+ stw $t2,12($rp)
+ stwu $t3,16($rp)
+ std $i,8($tp) ; zap tp at once
+ stdu $i,16($tp)
+ bdnz Lcopy
+___
+
+$code.=<<___;
+ $POP $i,0($sp)
+ li r3,1 ; signal "handled"
+ $POP r19,`-12*8-13*$SIZE_T`($i)
+ $POP r20,`-12*8-12*$SIZE_T`($i)
+ $POP r21,`-12*8-11*$SIZE_T`($i)
+ $POP r22,`-12*8-10*$SIZE_T`($i)
+ $POP r23,`-12*8-9*$SIZE_T`($i)
+ $POP r24,`-12*8-8*$SIZE_T`($i)
+ $POP r25,`-12*8-7*$SIZE_T`($i)
+ $POP r26,`-12*8-6*$SIZE_T`($i)
+ $POP r27,`-12*8-5*$SIZE_T`($i)
+ $POP r28,`-12*8-4*$SIZE_T`($i)
+ $POP r29,`-12*8-3*$SIZE_T`($i)
+ $POP r30,`-12*8-2*$SIZE_T`($i)
+ $POP r31,`-12*8-1*$SIZE_T`($i)
+ lfd f20,`-12*8`($i)
+ lfd f21,`-11*8`($i)
+ lfd f22,`-10*8`($i)
+ lfd f23,`-9*8`($i)
+ lfd f24,`-8*8`($i)
+ lfd f25,`-7*8`($i)
+ lfd f26,`-6*8`($i)
+ lfd f27,`-5*8`($i)
+ lfd f28,`-4*8`($i)
+ lfd f29,`-3*8`($i)
+ lfd f30,`-2*8`($i)
+ lfd f31,`-1*8`($i)
+ mr $sp,$i
+ blr
+ .long 0
+ .byte 0,12,4,0,0x8c,13,6,0
+ .long 0
+.size .$fname,.-.$fname
+
+.asciz "Montgomery Multiplication for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/rsaz-avx2.pl b/openssl-1.1.0h/crypto/bn/asm/rsaz-avx2.pl
new file mode 100755
index 0000000..46d746b
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/rsaz-avx2.pl
@@ -0,0 +1,1967 @@
+#! /usr/bin/env perl
+# Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+##############################################################################
+# #
+# Copyright (c) 2012, Intel Corporation #
+# #
+# All rights reserved. #
+# #
+# Redistribution and use in source and binary forms, with or without #
+# modification, are permitted provided that the following conditions are #
+# met: #
+# #
+# * Redistributions of source code must retain the above copyright #
+# notice, this list of conditions and the following disclaimer. #
+# #
+# * Redistributions in binary form must reproduce the above copyright #
+# notice, this list of conditions and the following disclaimer in the #
+# documentation and/or other materials provided with the #
+# distribution. #
+# #
+# * Neither the name of the Intel Corporation nor the names of its #
+# contributors may be used to endorse or promote products derived from #
+# this software without specific prior written permission. #
+# #
+# #
+# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY #
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR #
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR #
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
+# #
+##############################################################################
+# Developers and authors: #
+# Shay Gueron (1, 2), and Vlad Krasnov (1) #
+# (1) Intel Corporation, Israel Development Center, Haifa, Israel #
+# (2) University of Haifa, Israel #
+##############################################################################
+# Reference: #
+# [1] S. Gueron, V. Krasnov: "Software Implementation of Modular #
+# Exponentiation, Using Advanced Vector Instructions Architectures", #
+# F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369, #
+# pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012               #
+# [2] S. Gueron: "Efficient Software Implementations of Modular #
+# Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012). #
+# [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring",IEEE #
+# Proceedings of 9th International Conference on Information Technology: #
+# New Generations (ITNG 2012), pp.821-823 (2012) #
+# [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis #
+# resistant 1024-bit modular exponentiation, for optimizing RSA2048 #
+# on AVX2 capable x86_64 platforms", #
+# http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
+##############################################################################
+#
+# +13% improvement over original submission by <appro@openssl.org>
+#
+# rsa2048 sign/sec OpenSSL 1.0.1 scalar(*) this
+# 2.3GHz Haswell 621 765/+23% 1113/+79%
+# 2.3GHz Broadwell(**) 688 1200(***)/+74% 1120/+63%
+#
+# (*) if system doesn't support AVX2, for reference purposes;
+# (**) scaled to 2.3GHz to simplify comparison;
+# (***) scalar AD*X code is faster than AVX2 and is preferred code
+# path for Broadwell;
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.19) + ($1>=2.22);
+ $addx = ($1>=2.23);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $avx = ($1>=2.09) + ($1>=2.10);
+ $addx = ($1>=2.10);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $avx = ($1>=10) + ($1>=11);
+ $addx = ($1>=11);
+}
+
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9])\.([0-9]+)/) {
+ my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
+ $avx = ($ver>=3.0) + ($ver>=3.01);
+ $addx = ($ver>=3.03);
+}
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT = *OUT;
+
+if ($avx>1) {{{
+{ # void AMS_WW(
+my $rp="%rdi"; # BN_ULONG *rp,
+my $ap="%rsi"; # const BN_ULONG *ap,
+my $np="%rdx"; # const BN_ULONG *np,
+my $n0="%ecx"; # const BN_ULONG n0,
+my $rep="%r8d"; # int repeat);
+
+# The registers that hold the accumulated redundant result
+# The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
+# Therefore: ceil(1024/29) = 36 limbs, packed 4 per register = 9 ymm registers
+my $ACC0="%ymm0";
+my $ACC1="%ymm1";
+my $ACC2="%ymm2";
+my $ACC3="%ymm3";
+my $ACC4="%ymm4";
+my $ACC5="%ymm5";
+my $ACC6="%ymm6";
+my $ACC7="%ymm7";
+my $ACC8="%ymm8";
+my $ACC9="%ymm9";
+# Registers that hold the broadcasted words of bp, currently used
+my $B1="%ymm10";
+my $B2="%ymm11";
+# Registers that hold the broadcasted words of Y, currently used
+my $Y1="%ymm12";
+my $Y2="%ymm13";
+# Helper registers
+my $TEMP1="%ymm14";
+my $AND_MASK="%ymm15";
+# alu registers that hold the first words of the ACC
+my $r0="%r9";
+my $r1="%r10";
+my $r2="%r11";
+my $r3="%r12";
+
+my $i="%r14d"; # loop counter
+my $tmp = "%r15";
+
+my $FrameSize=32*18+32*8; # place for A^2 and 2*A
+
+my $aap=$r0;
+my $tp0="%rbx";
+my $tp1=$r3;
+my $tpa=$tmp;
+
+$np="%r13"; # reassigned argument
+
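+# The 1024-bit operands are kept in a redundant form: 36 limbs of 29 bits
+# each, one limb per 64-bit lane, 4 lanes per ymm register, 9 registers in
+# total. The headroom above bit 29 lets vpmuludq partial products be
+# accumulated across iterations before normalization. A hypothetical C
+# sketch of the conversion into that form (illustration only; the function
+# name and layout code are not part of this module):
+#
+#	void to_redundant29(unsigned long long limb[36],
+#	                    const unsigned long long w[16])
+#	{
+#		int i;
+#		for (i = 0; i < 36; i++) {
+#			int bit = 29*i, idx = bit/64, off = bit%64;
+#			unsigned long long v = w[idx] >> off;
+#			if (off > 64 - 29 && idx + 1 < 16)
+#				v |= w[idx + 1] << (64 - off);
+#			limb[i] = v & ((1ULL << 29) - 1);
+#		}
+#	}
+#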
+$code.=<<___;
+.text
+
+.globl rsaz_1024_sqr_avx2
+.type rsaz_1024_sqr_avx2,\@function,5
+.align 64
+rsaz_1024_sqr_avx2: # 702 cycles, 14% faster than rsaz_1024_mul_avx2
+ lea (%rsp), %rax
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ lea -0xa8(%rsp),%rsp
+ vmovaps %xmm6,-0xd8(%rax)
+ vmovaps %xmm7,-0xc8(%rax)
+ vmovaps %xmm8,-0xb8(%rax)
+ vmovaps %xmm9,-0xa8(%rax)
+ vmovaps %xmm10,-0x98(%rax)
+ vmovaps %xmm11,-0x88(%rax)
+ vmovaps %xmm12,-0x78(%rax)
+ vmovaps %xmm13,-0x68(%rax)
+ vmovaps %xmm14,-0x58(%rax)
+ vmovaps %xmm15,-0x48(%rax)
+.Lsqr_1024_body:
+___
+$code.=<<___;
+ mov %rax,%rbp
+ mov %rdx, $np # reassigned argument
+ sub \$$FrameSize, %rsp
+ mov $np, $tmp
+ sub \$-128, $rp # size optimization
+ sub \$-128, $ap
+ sub \$-128, $np
+
+ and \$4095, $tmp # see if $np crosses page
+ add \$32*10, $tmp
+ shr \$12, $tmp
+ vpxor $ACC9,$ACC9,$ACC9
+ jz .Lsqr_1024_no_n_copy
+
+	# an unaligned 256-bit load that crosses a page boundary can
+	# cause >2x performance degradation here, so if $np does
+	# cross a page boundary, copy it to the stack and make sure the
+	# stack frame doesn't...
+ sub \$32*10,%rsp
+ vmovdqu 32*0-128($np), $ACC0
+ and \$-2048, %rsp
+ vmovdqu 32*1-128($np), $ACC1
+ vmovdqu 32*2-128($np), $ACC2
+ vmovdqu 32*3-128($np), $ACC3
+ vmovdqu 32*4-128($np), $ACC4
+ vmovdqu 32*5-128($np), $ACC5
+ vmovdqu 32*6-128($np), $ACC6
+ vmovdqu 32*7-128($np), $ACC7
+ vmovdqu 32*8-128($np), $ACC8
+ lea $FrameSize+128(%rsp),$np
+ vmovdqu $ACC0, 32*0-128($np)
+ vmovdqu $ACC1, 32*1-128($np)
+ vmovdqu $ACC2, 32*2-128($np)
+ vmovdqu $ACC3, 32*3-128($np)
+ vmovdqu $ACC4, 32*4-128($np)
+ vmovdqu $ACC5, 32*5-128($np)
+ vmovdqu $ACC6, 32*6-128($np)
+ vmovdqu $ACC7, 32*7-128($np)
+ vmovdqu $ACC8, 32*8-128($np)
+ vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero
+
+.Lsqr_1024_no_n_copy:
+ and \$-1024, %rsp
+
+ vmovdqu 32*1-128($ap), $ACC1
+ vmovdqu 32*2-128($ap), $ACC2
+ vmovdqu 32*3-128($ap), $ACC3
+ vmovdqu 32*4-128($ap), $ACC4
+ vmovdqu 32*5-128($ap), $ACC5
+ vmovdqu 32*6-128($ap), $ACC6
+ vmovdqu 32*7-128($ap), $ACC7
+ vmovdqu 32*8-128($ap), $ACC8
+
+ lea 192(%rsp), $tp0 # 64+128=192
+ vmovdqu .Land_mask(%rip), $AND_MASK
+ jmp .LOOP_GRANDE_SQR_1024
+
+.align 32
+.LOOP_GRANDE_SQR_1024:
+ lea 32*18+128(%rsp), $aap # size optimization
+ lea 448(%rsp), $tp1 # 64+128+256=448
+
+ # the squaring is performed as described in Variant B of
+ # "Speeding up Big-Number Squaring", so start by calculating
+ # the A*2=A+A vector
+ vpaddq $ACC1, $ACC1, $ACC1
+ vpbroadcastq 32*0-128($ap), $B1
+ vpaddq $ACC2, $ACC2, $ACC2
+ vmovdqa $ACC1, 32*0-128($aap)
+ vpaddq $ACC3, $ACC3, $ACC3
+ vmovdqa $ACC2, 32*1-128($aap)
+ vpaddq $ACC4, $ACC4, $ACC4
+ vmovdqa $ACC3, 32*2-128($aap)
+ vpaddq $ACC5, $ACC5, $ACC5
+ vmovdqa $ACC4, 32*3-128($aap)
+ vpaddq $ACC6, $ACC6, $ACC6
+ vmovdqa $ACC5, 32*4-128($aap)
+ vpaddq $ACC7, $ACC7, $ACC7
+ vmovdqa $ACC6, 32*5-128($aap)
+ vpaddq $ACC8, $ACC8, $ACC8
+ vmovdqa $ACC7, 32*6-128($aap)
+ vpxor $ACC9, $ACC9, $ACC9
+ vmovdqa $ACC8, 32*7-128($aap)
+
+ vpmuludq 32*0-128($ap), $B1, $ACC0
+ vpbroadcastq 32*1-128($ap), $B2
+ vmovdqu $ACC9, 32*9-192($tp0) # zero upper half
+ vpmuludq $B1, $ACC1, $ACC1
+ vmovdqu $ACC9, 32*10-448($tp1)
+ vpmuludq $B1, $ACC2, $ACC2
+ vmovdqu $ACC9, 32*11-448($tp1)
+ vpmuludq $B1, $ACC3, $ACC3
+ vmovdqu $ACC9, 32*12-448($tp1)
+ vpmuludq $B1, $ACC4, $ACC4
+ vmovdqu $ACC9, 32*13-448($tp1)
+ vpmuludq $B1, $ACC5, $ACC5
+ vmovdqu $ACC9, 32*14-448($tp1)
+ vpmuludq $B1, $ACC6, $ACC6
+ vmovdqu $ACC9, 32*15-448($tp1)
+ vpmuludq $B1, $ACC7, $ACC7
+ vmovdqu $ACC9, 32*16-448($tp1)
+ vpmuludq $B1, $ACC8, $ACC8
+ vpbroadcastq 32*2-128($ap), $B1
+ vmovdqu $ACC9, 32*17-448($tp1)
+
+ mov $ap, $tpa
+ mov \$4, $i
+ jmp .Lsqr_entry_1024
+___
+$TEMP0=$Y1;
+$TEMP2=$Y2;
+$code.=<<___;
+.align 32
+.LOOP_SQR_1024:
+ vpbroadcastq 32*1-128($tpa), $B2
+ vpmuludq 32*0-128($ap), $B1, $ACC0
+ vpaddq 32*0-192($tp0), $ACC0, $ACC0
+ vpmuludq 32*0-128($aap), $B1, $ACC1
+ vpaddq 32*1-192($tp0), $ACC1, $ACC1
+ vpmuludq 32*1-128($aap), $B1, $ACC2
+ vpaddq 32*2-192($tp0), $ACC2, $ACC2
+ vpmuludq 32*2-128($aap), $B1, $ACC3
+ vpaddq 32*3-192($tp0), $ACC3, $ACC3
+ vpmuludq 32*3-128($aap), $B1, $ACC4
+ vpaddq 32*4-192($tp0), $ACC4, $ACC4
+ vpmuludq 32*4-128($aap), $B1, $ACC5
+ vpaddq 32*5-192($tp0), $ACC5, $ACC5
+ vpmuludq 32*5-128($aap), $B1, $ACC6
+ vpaddq 32*6-192($tp0), $ACC6, $ACC6
+ vpmuludq 32*6-128($aap), $B1, $ACC7
+ vpaddq 32*7-192($tp0), $ACC7, $ACC7
+ vpmuludq 32*7-128($aap), $B1, $ACC8
+ vpbroadcastq 32*2-128($tpa), $B1
+ vpaddq 32*8-192($tp0), $ACC8, $ACC8
+.Lsqr_entry_1024:
+ vmovdqu $ACC0, 32*0-192($tp0)
+ vmovdqu $ACC1, 32*1-192($tp0)
+
+ vpmuludq 32*1-128($ap), $B2, $TEMP0
+ vpaddq $TEMP0, $ACC2, $ACC2
+ vpmuludq 32*1-128($aap), $B2, $TEMP1
+ vpaddq $TEMP1, $ACC3, $ACC3
+ vpmuludq 32*2-128($aap), $B2, $TEMP2
+ vpaddq $TEMP2, $ACC4, $ACC4
+ vpmuludq 32*3-128($aap), $B2, $TEMP0
+ vpaddq $TEMP0, $ACC5, $ACC5
+ vpmuludq 32*4-128($aap), $B2, $TEMP1
+ vpaddq $TEMP1, $ACC6, $ACC6
+ vpmuludq 32*5-128($aap), $B2, $TEMP2
+ vpaddq $TEMP2, $ACC7, $ACC7
+ vpmuludq 32*6-128($aap), $B2, $TEMP0
+ vpaddq $TEMP0, $ACC8, $ACC8
+ vpmuludq 32*7-128($aap), $B2, $ACC0
+ vpbroadcastq 32*3-128($tpa), $B2
+ vpaddq 32*9-192($tp0), $ACC0, $ACC0
+
+ vmovdqu $ACC2, 32*2-192($tp0)
+ vmovdqu $ACC3, 32*3-192($tp0)
+
+ vpmuludq 32*2-128($ap), $B1, $TEMP2
+ vpaddq $TEMP2, $ACC4, $ACC4
+ vpmuludq 32*2-128($aap), $B1, $TEMP0
+ vpaddq $TEMP0, $ACC5, $ACC5
+ vpmuludq 32*3-128($aap), $B1, $TEMP1
+ vpaddq $TEMP1, $ACC6, $ACC6
+ vpmuludq 32*4-128($aap), $B1, $TEMP2
+ vpaddq $TEMP2, $ACC7, $ACC7
+ vpmuludq 32*5-128($aap), $B1, $TEMP0
+ vpaddq $TEMP0, $ACC8, $ACC8
+ vpmuludq 32*6-128($aap), $B1, $TEMP1
+ vpaddq $TEMP1, $ACC0, $ACC0
+ vpmuludq 32*7-128($aap), $B1, $ACC1
+ vpbroadcastq 32*4-128($tpa), $B1
+ vpaddq 32*10-448($tp1), $ACC1, $ACC1
+
+ vmovdqu $ACC4, 32*4-192($tp0)
+ vmovdqu $ACC5, 32*5-192($tp0)
+
+ vpmuludq 32*3-128($ap), $B2, $TEMP0
+ vpaddq $TEMP0, $ACC6, $ACC6
+ vpmuludq 32*3-128($aap), $B2, $TEMP1
+ vpaddq $TEMP1, $ACC7, $ACC7
+ vpmuludq 32*4-128($aap), $B2, $TEMP2
+ vpaddq $TEMP2, $ACC8, $ACC8
+ vpmuludq 32*5-128($aap), $B2, $TEMP0
+ vpaddq $TEMP0, $ACC0, $ACC0
+ vpmuludq 32*6-128($aap), $B2, $TEMP1
+ vpaddq $TEMP1, $ACC1, $ACC1
+ vpmuludq 32*7-128($aap), $B2, $ACC2
+ vpbroadcastq 32*5-128($tpa), $B2
+ vpaddq 32*11-448($tp1), $ACC2, $ACC2
+
+ vmovdqu $ACC6, 32*6-192($tp0)
+ vmovdqu $ACC7, 32*7-192($tp0)
+
+ vpmuludq 32*4-128($ap), $B1, $TEMP0
+ vpaddq $TEMP0, $ACC8, $ACC8
+ vpmuludq 32*4-128($aap), $B1, $TEMP1
+ vpaddq $TEMP1, $ACC0, $ACC0
+ vpmuludq 32*5-128($aap), $B1, $TEMP2
+ vpaddq $TEMP2, $ACC1, $ACC1
+ vpmuludq 32*6-128($aap), $B1, $TEMP0
+ vpaddq $TEMP0, $ACC2, $ACC2
+ vpmuludq 32*7-128($aap), $B1, $ACC3
+ vpbroadcastq 32*6-128($tpa), $B1
+ vpaddq 32*12-448($tp1), $ACC3, $ACC3
+
+ vmovdqu $ACC8, 32*8-192($tp0)
+ vmovdqu $ACC0, 32*9-192($tp0)
+ lea 8($tp0), $tp0
+
+ vpmuludq 32*5-128($ap), $B2, $TEMP2
+ vpaddq $TEMP2, $ACC1, $ACC1
+ vpmuludq 32*5-128($aap), $B2, $TEMP0
+ vpaddq $TEMP0, $ACC2, $ACC2
+ vpmuludq 32*6-128($aap), $B2, $TEMP1
+ vpaddq $TEMP1, $ACC3, $ACC3
+ vpmuludq 32*7-128($aap), $B2, $ACC4
+ vpbroadcastq 32*7-128($tpa), $B2
+ vpaddq 32*13-448($tp1), $ACC4, $ACC4
+
+ vmovdqu $ACC1, 32*10-448($tp1)
+ vmovdqu $ACC2, 32*11-448($tp1)
+
+ vpmuludq 32*6-128($ap), $B1, $TEMP0
+ vpaddq $TEMP0, $ACC3, $ACC3
+ vpmuludq 32*6-128($aap), $B1, $TEMP1
+ vpbroadcastq 32*8-128($tpa), $ACC0 # borrow $ACC0 for $B1
+ vpaddq $TEMP1, $ACC4, $ACC4
+ vpmuludq 32*7-128($aap), $B1, $ACC5
+ vpbroadcastq 32*0+8-128($tpa), $B1 # for next iteration
+ vpaddq 32*14-448($tp1), $ACC5, $ACC5
+
+ vmovdqu $ACC3, 32*12-448($tp1)
+ vmovdqu $ACC4, 32*13-448($tp1)
+ lea 8($tpa), $tpa
+
+ vpmuludq 32*7-128($ap), $B2, $TEMP0
+ vpaddq $TEMP0, $ACC5, $ACC5
+ vpmuludq 32*7-128($aap), $B2, $ACC6
+ vpaddq 32*15-448($tp1), $ACC6, $ACC6
+
+ vpmuludq 32*8-128($ap), $ACC0, $ACC7
+ vmovdqu $ACC5, 32*14-448($tp1)
+ vpaddq 32*16-448($tp1), $ACC7, $ACC7
+ vmovdqu $ACC6, 32*15-448($tp1)
+ vmovdqu $ACC7, 32*16-448($tp1)
+ lea 8($tp1), $tp1
+
+ dec $i
+ jnz .LOOP_SQR_1024
+___
+$ZERO = $ACC9;
+$TEMP0 = $B1;
+$TEMP2 = $B2;
+$TEMP3 = $Y1;
+$TEMP4 = $Y2;
+$code.=<<___;
+ # we need to fix indices 32-39 to avoid overflow
+ vmovdqu 32*8(%rsp), $ACC8 # 32*8-192($tp0),
+ vmovdqu 32*9(%rsp), $ACC1 # 32*9-192($tp0)
+ vmovdqu 32*10(%rsp), $ACC2 # 32*10-192($tp0)
+ lea 192(%rsp), $tp0 # 64+128=192
+
+ vpsrlq \$29, $ACC8, $TEMP1
+ vpand $AND_MASK, $ACC8, $ACC8
+ vpsrlq \$29, $ACC1, $TEMP2
+ vpand $AND_MASK, $ACC1, $ACC1
+
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpxor $ZERO, $ZERO, $ZERO
+ vpermq \$0x93, $TEMP2, $TEMP2
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC8, $ACC8
+ vpblendd \$3, $TEMP2, $ZERO, $TEMP2
+ vpaddq $TEMP1, $ACC1, $ACC1
+ vpaddq $TEMP2, $ACC2, $ACC2
+ vmovdqu $ACC1, 32*9-192($tp0)
+ vmovdqu $ACC2, 32*10-192($tp0)
+
+ mov (%rsp), %rax
+ mov 8(%rsp), $r1
+ mov 16(%rsp), $r2
+ mov 24(%rsp), $r3
+ vmovdqu 32*1(%rsp), $ACC1
+ vmovdqu 32*2-192($tp0), $ACC2
+ vmovdqu 32*3-192($tp0), $ACC3
+ vmovdqu 32*4-192($tp0), $ACC4
+ vmovdqu 32*5-192($tp0), $ACC5
+ vmovdqu 32*6-192($tp0), $ACC6
+ vmovdqu 32*7-192($tp0), $ACC7
+
+ mov %rax, $r0
+ imull $n0, %eax
+ and \$0x1fffffff, %eax
+ vmovd %eax, $Y1
+
+ mov %rax, %rdx
+ imulq -128($np), %rax
+ vpbroadcastq $Y1, $Y1
+ add %rax, $r0
+ mov %rdx, %rax
+ imulq 8-128($np), %rax
+ shr \$29, $r0
+ add %rax, $r1
+ mov %rdx, %rax
+ imulq 16-128($np), %rax
+ add $r0, $r1
+ add %rax, $r2
+ imulq 24-128($np), %rdx
+ add %rdx, $r3
+
+ mov $r1, %rax
+ imull $n0, %eax
+ and \$0x1fffffff, %eax
+
+ mov \$9, $i
+ jmp .LOOP_REDUCE_1024
+
+.align 32
+.LOOP_REDUCE_1024:
+ vmovd %eax, $Y2
+ vpbroadcastq $Y2, $Y2
+
+ vpmuludq 32*1-128($np), $Y1, $TEMP0
+ mov %rax, %rdx
+ imulq -128($np), %rax
+ vpaddq $TEMP0, $ACC1, $ACC1
+ add %rax, $r1
+ vpmuludq 32*2-128($np), $Y1, $TEMP1
+ mov %rdx, %rax
+ imulq 8-128($np), %rax
+ vpaddq $TEMP1, $ACC2, $ACC2
+ vpmuludq 32*3-128($np), $Y1, $TEMP2
+ .byte 0x67
+ add %rax, $r2
+ .byte 0x67
+ mov %rdx, %rax
+ imulq 16-128($np), %rax
+ shr \$29, $r1
+ vpaddq $TEMP2, $ACC3, $ACC3
+ vpmuludq 32*4-128($np), $Y1, $TEMP0
+ add %rax, $r3
+ add $r1, $r2
+ vpaddq $TEMP0, $ACC4, $ACC4
+ vpmuludq 32*5-128($np), $Y1, $TEMP1
+ mov $r2, %rax
+ imull $n0, %eax
+ vpaddq $TEMP1, $ACC5, $ACC5
+ vpmuludq 32*6-128($np), $Y1, $TEMP2
+ and \$0x1fffffff, %eax
+ vpaddq $TEMP2, $ACC6, $ACC6
+ vpmuludq 32*7-128($np), $Y1, $TEMP0
+ vpaddq $TEMP0, $ACC7, $ACC7
+ vpmuludq 32*8-128($np), $Y1, $TEMP1
+ vmovd %eax, $Y1
+ #vmovdqu 32*1-8-128($np), $TEMP2 # moved below
+ vpaddq $TEMP1, $ACC8, $ACC8
+ #vmovdqu 32*2-8-128($np), $TEMP0 # moved below
+ vpbroadcastq $Y1, $Y1
+
+ vpmuludq 32*1-8-128($np), $Y2, $TEMP2 # see above
+ vmovdqu 32*3-8-128($np), $TEMP1
+ mov %rax, %rdx
+ imulq -128($np), %rax
+ vpaddq $TEMP2, $ACC1, $ACC1
+ vpmuludq 32*2-8-128($np), $Y2, $TEMP0 # see above
+ vmovdqu 32*4-8-128($np), $TEMP2
+ add %rax, $r2
+ mov %rdx, %rax
+ imulq 8-128($np), %rax
+ vpaddq $TEMP0, $ACC2, $ACC2
+ add $r3, %rax
+ shr \$29, $r2
+ vpmuludq $Y2, $TEMP1, $TEMP1
+ vmovdqu 32*5-8-128($np), $TEMP0
+ add $r2, %rax
+ vpaddq $TEMP1, $ACC3, $ACC3
+ vpmuludq $Y2, $TEMP2, $TEMP2
+ vmovdqu 32*6-8-128($np), $TEMP1
+ .byte 0x67
+ mov %rax, $r3
+ imull $n0, %eax
+ vpaddq $TEMP2, $ACC4, $ACC4
+ vpmuludq $Y2, $TEMP0, $TEMP0
+ .byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 # vmovdqu 32*7-8-128($np), $TEMP2
+ and \$0x1fffffff, %eax
+ vpaddq $TEMP0, $ACC5, $ACC5
+ vpmuludq $Y2, $TEMP1, $TEMP1
+ vmovdqu 32*8-8-128($np), $TEMP0
+ vpaddq $TEMP1, $ACC6, $ACC6
+ vpmuludq $Y2, $TEMP2, $TEMP2
+ vmovdqu 32*9-8-128($np), $ACC9
+ vmovd %eax, $ACC0 # borrow ACC0 for Y2
+ imulq -128($np), %rax
+ vpaddq $TEMP2, $ACC7, $ACC7
+ vpmuludq $Y2, $TEMP0, $TEMP0
+ vmovdqu 32*1-16-128($np), $TEMP1
+ vpbroadcastq $ACC0, $ACC0
+ vpaddq $TEMP0, $ACC8, $ACC8
+ vpmuludq $Y2, $ACC9, $ACC9
+ vmovdqu 32*2-16-128($np), $TEMP2
+ add %rax, $r3
+
+___
+($ACC0,$Y2)=($Y2,$ACC0);
+$code.=<<___;
+ vmovdqu 32*1-24-128($np), $ACC0
+ vpmuludq $Y1, $TEMP1, $TEMP1
+ vmovdqu 32*3-16-128($np), $TEMP0
+ vpaddq $TEMP1, $ACC1, $ACC1
+ vpmuludq $Y2, $ACC0, $ACC0
+ vpmuludq $Y1, $TEMP2, $TEMP2
+ .byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff # vmovdqu 32*4-16-128($np), $TEMP1
+ vpaddq $ACC1, $ACC0, $ACC0
+ vpaddq $TEMP2, $ACC2, $ACC2
+ vpmuludq $Y1, $TEMP0, $TEMP0
+ vmovdqu 32*5-16-128($np), $TEMP2
+ .byte 0x67
+ vmovq $ACC0, %rax
+ vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
+ vpaddq $TEMP0, $ACC3, $ACC3
+ vpmuludq $Y1, $TEMP1, $TEMP1
+ vmovdqu 32*6-16-128($np), $TEMP0
+ vpaddq $TEMP1, $ACC4, $ACC4
+ vpmuludq $Y1, $TEMP2, $TEMP2
+ vmovdqu 32*7-16-128($np), $TEMP1
+ vpaddq $TEMP2, $ACC5, $ACC5
+ vpmuludq $Y1, $TEMP0, $TEMP0
+ vmovdqu 32*8-16-128($np), $TEMP2
+ vpaddq $TEMP0, $ACC6, $ACC6
+ vpmuludq $Y1, $TEMP1, $TEMP1
+ shr \$29, $r3
+ vmovdqu 32*9-16-128($np), $TEMP0
+ add $r3, %rax
+ vpaddq $TEMP1, $ACC7, $ACC7
+ vpmuludq $Y1, $TEMP2, $TEMP2
+ #vmovdqu 32*2-24-128($np), $TEMP1 # moved below
+ mov %rax, $r0
+ imull $n0, %eax
+ vpaddq $TEMP2, $ACC8, $ACC8
+ vpmuludq $Y1, $TEMP0, $TEMP0
+ and \$0x1fffffff, %eax
+ vmovd %eax, $Y1
+ vmovdqu 32*3-24-128($np), $TEMP2
+ .byte 0x67
+ vpaddq $TEMP0, $ACC9, $ACC9
+ vpbroadcastq $Y1, $Y1
+
+ vpmuludq 32*2-24-128($np), $Y2, $TEMP1 # see above
+ vmovdqu 32*4-24-128($np), $TEMP0
+ mov %rax, %rdx
+ imulq -128($np), %rax
+ mov 8(%rsp), $r1
+ vpaddq $TEMP1, $ACC2, $ACC1
+ vpmuludq $Y2, $TEMP2, $TEMP2
+ vmovdqu 32*5-24-128($np), $TEMP1
+ add %rax, $r0
+ mov %rdx, %rax
+ imulq 8-128($np), %rax
+ .byte 0x67
+ shr \$29, $r0
+ mov 16(%rsp), $r2
+ vpaddq $TEMP2, $ACC3, $ACC2
+ vpmuludq $Y2, $TEMP0, $TEMP0
+ vmovdqu 32*6-24-128($np), $TEMP2
+ add %rax, $r1
+ mov %rdx, %rax
+ imulq 16-128($np), %rax
+ vpaddq $TEMP0, $ACC4, $ACC3
+ vpmuludq $Y2, $TEMP1, $TEMP1
+ vmovdqu 32*7-24-128($np), $TEMP0
+ imulq 24-128($np), %rdx # future $r3
+ add %rax, $r2
+ lea ($r0,$r1), %rax
+ vpaddq $TEMP1, $ACC5, $ACC4
+ vpmuludq $Y2, $TEMP2, $TEMP2
+ vmovdqu 32*8-24-128($np), $TEMP1
+ mov %rax, $r1
+ imull $n0, %eax
+ vpmuludq $Y2, $TEMP0, $TEMP0
+ vpaddq $TEMP2, $ACC6, $ACC5
+ vmovdqu 32*9-24-128($np), $TEMP2
+ and \$0x1fffffff, %eax
+ vpaddq $TEMP0, $ACC7, $ACC6
+ vpmuludq $Y2, $TEMP1, $TEMP1
+ add 24(%rsp), %rdx
+ vpaddq $TEMP1, $ACC8, $ACC7
+ vpmuludq $Y2, $TEMP2, $TEMP2
+ vpaddq $TEMP2, $ACC9, $ACC8
+ vmovq $r3, $ACC9
+ mov %rdx, $r3
+
+ dec $i
+ jnz .LOOP_REDUCE_1024
+___
+($ACC0,$Y2)=($Y2,$ACC0);
+$code.=<<___;
+ lea 448(%rsp), $tp1 # size optimization
+ vpaddq $ACC9, $Y2, $ACC0
+ vpxor $ZERO, $ZERO, $ZERO
+
+ vpaddq 32*9-192($tp0), $ACC0, $ACC0
+ vpaddq 32*10-448($tp1), $ACC1, $ACC1
+ vpaddq 32*11-448($tp1), $ACC2, $ACC2
+ vpaddq 32*12-448($tp1), $ACC3, $ACC3
+ vpaddq 32*13-448($tp1), $ACC4, $ACC4
+ vpaddq 32*14-448($tp1), $ACC5, $ACC5
+ vpaddq 32*15-448($tp1), $ACC6, $ACC6
+ vpaddq 32*16-448($tp1), $ACC7, $ACC7
+ vpaddq 32*17-448($tp1), $ACC8, $ACC8
+
+ vpsrlq \$29, $ACC0, $TEMP1
+ vpand $AND_MASK, $ACC0, $ACC0
+ vpsrlq \$29, $ACC1, $TEMP2
+ vpand $AND_MASK, $ACC1, $ACC1
+ vpsrlq \$29, $ACC2, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC2, $ACC2
+ vpsrlq \$29, $ACC3, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC3, $ACC3
+ vpermq \$0x93, $TEMP3, $TEMP3
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP4, $TEMP4
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC0, $ACC0
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC1, $ACC1
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC2, $ACC2
+ vpblendd \$3, $TEMP4, $ZERO, $TEMP4
+ vpaddq $TEMP3, $ACC3, $ACC3
+ vpaddq $TEMP4, $ACC4, $ACC4
+
+ vpsrlq \$29, $ACC0, $TEMP1
+ vpand $AND_MASK, $ACC0, $ACC0
+ vpsrlq \$29, $ACC1, $TEMP2
+ vpand $AND_MASK, $ACC1, $ACC1
+ vpsrlq \$29, $ACC2, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC2, $ACC2
+ vpsrlq \$29, $ACC3, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC3, $ACC3
+ vpermq \$0x93, $TEMP3, $TEMP3
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP4, $TEMP4
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC0, $ACC0
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC1, $ACC1
+ vmovdqu $ACC0, 32*0-128($rp)
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC2, $ACC2
+ vmovdqu $ACC1, 32*1-128($rp)
+ vpblendd \$3, $TEMP4, $ZERO, $TEMP4
+ vpaddq $TEMP3, $ACC3, $ACC3
+ vmovdqu $ACC2, 32*2-128($rp)
+ vpaddq $TEMP4, $ACC4, $ACC4
+ vmovdqu $ACC3, 32*3-128($rp)
+___
+$TEMP5=$ACC0;
+$code.=<<___;
+ vpsrlq \$29, $ACC4, $TEMP1
+ vpand $AND_MASK, $ACC4, $ACC4
+ vpsrlq \$29, $ACC5, $TEMP2
+ vpand $AND_MASK, $ACC5, $ACC5
+ vpsrlq \$29, $ACC6, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC6, $ACC6
+ vpsrlq \$29, $ACC7, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC7, $ACC7
+ vpsrlq \$29, $ACC8, $TEMP5
+ vpermq \$0x93, $TEMP3, $TEMP3
+ vpand $AND_MASK, $ACC8, $ACC8
+ vpermq \$0x93, $TEMP4, $TEMP4
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP5, $TEMP5
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC4, $ACC4
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC5, $ACC5
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC6, $ACC6
+ vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
+ vpaddq $TEMP3, $ACC7, $ACC7
+ vpaddq $TEMP4, $ACC8, $ACC8
+
+ vpsrlq \$29, $ACC4, $TEMP1
+ vpand $AND_MASK, $ACC4, $ACC4
+ vpsrlq \$29, $ACC5, $TEMP2
+ vpand $AND_MASK, $ACC5, $ACC5
+ vpsrlq \$29, $ACC6, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC6, $ACC6
+ vpsrlq \$29, $ACC7, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC7, $ACC7
+ vpsrlq \$29, $ACC8, $TEMP5
+ vpermq \$0x93, $TEMP3, $TEMP3
+ vpand $AND_MASK, $ACC8, $ACC8
+ vpermq \$0x93, $TEMP4, $TEMP4
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP5, $TEMP5
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC4, $ACC4
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC5, $ACC5
+ vmovdqu $ACC4, 32*4-128($rp)
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC6, $ACC6
+ vmovdqu $ACC5, 32*5-128($rp)
+ vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
+ vpaddq $TEMP3, $ACC7, $ACC7
+ vmovdqu $ACC6, 32*6-128($rp)
+ vpaddq $TEMP4, $ACC8, $ACC8
+ vmovdqu $ACC7, 32*7-128($rp)
+ vmovdqu $ACC8, 32*8-128($rp)
+
+ mov $rp, $ap
+ dec $rep
+ jne .LOOP_GRANDE_SQR_1024
+
+ vzeroall
+ mov %rbp, %rax
+___
+$code.=<<___ if ($win64);
+ movaps -0xd8(%rax),%xmm6
+ movaps -0xc8(%rax),%xmm7
+ movaps -0xb8(%rax),%xmm8
+ movaps -0xa8(%rax),%xmm9
+ movaps -0x98(%rax),%xmm10
+ movaps -0x88(%rax),%xmm11
+ movaps -0x78(%rax),%xmm12
+ movaps -0x68(%rax),%xmm13
+ movaps -0x58(%rax),%xmm14
+ movaps -0x48(%rax),%xmm15
+___
+$code.=<<___;
+ mov -48(%rax),%r15
+ mov -40(%rax),%r14
+ mov -32(%rax),%r13
+ mov -24(%rax),%r12
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp # restore %rsp
+.Lsqr_1024_epilogue:
+ ret
+.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
+___
+}
+
+{ # void AMM_WW(
+my $rp="%rdi"; # BN_ULONG *rp,
+my $ap="%rsi"; # const BN_ULONG *ap,
+my $bp="%rdx"; # const BN_ULONG *bp,
+my $np="%rcx"; # const BN_ULONG *np,
+my $n0="%r8d"; # unsigned int n0);
+
+# The registers that hold the accumulated redundant result.
+# The AMM works on 1024-bit operands and the redundant word (limb) size is
+# 29 bits, so the accumulator takes ceil(1024/29) = 36 limbs, which at 4
+# limbs per 256-bit register means 36/4 = 9 registers.
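+# (Since 29*35 = 1015, the topmost of those 36 limbs holds only
+# 1024-1015 = 9 significant bits; the headroom left in each 64-bit lane
+# above the 29-bit limbs is what allows carry propagation to be deferred
+# across many multiply-accumulate steps.)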
+my $ACC0="%ymm0";
+my $ACC1="%ymm1";
+my $ACC2="%ymm2";
+my $ACC3="%ymm3";
+my $ACC4="%ymm4";
+my $ACC5="%ymm5";
+my $ACC6="%ymm6";
+my $ACC7="%ymm7";
+my $ACC8="%ymm8";
+my $ACC9="%ymm9";
+
+# Registers that hold the broadcast words of the multiplier that are currently in use
+my $Bi="%ymm10";
+my $Yi="%ymm11";
+
+# Helper registers
+my $TEMP0=$ACC0;
+my $TEMP1="%ymm12";
+my $TEMP2="%ymm13";
+my $ZERO="%ymm14";
+my $AND_MASK="%ymm15";
+
+# ALU (general-purpose) registers that hold the first words of the ACC
+my $r0="%r9";
+my $r1="%r10";
+my $r2="%r11";
+my $r3="%r12";
+
+my $i="%r14d";
+my $tmp="%r15";
+
+$bp="%r13"; # reassigned argument
+
+$code.=<<___;
+.globl rsaz_1024_mul_avx2
+.type rsaz_1024_mul_avx2,\@function,5
+.align 64
+rsaz_1024_mul_avx2:
+ lea (%rsp), %rax
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+___
+$code.=<<___ if ($win64);
+ vzeroupper
+ lea -0xa8(%rsp),%rsp
+ vmovaps %xmm6,-0xd8(%rax)
+ vmovaps %xmm7,-0xc8(%rax)
+ vmovaps %xmm8,-0xb8(%rax)
+ vmovaps %xmm9,-0xa8(%rax)
+ vmovaps %xmm10,-0x98(%rax)
+ vmovaps %xmm11,-0x88(%rax)
+ vmovaps %xmm12,-0x78(%rax)
+ vmovaps %xmm13,-0x68(%rax)
+ vmovaps %xmm14,-0x58(%rax)
+ vmovaps %xmm15,-0x48(%rax)
+.Lmul_1024_body:
+___
+$code.=<<___;
+ mov %rax,%rbp
+ vzeroall
+ mov %rdx, $bp # reassigned argument
+ sub \$64,%rsp
+
+ # an unaligned 256-bit load that crosses a page boundary can
+ # cause severe performance degradation here, so if $ap does
+ # cross a page boundary, swap it with $bp [meaning that the caller
+ # is advised to lay $ap and $bp down next to each other, so
+ # that only one of them can cross a page boundary].
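+ # (the swap is harmless because multiplication is commutative; the
+ # page-crossing test below is the same as in the squaring routine:
+ # offset within the page plus 32*10 bytes, shifted right by 12, is
+ # non-zero only when the operand straddles two pages)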
+ .byte 0x67,0x67
+ mov $ap, $tmp
+ and \$4095, $tmp
+ add \$32*10, $tmp
+ shr \$12, $tmp
+ mov $ap, $tmp
+ cmovnz $bp, $ap
+ cmovnz $tmp, $bp
+
+ mov $np, $tmp
+ sub \$-128,$ap # size optimization
+ sub \$-128,$np
+ sub \$-128,$rp
+
+ and \$4095, $tmp # see if $np crosses page
+ add \$32*10, $tmp
+ .byte 0x67,0x67
+ shr \$12, $tmp
+ jz .Lmul_1024_no_n_copy
+
+ # an unaligned 256-bit load that crosses a page boundary can
+ # cause severe performance degradation here, so if $np does
+ # cross a page boundary, copy it to the stack and make sure the
+ # stack frame doesn't...
+ sub \$32*10,%rsp
+ vmovdqu 32*0-128($np), $ACC0
+ and \$-512, %rsp
+ vmovdqu 32*1-128($np), $ACC1
+ vmovdqu 32*2-128($np), $ACC2
+ vmovdqu 32*3-128($np), $ACC3
+ vmovdqu 32*4-128($np), $ACC4
+ vmovdqu 32*5-128($np), $ACC5
+ vmovdqu 32*6-128($np), $ACC6
+ vmovdqu 32*7-128($np), $ACC7
+ vmovdqu 32*8-128($np), $ACC8
+ lea 64+128(%rsp),$np
+ vmovdqu $ACC0, 32*0-128($np)
+ vpxor $ACC0, $ACC0, $ACC0
+ vmovdqu $ACC1, 32*1-128($np)
+ vpxor $ACC1, $ACC1, $ACC1
+ vmovdqu $ACC2, 32*2-128($np)
+ vpxor $ACC2, $ACC2, $ACC2
+ vmovdqu $ACC3, 32*3-128($np)
+ vpxor $ACC3, $ACC3, $ACC3
+ vmovdqu $ACC4, 32*4-128($np)
+ vpxor $ACC4, $ACC4, $ACC4
+ vmovdqu $ACC5, 32*5-128($np)
+ vpxor $ACC5, $ACC5, $ACC5
+ vmovdqu $ACC6, 32*6-128($np)
+ vpxor $ACC6, $ACC6, $ACC6
+ vmovdqu $ACC7, 32*7-128($np)
+ vpxor $ACC7, $ACC7, $ACC7
+ vmovdqu $ACC8, 32*8-128($np)
+ vmovdqa $ACC0, $ACC8
+ vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero after vzeroall
+.Lmul_1024_no_n_copy:
+ and \$-64,%rsp
+
+ mov ($bp), %rbx
+ vpbroadcastq ($bp), $Bi
+ vmovdqu $ACC0, (%rsp) # clear top of stack
+ xor $r0, $r0
+ .byte 0x67
+ xor $r1, $r1
+ xor $r2, $r2
+ xor $r3, $r3
+
+ vmovdqu .Land_mask(%rip), $AND_MASK
+ mov \$9, $i
+ vmovdqu $ACC9, 32*9-128($rp) # $ACC9 is zero after vzeroall
+ jmp .Loop_mul_1024
+
+.align 32
+.Loop_mul_1024:
+ vpsrlq \$29, $ACC3, $ACC9 # correct $ACC3(*)
+ mov %rbx, %rax
+ imulq -128($ap), %rax
+ add $r0, %rax
+ mov %rbx, $r1
+ imulq 8-128($ap), $r1
+ add 8(%rsp), $r1
+
+ mov %rax, $r0
+ imull $n0, %eax
+ and \$0x1fffffff, %eax
+
+ mov %rbx, $r2
+ imulq 16-128($ap), $r2
+ add 16(%rsp), $r2
+
+ mov %rbx, $r3
+ imulq 24-128($ap), $r3
+ add 24(%rsp), $r3
+ vpmuludq 32*1-128($ap),$Bi,$TEMP0
+ vmovd %eax, $Yi
+ vpaddq $TEMP0,$ACC1,$ACC1
+ vpmuludq 32*2-128($ap),$Bi,$TEMP1
+ vpbroadcastq $Yi, $Yi
+ vpaddq $TEMP1,$ACC2,$ACC2
+ vpmuludq 32*3-128($ap),$Bi,$TEMP2
+ vpand $AND_MASK, $ACC3, $ACC3 # correct $ACC3
+ vpaddq $TEMP2,$ACC3,$ACC3
+ vpmuludq 32*4-128($ap),$Bi,$TEMP0
+ vpaddq $TEMP0,$ACC4,$ACC4
+ vpmuludq 32*5-128($ap),$Bi,$TEMP1
+ vpaddq $TEMP1,$ACC5,$ACC5
+ vpmuludq 32*6-128($ap),$Bi,$TEMP2
+ vpaddq $TEMP2,$ACC6,$ACC6
+ vpmuludq 32*7-128($ap),$Bi,$TEMP0
+ vpermq \$0x93, $ACC9, $ACC9 # correct $ACC3
+ vpaddq $TEMP0,$ACC7,$ACC7
+ vpmuludq 32*8-128($ap),$Bi,$TEMP1
+ vpbroadcastq 8($bp), $Bi
+ vpaddq $TEMP1,$ACC8,$ACC8
+
+ mov %rax,%rdx
+ imulq -128($np),%rax
+ add %rax,$r0
+ mov %rdx,%rax
+ imulq 8-128($np),%rax
+ add %rax,$r1
+ mov %rdx,%rax
+ imulq 16-128($np),%rax
+ add %rax,$r2
+ shr \$29, $r0
+ imulq 24-128($np),%rdx
+ add %rdx,$r3
+ add $r0, $r1
+
+ vpmuludq 32*1-128($np),$Yi,$TEMP2
+ vmovq $Bi, %rbx
+ vpaddq $TEMP2,$ACC1,$ACC1
+ vpmuludq 32*2-128($np),$Yi,$TEMP0
+ vpaddq $TEMP0,$ACC2,$ACC2
+ vpmuludq 32*3-128($np),$Yi,$TEMP1
+ vpaddq $TEMP1,$ACC3,$ACC3
+ vpmuludq 32*4-128($np),$Yi,$TEMP2
+ vpaddq $TEMP2,$ACC4,$ACC4
+ vpmuludq 32*5-128($np),$Yi,$TEMP0
+ vpaddq $TEMP0,$ACC5,$ACC5
+ vpmuludq 32*6-128($np),$Yi,$TEMP1
+ vpaddq $TEMP1,$ACC6,$ACC6
+ vpmuludq 32*7-128($np),$Yi,$TEMP2
+ vpblendd \$3, $ZERO, $ACC9, $TEMP1 # correct $ACC3
+ vpaddq $TEMP2,$ACC7,$ACC7
+ vpmuludq 32*8-128($np),$Yi,$TEMP0
+ vpaddq $TEMP1, $ACC3, $ACC3 # correct $ACC3
+ vpaddq $TEMP0,$ACC8,$ACC8
+
+ mov %rbx, %rax
+ imulq -128($ap),%rax
+ add %rax,$r1
+ vmovdqu -8+32*1-128($ap),$TEMP1
+ mov %rbx, %rax
+ imulq 8-128($ap),%rax
+ add %rax,$r2
+ vmovdqu -8+32*2-128($ap),$TEMP2
+
+ mov $r1, %rax
+ vpblendd \$0xfc, $ZERO, $ACC9, $ACC9 # correct $ACC3
+ imull $n0, %eax
+ vpaddq $ACC9,$ACC4,$ACC4 # correct $ACC3
+ and \$0x1fffffff, %eax
+
+ imulq 16-128($ap),%rbx
+ add %rbx,$r3
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vmovd %eax, $Yi
+ vmovdqu -8+32*3-128($ap),$TEMP0
+ vpaddq $TEMP1,$ACC1,$ACC1
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vpbroadcastq $Yi, $Yi
+ vmovdqu -8+32*4-128($ap),$TEMP1
+ vpaddq $TEMP2,$ACC2,$ACC2
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovdqu -8+32*5-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC3,$ACC3
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vmovdqu -8+32*6-128($ap),$TEMP0
+ vpaddq $TEMP1,$ACC4,$ACC4
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vmovdqu -8+32*7-128($ap),$TEMP1
+ vpaddq $TEMP2,$ACC5,$ACC5
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovdqu -8+32*8-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC6,$ACC6
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vmovdqu -8+32*9-128($ap),$ACC9
+ vpaddq $TEMP1,$ACC7,$ACC7
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vpaddq $TEMP2,$ACC8,$ACC8
+ vpmuludq $Bi,$ACC9,$ACC9
+ vpbroadcastq 16($bp), $Bi
+
+ mov %rax,%rdx
+ imulq -128($np),%rax
+ add %rax,$r1
+ vmovdqu -8+32*1-128($np),$TEMP0
+ mov %rdx,%rax
+ imulq 8-128($np),%rax
+ add %rax,$r2
+ vmovdqu -8+32*2-128($np),$TEMP1
+ shr \$29, $r1
+ imulq 16-128($np),%rdx
+ add %rdx,$r3
+ add $r1, $r2
+
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovq $Bi, %rbx
+ vmovdqu -8+32*3-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC1,$ACC1
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vmovdqu -8+32*4-128($np),$TEMP0
+ vpaddq $TEMP1,$ACC2,$ACC2
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovdqu -8+32*5-128($np),$TEMP1
+ vpaddq $TEMP2,$ACC3,$ACC3
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovdqu -8+32*6-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC4,$ACC4
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vmovdqu -8+32*7-128($np),$TEMP0
+ vpaddq $TEMP1,$ACC5,$ACC5
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovdqu -8+32*8-128($np),$TEMP1
+ vpaddq $TEMP2,$ACC6,$ACC6
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovdqu -8+32*9-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC7,$ACC7
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vpaddq $TEMP1,$ACC8,$ACC8
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vpaddq $TEMP2,$ACC9,$ACC9
+
+ vmovdqu -16+32*1-128($ap),$TEMP0
+ mov %rbx,%rax
+ imulq -128($ap),%rax
+ add $r2,%rax
+
+ vmovdqu -16+32*2-128($ap),$TEMP1
+ mov %rax,$r2
+ imull $n0, %eax
+ and \$0x1fffffff, %eax
+
+ imulq 8-128($ap),%rbx
+ add %rbx,$r3
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovd %eax, $Yi
+ vmovdqu -16+32*3-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC1,$ACC1
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vpbroadcastq $Yi, $Yi
+ vmovdqu -16+32*4-128($ap),$TEMP0
+ vpaddq $TEMP1,$ACC2,$ACC2
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vmovdqu -16+32*5-128($ap),$TEMP1
+ vpaddq $TEMP2,$ACC3,$ACC3
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovdqu -16+32*6-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC4,$ACC4
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vmovdqu -16+32*7-128($ap),$TEMP0
+ vpaddq $TEMP1,$ACC5,$ACC5
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vmovdqu -16+32*8-128($ap),$TEMP1
+ vpaddq $TEMP2,$ACC6,$ACC6
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovdqu -16+32*9-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC7,$ACC7
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vpaddq $TEMP1,$ACC8,$ACC8
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vpbroadcastq 24($bp), $Bi
+ vpaddq $TEMP2,$ACC9,$ACC9
+
+ vmovdqu -16+32*1-128($np),$TEMP0
+ mov %rax,%rdx
+ imulq -128($np),%rax
+ add %rax,$r2
+ vmovdqu -16+32*2-128($np),$TEMP1
+ imulq 8-128($np),%rdx
+ add %rdx,$r3
+ shr \$29, $r2
+
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovq $Bi, %rbx
+ vmovdqu -16+32*3-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC1,$ACC1
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vmovdqu -16+32*4-128($np),$TEMP0
+ vpaddq $TEMP1,$ACC2,$ACC2
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovdqu -16+32*5-128($np),$TEMP1
+ vpaddq $TEMP2,$ACC3,$ACC3
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovdqu -16+32*6-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC4,$ACC4
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vmovdqu -16+32*7-128($np),$TEMP0
+ vpaddq $TEMP1,$ACC5,$ACC5
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovdqu -16+32*8-128($np),$TEMP1
+ vpaddq $TEMP2,$ACC6,$ACC6
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovdqu -16+32*9-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC7,$ACC7
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vmovdqu -24+32*1-128($ap),$TEMP0
+ vpaddq $TEMP1,$ACC8,$ACC8
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovdqu -24+32*2-128($ap),$TEMP1
+ vpaddq $TEMP2,$ACC9,$ACC9
+
+ add $r2, $r3
+ imulq -128($ap),%rbx
+ add %rbx,$r3
+
+ mov $r3, %rax
+ imull $n0, %eax
+ and \$0x1fffffff, %eax
+
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovd %eax, $Yi
+ vmovdqu -24+32*3-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC1,$ACC1
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vpbroadcastq $Yi, $Yi
+ vmovdqu -24+32*4-128($ap),$TEMP0
+ vpaddq $TEMP1,$ACC2,$ACC2
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vmovdqu -24+32*5-128($ap),$TEMP1
+ vpaddq $TEMP2,$ACC3,$ACC3
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovdqu -24+32*6-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC4,$ACC4
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vmovdqu -24+32*7-128($ap),$TEMP0
+ vpaddq $TEMP1,$ACC5,$ACC5
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vmovdqu -24+32*8-128($ap),$TEMP1
+ vpaddq $TEMP2,$ACC6,$ACC6
+ vpmuludq $Bi,$TEMP0,$TEMP0
+ vmovdqu -24+32*9-128($ap),$TEMP2
+ vpaddq $TEMP0,$ACC7,$ACC7
+ vpmuludq $Bi,$TEMP1,$TEMP1
+ vpaddq $TEMP1,$ACC8,$ACC8
+ vpmuludq $Bi,$TEMP2,$TEMP2
+ vpbroadcastq 32($bp), $Bi
+ vpaddq $TEMP2,$ACC9,$ACC9
+ add \$32, $bp # $bp++
+
+ vmovdqu -24+32*1-128($np),$TEMP0
+ imulq -128($np),%rax
+ add %rax,$r3
+ shr \$29, $r3
+
+ vmovdqu -24+32*2-128($np),$TEMP1
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovq $Bi, %rbx
+ vmovdqu -24+32*3-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC1,$ACC0 # $ACC0==$TEMP0
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
+ vpaddq $TEMP1,$ACC2,$ACC1
+ vmovdqu -24+32*4-128($np),$TEMP0
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovdqu -24+32*5-128($np),$TEMP1
+ vpaddq $TEMP2,$ACC3,$ACC2
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovdqu -24+32*6-128($np),$TEMP2
+ vpaddq $TEMP0,$ACC4,$ACC3
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ vmovdqu -24+32*7-128($np),$TEMP0
+ vpaddq $TEMP1,$ACC5,$ACC4
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovdqu -24+32*8-128($np),$TEMP1
+ vpaddq $TEMP2,$ACC6,$ACC5
+ vpmuludq $Yi,$TEMP0,$TEMP0
+ vmovdqu -24+32*9-128($np),$TEMP2
+ mov $r3, $r0
+ vpaddq $TEMP0,$ACC7,$ACC6
+ vpmuludq $Yi,$TEMP1,$TEMP1
+ add (%rsp), $r0
+ vpaddq $TEMP1,$ACC8,$ACC7
+ vpmuludq $Yi,$TEMP2,$TEMP2
+ vmovq $r3, $TEMP1
+ vpaddq $TEMP2,$ACC9,$ACC8
+
+ dec $i
+ jnz .Loop_mul_1024
+___
+
+# (*) The original implementation corrected ACC1-ACC3 for overflow
+# after 7 loop runs, i.e. after 28 iterations, or 56 additions.
+# But since we underutilize resources anyway, it's possible to correct
+# in each iteration with only marginal performance loss. And because
+# the correction then happens in every iteration, fewer digits need to
+# be corrected each time, which avoids the performance penalty completely.
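+# (The arithmetic behind those numbers appears to be: the loop consumes 4
+# b-words per run, so 7 runs = 28 iterations; every iteration adds two
+# products per 64-bit lane, one from the multiplicand and one from the
+# modulus, hence 56 additions; and since each product of two 29-bit
+# digits is below 2^58, a 64-bit lane only has headroom for roughly
+# 2^6 = 64 such additions before it could wrap.)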
+
+$TEMP0 = $ACC9;
+$TEMP3 = $Bi;
+$TEMP4 = $Yi;
+$code.=<<___;
+ vpaddq (%rsp), $TEMP1, $ACC0
+
+ vpsrlq \$29, $ACC0, $TEMP1
+ vpand $AND_MASK, $ACC0, $ACC0
+ vpsrlq \$29, $ACC1, $TEMP2
+ vpand $AND_MASK, $ACC1, $ACC1
+ vpsrlq \$29, $ACC2, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC2, $ACC2
+ vpsrlq \$29, $ACC3, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC3, $ACC3
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP3, $TEMP3
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpermq \$0x93, $TEMP4, $TEMP4
+ vpaddq $TEMP0, $ACC0, $ACC0
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC1, $ACC1
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC2, $ACC2
+ vpblendd \$3, $TEMP4, $ZERO, $TEMP4
+ vpaddq $TEMP3, $ACC3, $ACC3
+ vpaddq $TEMP4, $ACC4, $ACC4
+
+ vpsrlq \$29, $ACC0, $TEMP1
+ vpand $AND_MASK, $ACC0, $ACC0
+ vpsrlq \$29, $ACC1, $TEMP2
+ vpand $AND_MASK, $ACC1, $ACC1
+ vpsrlq \$29, $ACC2, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC2, $ACC2
+ vpsrlq \$29, $ACC3, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC3, $ACC3
+ vpermq \$0x93, $TEMP3, $TEMP3
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP4, $TEMP4
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC0, $ACC0
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC1, $ACC1
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC2, $ACC2
+ vpblendd \$3, $TEMP4, $ZERO, $TEMP4
+ vpaddq $TEMP3, $ACC3, $ACC3
+ vpaddq $TEMP4, $ACC4, $ACC4
+
+ vmovdqu $ACC0, 0-128($rp)
+ vmovdqu $ACC1, 32-128($rp)
+ vmovdqu $ACC2, 64-128($rp)
+ vmovdqu $ACC3, 96-128($rp)
+___
+
+$TEMP5=$ACC0;
+$code.=<<___;
+ vpsrlq \$29, $ACC4, $TEMP1
+ vpand $AND_MASK, $ACC4, $ACC4
+ vpsrlq \$29, $ACC5, $TEMP2
+ vpand $AND_MASK, $ACC5, $ACC5
+ vpsrlq \$29, $ACC6, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC6, $ACC6
+ vpsrlq \$29, $ACC7, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC7, $ACC7
+ vpsrlq \$29, $ACC8, $TEMP5
+ vpermq \$0x93, $TEMP3, $TEMP3
+ vpand $AND_MASK, $ACC8, $ACC8
+ vpermq \$0x93, $TEMP4, $TEMP4
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP5, $TEMP5
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC4, $ACC4
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC5, $ACC5
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC6, $ACC6
+ vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
+ vpaddq $TEMP3, $ACC7, $ACC7
+ vpaddq $TEMP4, $ACC8, $ACC8
+
+ vpsrlq \$29, $ACC4, $TEMP1
+ vpand $AND_MASK, $ACC4, $ACC4
+ vpsrlq \$29, $ACC5, $TEMP2
+ vpand $AND_MASK, $ACC5, $ACC5
+ vpsrlq \$29, $ACC6, $TEMP3
+ vpermq \$0x93, $TEMP1, $TEMP1
+ vpand $AND_MASK, $ACC6, $ACC6
+ vpsrlq \$29, $ACC7, $TEMP4
+ vpermq \$0x93, $TEMP2, $TEMP2
+ vpand $AND_MASK, $ACC7, $ACC7
+ vpsrlq \$29, $ACC8, $TEMP5
+ vpermq \$0x93, $TEMP3, $TEMP3
+ vpand $AND_MASK, $ACC8, $ACC8
+ vpermq \$0x93, $TEMP4, $TEMP4
+
+ vpblendd \$3, $ZERO, $TEMP1, $TEMP0
+ vpermq \$0x93, $TEMP5, $TEMP5
+ vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
+ vpaddq $TEMP0, $ACC4, $ACC4
+ vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
+ vpaddq $TEMP1, $ACC5, $ACC5
+ vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
+ vpaddq $TEMP2, $ACC6, $ACC6
+ vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
+ vpaddq $TEMP3, $ACC7, $ACC7
+ vpaddq $TEMP4, $ACC8, $ACC8
+
+ vmovdqu $ACC4, 128-128($rp)
+ vmovdqu $ACC5, 160-128($rp)
+ vmovdqu $ACC6, 192-128($rp)
+ vmovdqu $ACC7, 224-128($rp)
+ vmovdqu $ACC8, 256-128($rp)
+ vzeroupper
+
+ mov %rbp, %rax
+___
+$code.=<<___ if ($win64);
+ movaps -0xd8(%rax),%xmm6
+ movaps -0xc8(%rax),%xmm7
+ movaps -0xb8(%rax),%xmm8
+ movaps -0xa8(%rax),%xmm9
+ movaps -0x98(%rax),%xmm10
+ movaps -0x88(%rax),%xmm11
+ movaps -0x78(%rax),%xmm12
+ movaps -0x68(%rax),%xmm13
+ movaps -0x58(%rax),%xmm14
+ movaps -0x48(%rax),%xmm15
+___
+$code.=<<___;
+ mov -48(%rax),%r15
+ mov -40(%rax),%r14
+ mov -32(%rax),%r13
+ mov -24(%rax),%r12
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ lea (%rax),%rsp # restore %rsp
+.Lmul_1024_epilogue:
+ ret
+.size rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
+___
+}
+{
+my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
+my @T = map("%r$_",(8..11));
+
+$code.=<<___;
+.globl rsaz_1024_red2norm_avx2
+.type rsaz_1024_red2norm_avx2,\@abi-omnipotent
+.align 32
+rsaz_1024_red2norm_avx2:
+ sub \$-128,$inp # size optimization
+ xor %rax,%rax
+___
+
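+# The generated code converts from the redundant 29-bit-limb representation
+# back to 16 normal 64-bit words: for each output word it loads every limb
+# whose 29-bit slot overlaps that word, shifts each into position, sums
+# them in %rax, and carries the spill-over of the last limb (plus the
+# addition carry) into the next word. Shift counts are reduced mod 64 by
+# the output filter at the bottom of the file.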
+for ($j=0,$i=0; $i<16; $i++) {
+ my $k=0;
+ while (29*$j<64*($i+1)) { # load data till boundary
+ $code.=" mov `8*$j-128`($inp), @T[0]\n";
+ $j++; $k++; push(@T,shift(@T));
+ }
+ $l=$k;
+ while ($k>1) { # shift all loaded values except the last
+ $code.=" shl \$`29*($j-$k)`,@T[-$k]\n";
+ $k--;
+ }
+ $code.=<<___; # shift last value
+ mov @T[-1], @T[0]
+ shl \$`29*($j-1)`, @T[-1]
+ shr \$`-29*($j-1)`, @T[0]
+___
+ while ($l) { # accumulate all values
+ $code.=" add @T[-$l], %rax\n";
+ $l--;
+ }
+ $code.=<<___;
+ adc \$0, @T[0] # absorb the possible carry
+ mov %rax, 8*$i($out)
+ mov @T[0], %rax
+___
+ push(@T,shift(@T));
+}
+$code.=<<___;
+ ret
+.size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
+
+.globl rsaz_1024_norm2red_avx2
+.type rsaz_1024_norm2red_avx2,\@abi-omnipotent
+.align 32
+rsaz_1024_norm2red_avx2:
+ sub \$-128,$out # size optimization
+ mov ($inp),@T[0]
+ mov \$0x1fffffff,%eax
+___
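+# The generated code performs the inverse conversion: it walks the 16 input
+# words and extracts consecutive 29-bit limbs with shifts (shrd across the
+# 64-bit word boundaries) and an AND with 0x1fffffff, storing one limb per
+# 64-bit word of the output and padding the tail with zero limbs.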
+for ($j=0,$i=0; $i<16; $i++) {
+ $code.=" mov `8*($i+1)`($inp),@T[1]\n" if ($i<15);
+ $code.=" xor @T[1],@T[1]\n" if ($i==15);
+ my $k=1;
+ while (29*($j+1)<64*($i+1)) {
+ $code.=<<___;
+ mov @T[0],@T[-$k]
+ shr \$`29*$j`,@T[-$k]
+ and %rax,@T[-$k] # &0x1fffffff
+ mov @T[-$k],`8*$j-128`($out)
+___
+ $j++; $k++;
+ }
+ $code.=<<___;
+ shrd \$`29*$j`,@T[1],@T[0]
+ and %rax,@T[0]
+ mov @T[0],`8*$j-128`($out)
+___
+ $j++;
+ push(@T,shift(@T));
+}
+$code.=<<___;
+ mov @T[0],`8*$j-128`($out) # zero
+ mov @T[0],`8*($j+1)-128`($out)
+ mov @T[0],`8*($j+2)-128`($out)
+ mov @T[0],`8*($j+3)-128`($out)
+ ret
+.size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
+___
+}
+{
+my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
+
+$code.=<<___;
+.globl rsaz_1024_scatter5_avx2
+.type rsaz_1024_scatter5_avx2,\@abi-omnipotent
+.align 32
+rsaz_1024_scatter5_avx2:
+ vzeroupper
+ vmovdqu .Lscatter_permd(%rip),%ymm5
+ shl \$4,$power
+ lea ($out,$power),$out
+ mov \$9,%eax
+ jmp .Loop_scatter_1024
+
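+ # each 32-byte vector of the redundant form holds four limbs below
+ # 2^29, so vpermd packs their low dwords together and a 16-byte slot
+ # per table entry suffices; consecutive chunks of one entry are laid
+ # out 16*32 bytes apart, interleaving the 32 entries of the table for
+ # the constant-time gather below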
+.align 32
+.Loop_scatter_1024:
+ vmovdqu ($inp),%ymm0
+ lea 32($inp),$inp
+ vpermd %ymm0,%ymm5,%ymm0
+ vmovdqu %xmm0,($out)
+ lea 16*32($out),$out
+ dec %eax
+ jnz .Loop_scatter_1024
+
+ vzeroupper
+ ret
+.size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
+
+.globl rsaz_1024_gather5_avx2
+.type rsaz_1024_gather5_avx2,\@abi-omnipotent
+.align 32
+rsaz_1024_gather5_avx2:
+ vzeroupper
+ mov %rsp,%r11
+___
+$code.=<<___ if ($win64);
+ lea -0x88(%rsp),%rax
+.LSEH_begin_rsaz_1024_gather5:
+ # I can't trust assembler to use specific encoding:-(
+ .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax),%rsp
+ .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6,-0x20(%rax)
+ .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7,-0x10(%rax)
+ .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8,0(%rax)
+ .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9,0x10(%rax)
+ .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10,0x20(%rax)
+ .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11,0x30(%rax)
+ .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12,0x40(%rax)
+ .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13,0x50(%rax)
+ .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14,0x60(%rax)
+ .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15,0x70(%rax)
+___
+$code.=<<___;
+ lea -0x100(%rsp),%rsp
+ and \$-32, %rsp
+ lea .Linc(%rip), %r10
+ lea -128(%rsp),%rax # control u-op density
+
+ vmovd $power, %xmm4
+ vmovdqa (%r10),%ymm0
+ vmovdqa 32(%r10),%ymm1
+ vmovdqa 64(%r10),%ymm5
+ vpbroadcastd %xmm4,%ymm4
+
+ vpaddd %ymm5, %ymm0, %ymm2
+ vpcmpeqd %ymm4, %ymm0, %ymm0
+ vpaddd %ymm5, %ymm1, %ymm3
+ vpcmpeqd %ymm4, %ymm1, %ymm1
+ vmovdqa %ymm0, 32*0+128(%rax)
+ vpaddd %ymm5, %ymm2, %ymm0
+ vpcmpeqd %ymm4, %ymm2, %ymm2
+ vmovdqa %ymm1, 32*1+128(%rax)
+ vpaddd %ymm5, %ymm3, %ymm1
+ vpcmpeqd %ymm4, %ymm3, %ymm3
+ vmovdqa %ymm2, 32*2+128(%rax)
+ vpaddd %ymm5, %ymm0, %ymm2
+ vpcmpeqd %ymm4, %ymm0, %ymm0
+ vmovdqa %ymm3, 32*3+128(%rax)
+ vpaddd %ymm5, %ymm1, %ymm3
+ vpcmpeqd %ymm4, %ymm1, %ymm1
+ vmovdqa %ymm0, 32*4+128(%rax)
+ vpaddd %ymm5, %ymm2, %ymm8
+ vpcmpeqd %ymm4, %ymm2, %ymm2
+ vmovdqa %ymm1, 32*5+128(%rax)
+ vpaddd %ymm5, %ymm3, %ymm9
+ vpcmpeqd %ymm4, %ymm3, %ymm3
+ vmovdqa %ymm2, 32*6+128(%rax)
+ vpaddd %ymm5, %ymm8, %ymm10
+ vpcmpeqd %ymm4, %ymm8, %ymm8
+ vmovdqa %ymm3, 32*7+128(%rax)
+ vpaddd %ymm5, %ymm9, %ymm11
+ vpcmpeqd %ymm4, %ymm9, %ymm9
+ vpaddd %ymm5, %ymm10, %ymm12
+ vpcmpeqd %ymm4, %ymm10, %ymm10
+ vpaddd %ymm5, %ymm11, %ymm13
+ vpcmpeqd %ymm4, %ymm11, %ymm11
+ vpaddd %ymm5, %ymm12, %ymm14
+ vpcmpeqd %ymm4, %ymm12, %ymm12
+ vpaddd %ymm5, %ymm13, %ymm15
+ vpcmpeqd %ymm4, %ymm13, %ymm13
+ vpcmpeqd %ymm4, %ymm14, %ymm14
+ vpcmpeqd %ymm4, %ymm15, %ymm15
+
+ vmovdqa -32(%r10),%ymm7 # .Lgather_permd
+ lea 128($inp), $inp
+ mov \$9,$power
+
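+ # constant-time selection: every iteration reads all 32 table entries
+ # (sixteen 32-byte loads, each covering two 16-byte entries), ANDs
+ # each with a precomputed equality mask that is all-ones only for the
+ # requested power, and ORs everything together, so the memory access
+ # pattern does not depend on the secret index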
+.Loop_gather_1024:
+ vmovdqa 32*0-128($inp), %ymm0
+ vmovdqa 32*1-128($inp), %ymm1
+ vmovdqa 32*2-128($inp), %ymm2
+ vmovdqa 32*3-128($inp), %ymm3
+ vpand 32*0+128(%rax), %ymm0, %ymm0
+ vpand 32*1+128(%rax), %ymm1, %ymm1
+ vpand 32*2+128(%rax), %ymm2, %ymm2
+ vpor %ymm0, %ymm1, %ymm4
+ vpand 32*3+128(%rax), %ymm3, %ymm3
+ vmovdqa 32*4-128($inp), %ymm0
+ vmovdqa 32*5-128($inp), %ymm1
+ vpor %ymm2, %ymm3, %ymm5
+ vmovdqa 32*6-128($inp), %ymm2
+ vmovdqa 32*7-128($inp), %ymm3
+ vpand 32*4+128(%rax), %ymm0, %ymm0
+ vpand 32*5+128(%rax), %ymm1, %ymm1
+ vpand 32*6+128(%rax), %ymm2, %ymm2
+ vpor %ymm0, %ymm4, %ymm4
+ vpand 32*7+128(%rax), %ymm3, %ymm3
+ vpand 32*8-128($inp), %ymm8, %ymm0
+ vpor %ymm1, %ymm5, %ymm5
+ vpand 32*9-128($inp), %ymm9, %ymm1
+ vpor %ymm2, %ymm4, %ymm4
+ vpand 32*10-128($inp),%ymm10, %ymm2
+ vpor %ymm3, %ymm5, %ymm5
+ vpand 32*11-128($inp),%ymm11, %ymm3
+ vpor %ymm0, %ymm4, %ymm4
+ vpand 32*12-128($inp),%ymm12, %ymm0
+ vpor %ymm1, %ymm5, %ymm5
+ vpand 32*13-128($inp),%ymm13, %ymm1
+ vpor %ymm2, %ymm4, %ymm4
+ vpand 32*14-128($inp),%ymm14, %ymm2
+ vpor %ymm3, %ymm5, %ymm5
+ vpand 32*15-128($inp),%ymm15, %ymm3
+ lea 32*16($inp), $inp
+ vpor %ymm0, %ymm4, %ymm4
+ vpor %ymm1, %ymm5, %ymm5
+ vpor %ymm2, %ymm4, %ymm4
+ vpor %ymm3, %ymm5, %ymm5
+
+ vpor %ymm5, %ymm4, %ymm4
+ vextracti128 \$1, %ymm4, %xmm5 # upper half is cleared
+ vpor %xmm4, %xmm5, %xmm5
+ vpermd %ymm5,%ymm7,%ymm5
+ vmovdqu %ymm5,($out)
+ lea 32($out),$out
+ dec $power
+ jnz .Loop_gather_1024
+
+ vpxor %ymm0,%ymm0,%ymm0
+ vmovdqu %ymm0,($out)
+ vzeroupper
+___
+$code.=<<___ if ($win64);
+ movaps -0xa8(%r11),%xmm6
+ movaps -0x98(%r11),%xmm7
+ movaps -0x88(%r11),%xmm8
+ movaps -0x78(%r11),%xmm9
+ movaps -0x68(%r11),%xmm10
+ movaps -0x58(%r11),%xmm11
+ movaps -0x48(%r11),%xmm12
+ movaps -0x38(%r11),%xmm13
+ movaps -0x28(%r11),%xmm14
+ movaps -0x18(%r11),%xmm15
+.LSEH_end_rsaz_1024_gather5:
+___
+$code.=<<___;
+ lea (%r11),%rsp
+ ret
+.size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
+___
+}
+
+$code.=<<___;
+.extern OPENSSL_ia32cap_P
+.globl rsaz_avx2_eligible
+.type rsaz_avx2_eligible,\@abi-omnipotent
+.align 32
+rsaz_avx2_eligible:
+ mov OPENSSL_ia32cap_P+8(%rip),%eax
+___
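+# When both BMI2 and ADX are available the function reports "not eligible",
+# presumably so that the MULX/ADCX-based Montgomery code is preferred;
+# otherwise eligibility is simply bit 5 (AVX2) of the third
+# OPENSSL_ia32cap_P dword.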
+$code.=<<___ if ($addx);
+ mov \$`1<<8|1<<19`,%ecx
+ mov \$0,%edx
+ and %eax,%ecx
+ cmp \$`1<<8|1<<19`,%ecx # check for BMI2+AD*X
+ cmove %edx,%eax
+___
+$code.=<<___;
+ and \$`1<<5`,%eax
+ shr \$5,%eax
+ ret
+.size rsaz_avx2_eligible,.-rsaz_avx2_eligible
+
+.align 64
+.Land_mask:
+ .quad 0x1fffffff,0x1fffffff,0x1fffffff,0x1fffffff
+.Lscatter_permd:
+ .long 0,2,4,6,7,7,7,7
+.Lgather_permd:
+ .long 0,7,1,7,2,7,3,7
+.Linc:
+ .long 0,0,0,0, 1,1,1,1
+ .long 2,2,2,2, 3,3,3,3
+ .long 4,4,4,4, 4,4,4,4
+.align 64
+___
+
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___
+.extern __imp_RtlVirtualUnwind
+.type rsaz_se_handler,\@abi-omnipotent
+.align 16
+rsaz_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 160($context),%rax # pull context->Rbp
+
+ mov -48(%rax),%r15
+ mov -40(%rax),%r14
+ mov -32(%rax),%r13
+ mov -24(%rax),%r12
+ mov -16(%rax),%rbp
+ mov -8(%rax),%rbx
+ mov %r15,240($context)
+ mov %r14,232($context)
+ mov %r13,224($context)
+ mov %r12,216($context)
+ mov %rbp,160($context)
+ mov %rbx,144($context)
+
+ lea -0xd8(%rax),%rsi # %xmm save area
+ lea 512($context),%rdi # & context.Xmm6
+ mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
+ .long 0xa548f3fc # cld; rep movsq
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT) in qwords
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size rsaz_se_handler,.-rsaz_se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_rsaz_1024_sqr_avx2
+ .rva .LSEH_end_rsaz_1024_sqr_avx2
+ .rva .LSEH_info_rsaz_1024_sqr_avx2
+
+ .rva .LSEH_begin_rsaz_1024_mul_avx2
+ .rva .LSEH_end_rsaz_1024_mul_avx2
+ .rva .LSEH_info_rsaz_1024_mul_avx2
+
+ .rva .LSEH_begin_rsaz_1024_gather5
+ .rva .LSEH_end_rsaz_1024_gather5
+ .rva .LSEH_info_rsaz_1024_gather5
+.section .xdata
+.align 8
+.LSEH_info_rsaz_1024_sqr_avx2:
+ .byte 9,0,0,0
+ .rva rsaz_se_handler
+ .rva .Lsqr_1024_body,.Lsqr_1024_epilogue
+.LSEH_info_rsaz_1024_mul_avx2:
+ .byte 9,0,0,0
+ .rva rsaz_se_handler
+ .rva .Lmul_1024_body,.Lmul_1024_epilogue
+.LSEH_info_rsaz_1024_gather5:
+ .byte 0x01,0x36,0x17,0x0b
+ .byte 0x36,0xf8,0x09,0x00 # vmovaps 0x90(rsp),xmm15
+ .byte 0x31,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14
+ .byte 0x2c,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13
+ .byte 0x27,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12
+ .byte 0x22,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11
+ .byte 0x1d,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10
+ .byte 0x18,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9
+ .byte 0x13,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8
+ .byte 0x0e,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7
+ .byte 0x09,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6
+ .byte 0x04,0x01,0x15,0x00 # sub rsp,0xa8
+ .byte 0x00,0xb3,0x00,0x00 # set_frame r11
+___
+}
+
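+# Post-process the generated code: evaluate constant expressions enclosed
+# in backticks, reduce shift counts modulo 64, and rewrite %ymm operands as
+# the corresponding %xmm registers for instructions that only take the
+# 128-bit form (vmovd/vmovq, vpinsrd/q, vpextrd/q, vpbroadcast sources).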
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval($1)/ge;
+
+ s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge or
+
+ s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
+ s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
+ print $_,"\n";
+}
+
+}}} else {{{
+print <<___; # assembler is too old
+.text
+
+.globl rsaz_avx2_eligible
+.type rsaz_avx2_eligible,\@abi-omnipotent
+rsaz_avx2_eligible:
+ xor %eax,%eax
+ ret
+.size rsaz_avx2_eligible,.-rsaz_avx2_eligible
+
+.globl rsaz_1024_sqr_avx2
+.globl rsaz_1024_mul_avx2
+.globl rsaz_1024_norm2red_avx2
+.globl rsaz_1024_red2norm_avx2
+.globl rsaz_1024_scatter5_avx2
+.globl rsaz_1024_gather5_avx2
+.type rsaz_1024_sqr_avx2,\@abi-omnipotent
+rsaz_1024_sqr_avx2:
+rsaz_1024_mul_avx2:
+rsaz_1024_norm2red_avx2:
+rsaz_1024_red2norm_avx2:
+rsaz_1024_scatter5_avx2:
+rsaz_1024_gather5_avx2:
+ .byte 0x0f,0x0b # ud2
+ ret
+.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
+___
+}}}
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/rsaz-x86_64.pl b/openssl-1.1.0h/crypto/bn/asm/rsaz-x86_64.pl
new file mode 100755
index 0000000..6f3b664
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/rsaz-x86_64.pl
@@ -0,0 +1,2358 @@
+#! /usr/bin/env perl
+# Copyright 2013-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+##############################################################################
+# #
+# Copyright (c) 2012, Intel Corporation #
+# #
+# All rights reserved. #
+# #
+# Redistribution and use in source and binary forms, with or without #
+# modification, are permitted provided that the following conditions are #
+# met: #
+# #
+# * Redistributions of source code must retain the above copyright #
+# notice, this list of conditions and the following disclaimer. #
+# #
+# * Redistributions in binary form must reproduce the above copyright #
+# notice, this list of conditions and the following disclaimer in the #
+# documentation and/or other materials provided with the #
+# distribution. #
+# #
+# * Neither the name of the Intel Corporation nor the names of its #
+# contributors may be used to endorse or promote products derived from #
+# this software without specific prior written permission. #
+# #
+# #
+# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY #
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR #
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR #
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
+# #
+##############################################################################
+# Developers and authors: #
+# Shay Gueron (1, 2), and Vlad Krasnov (1) #
+# (1) Intel Architecture Group, Microprocessor and Chipset Development, #
+# Israel Development Center, Haifa, Israel #
+# (2) University of Haifa #
+##############################################################################
+# Reference: #
+# [1] S. Gueron, "Efficient Software Implementations of Modular #
+# Exponentiation", http://eprint.iacr.org/2011/239 #
+# [2] S. Gueron, V. Krasnov. "Speeding up Big-Numbers Squaring". #
+# IEEE Proceedings of 9th International Conference on Information #
+# Technology: New Generations (ITNG 2012), 821-823 (2012). #
+# [3] S. Gueron, Efficient Software Implementations of Modular Exponentiation#
+# Journal of Cryptographic Engineering 2:31-43 (2012). #
+# [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis #
+# resistant 512-bit and 1024-bit modular exponentiation for optimizing #
+# RSA1024 and RSA2048 on x86_64 platforms", #
+# http://rt.openssl.org/Ticket/Display.html?id=2582&user=guest&pass=guest#
+##############################################################################
+
+# While the original submission covers 512- and 1024-bit exponentiation,
+# this module is limited to the 512-bit version only (and as such
+# accelerates RSA1024 sign). This is because the improvement for longer
+# keys is not high enough to justify the effort; the highest measured
+# gain was ~5% on Westmere. [This is relative to OpenSSL 1.0.2, upcoming
+# at the moment of this writing!] Nor does this module implement a
+# "monolithic" complete-exponentiation jumbo-subroutine; it adheres
+# to a more modular mixture of C and assembly. And it's optimized even
+# for processors other than the Intel Core family (see the table below
+# for improvement coefficients).
+# <appro@openssl.org>
+#
+# RSA1024 sign/sec this/original |this/rsax(*) this/fips(*)
+# ----------------+---------------------------
+# Opteron +13% |+5% +20%
+# Bulldozer -0% |-1% +10%
+# P4 +11% |+7% +8%
+# Westmere +5% |+14% +17%
+# Sandy Bridge +2% |+12% +29%
+# Ivy Bridge +1% |+11% +35%
+# Haswell(**) -0% |+12% +39%
+# Atom +13% |+11% +4%
+# VIA Nano +70% |+9% +25%
+#
+# (*) rsax engine and fips numbers are presented for reference
+# purposes;
+# (**) MULX was attempted, but was found to give only marginal improvement.
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
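+# Probe the assembler (gas, nasm, masm, or an LLVM-based driver) to decide
+# whether it is recent enough to accept the ADCX/ADOX/MULX mnemonics;
+# $addx then gates emission of those code paths.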
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.23);
+}
+
+if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.10);
+}
+
+if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $addx = ($1>=12);
+}
+
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
+ my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
+ $addx = ($ver>=3.03);
+}
+
+($out, $inp, $mod) = ("%rdi", "%rsi", "%rbp"); # common internal API
+{
+my ($out,$inp,$mod,$n0,$times) = ("%rdi","%rsi","%rdx","%rcx","%r8d");
+
+$code.=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.globl rsaz_512_sqr
+.type rsaz_512_sqr,\@function,5
+.align 32
+rsaz_512_sqr: # 25-29% faster than rsaz_512_mul
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ subq \$128+24, %rsp
+.Lsqr_body:
+ movq $mod, %rbp # common argument
+ movq ($inp), %rdx
+ movq 8($inp), %rax
+ movq $n0, 128(%rsp)
+___
+$code.=<<___ if ($addx);
+ movl \$0x80100,%r11d
+ andl OPENSSL_ia32cap_P+8(%rip),%r11d
+ cmpl \$0x80100,%r11d # check for MULX and ADCX/ADOX
+ je .Loop_sqrx
+___
+$code.=<<___;
+ jmp .Loop_sqr
+
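+ # each pass of .Loop_sqr computes the full 512x512-bit square by
+ # schoolbook multiplication, doubling the cross products on the fly
+ # via the adc/shld-style chains, stores the 1024-bit result at
+ # 0-120(%rsp), Montgomery-reduces it with __rsaz_512_reduce, folds in
+ # the upper half and does a final conditional subtraction
+ # (__rsaz_512_subtract); repeated squarings are chained by feeding the
+ # output back in as the next input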
+.align 32
+.Loop_sqr:
+ movl $times,128+8(%rsp)
+#first iteration
+ movq %rdx, %rbx
+ mulq %rdx
+ movq %rax, %r8
+ movq 16($inp), %rax
+ movq %rdx, %r9
+
+ mulq %rbx
+ addq %rax, %r9
+ movq 24($inp), %rax
+ movq %rdx, %r10
+ adcq \$0, %r10
+
+ mulq %rbx
+ addq %rax, %r10
+ movq 32($inp), %rax
+ movq %rdx, %r11
+ adcq \$0, %r11
+
+ mulq %rbx
+ addq %rax, %r11
+ movq 40($inp), %rax
+ movq %rdx, %r12
+ adcq \$0, %r12
+
+ mulq %rbx
+ addq %rax, %r12
+ movq 48($inp), %rax
+ movq %rdx, %r13
+ adcq \$0, %r13
+
+ mulq %rbx
+ addq %rax, %r13
+ movq 56($inp), %rax
+ movq %rdx, %r14
+ adcq \$0, %r14
+
+ mulq %rbx
+ addq %rax, %r14
+ movq %rbx, %rax
+ movq %rdx, %r15
+ adcq \$0, %r15
+
+ addq %r8, %r8 #shlq \$1, %r8
+ movq %r9, %rcx
+ adcq %r9, %r9 #shld \$1, %r8, %r9
+
+ mulq %rax
+ movq %rax, (%rsp)
+ addq %rdx, %r8
+ adcq \$0, %r9
+
+ movq %r8, 8(%rsp)
+ shrq \$63, %rcx
+
+#second iteration
+ movq 8($inp), %r8
+ movq 16($inp), %rax
+ mulq %r8
+ addq %rax, %r10
+ movq 24($inp), %rax
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r8
+ addq %rax, %r11
+ movq 32($inp), %rax
+ adcq \$0, %rdx
+ addq %rbx, %r11
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r8
+ addq %rax, %r12
+ movq 40($inp), %rax
+ adcq \$0, %rdx
+ addq %rbx, %r12
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r8
+ addq %rax, %r13
+ movq 48($inp), %rax
+ adcq \$0, %rdx
+ addq %rbx, %r13
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r8
+ addq %rax, %r14
+ movq 56($inp), %rax
+ adcq \$0, %rdx
+ addq %rbx, %r14
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r8
+ addq %rax, %r15
+ movq %r8, %rax
+ adcq \$0, %rdx
+ addq %rbx, %r15
+ movq %rdx, %r8
+ movq %r10, %rdx
+ adcq \$0, %r8
+
+ add %rdx, %rdx
+ lea (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
+ movq %r11, %rbx
+ adcq %r11, %r11 #shld \$1, %r10, %r11
+
+ mulq %rax
+ addq %rax, %r9
+ adcq %rdx, %r10
+ adcq \$0, %r11
+
+ movq %r9, 16(%rsp)
+ movq %r10, 24(%rsp)
+ shrq \$63, %rbx
+
+#third iteration
+ movq 16($inp), %r9
+ movq 24($inp), %rax
+ mulq %r9
+ addq %rax, %r12
+ movq 32($inp), %rax
+ movq %rdx, %rcx
+ adcq \$0, %rcx
+
+ mulq %r9
+ addq %rax, %r13
+ movq 40($inp), %rax
+ adcq \$0, %rdx
+ addq %rcx, %r13
+ movq %rdx, %rcx
+ adcq \$0, %rcx
+
+ mulq %r9
+ addq %rax, %r14
+ movq 48($inp), %rax
+ adcq \$0, %rdx
+ addq %rcx, %r14
+ movq %rdx, %rcx
+ adcq \$0, %rcx
+
+ mulq %r9
+ movq %r12, %r10
+ lea (%rbx,%r12,2), %r12 #shld \$1, %rbx, %r12
+ addq %rax, %r15
+ movq 56($inp), %rax
+ adcq \$0, %rdx
+ addq %rcx, %r15
+ movq %rdx, %rcx
+ adcq \$0, %rcx
+
+ mulq %r9
+ shrq \$63, %r10
+ addq %rax, %r8
+ movq %r9, %rax
+ adcq \$0, %rdx
+ addq %rcx, %r8
+ movq %rdx, %r9
+ adcq \$0, %r9
+
+ movq %r13, %rcx
+ leaq (%r10,%r13,2), %r13 #shld \$1, %r12, %r13
+
+ mulq %rax
+ addq %rax, %r11
+ adcq %rdx, %r12
+ adcq \$0, %r13
+
+ movq %r11, 32(%rsp)
+ movq %r12, 40(%rsp)
+ shrq \$63, %rcx
+
+#fourth iteration
+ movq 24($inp), %r10
+ movq 32($inp), %rax
+ mulq %r10
+ addq %rax, %r14
+ movq 40($inp), %rax
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r10
+ addq %rax, %r15
+ movq 48($inp), %rax
+ adcq \$0, %rdx
+ addq %rbx, %r15
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r10
+ movq %r14, %r12
+ leaq (%rcx,%r14,2), %r14 #shld \$1, %rcx, %r14
+ addq %rax, %r8
+ movq 56($inp), %rax
+ adcq \$0, %rdx
+ addq %rbx, %r8
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r10
+ shrq \$63, %r12
+ addq %rax, %r9
+ movq %r10, %rax
+ adcq \$0, %rdx
+ addq %rbx, %r9
+ movq %rdx, %r10
+ adcq \$0, %r10
+
+ movq %r15, %rbx
+ leaq (%r12,%r15,2),%r15 #shld \$1, %r14, %r15
+
+ mulq %rax
+ addq %rax, %r13
+ adcq %rdx, %r14
+ adcq \$0, %r15
+
+ movq %r13, 48(%rsp)
+ movq %r14, 56(%rsp)
+ shrq \$63, %rbx
+
+#fifth iteration
+ movq 32($inp), %r11
+ movq 40($inp), %rax
+ mulq %r11
+ addq %rax, %r8
+ movq 48($inp), %rax
+ movq %rdx, %rcx
+ adcq \$0, %rcx
+
+ mulq %r11
+ addq %rax, %r9
+ movq 56($inp), %rax
+ adcq \$0, %rdx
+ movq %r8, %r12
+ leaq (%rbx,%r8,2), %r8 #shld \$1, %rbx, %r8
+ addq %rcx, %r9
+ movq %rdx, %rcx
+ adcq \$0, %rcx
+
+ mulq %r11
+ shrq \$63, %r12
+ addq %rax, %r10
+ movq %r11, %rax
+ adcq \$0, %rdx
+ addq %rcx, %r10
+ movq %rdx, %r11
+ adcq \$0, %r11
+
+ movq %r9, %rcx
+ leaq (%r12,%r9,2), %r9 #shld \$1, %r8, %r9
+
+ mulq %rax
+ addq %rax, %r15
+ adcq %rdx, %r8
+ adcq \$0, %r9
+
+ movq %r15, 64(%rsp)
+ movq %r8, 72(%rsp)
+ shrq \$63, %rcx
+
+#sixth iteration
+ movq 40($inp), %r12
+ movq 48($inp), %rax
+ mulq %r12
+ addq %rax, %r10
+ movq 56($inp), %rax
+ movq %rdx, %rbx
+ adcq \$0, %rbx
+
+ mulq %r12
+ addq %rax, %r11
+ movq %r12, %rax
+ movq %r10, %r15
+ leaq (%rcx,%r10,2), %r10 #shld \$1, %rcx, %r10
+ adcq \$0, %rdx
+ shrq \$63, %r15
+ addq %rbx, %r11
+ movq %rdx, %r12
+ adcq \$0, %r12
+
+ movq %r11, %rbx
+ leaq (%r15,%r11,2), %r11 #shld \$1, %r10, %r11
+
+ mulq %rax
+ addq %rax, %r9
+ adcq %rdx, %r10
+ adcq \$0, %r11
+
+ movq %r9, 80(%rsp)
+ movq %r10, 88(%rsp)
+
+#seventh iteration
+ movq 48($inp), %r13
+ movq 56($inp), %rax
+ mulq %r13
+ addq %rax, %r12
+ movq %r13, %rax
+ movq %rdx, %r13
+ adcq \$0, %r13
+
+ xorq %r14, %r14
+ shlq \$1, %rbx
+ adcq %r12, %r12 #shld \$1, %rbx, %r12
+ adcq %r13, %r13 #shld \$1, %r12, %r13
+ adcq %r14, %r14 #shld \$1, %r13, %r14
+
+ mulq %rax
+ addq %rax, %r11
+ adcq %rdx, %r12
+ adcq \$0, %r13
+
+ movq %r11, 96(%rsp)
+ movq %r12, 104(%rsp)
+
+#eighth iteration
+ movq 56($inp), %rax
+ mulq %rax
+ addq %rax, %r13
+ adcq \$0, %rdx
+
+ addq %rdx, %r14
+
+ movq %r13, 112(%rsp)
+ movq %r14, 120(%rsp)
+
+ movq (%rsp), %r8
+ movq 8(%rsp), %r9
+ movq 16(%rsp), %r10
+ movq 24(%rsp), %r11
+ movq 32(%rsp), %r12
+ movq 40(%rsp), %r13
+ movq 48(%rsp), %r14
+ movq 56(%rsp), %r15
+
+ call __rsaz_512_reduce
+
+ addq 64(%rsp), %r8
+ adcq 72(%rsp), %r9
+ adcq 80(%rsp), %r10
+ adcq 88(%rsp), %r11
+ adcq 96(%rsp), %r12
+ adcq 104(%rsp), %r13
+ adcq 112(%rsp), %r14
+ adcq 120(%rsp), %r15
+ sbbq %rcx, %rcx
+
+ call __rsaz_512_subtract
+
+ movq %r8, %rdx
+ movq %r9, %rax
+ movl 128+8(%rsp), $times
+ movq $out, $inp
+
+ decl $times
+ jnz .Loop_sqr
+___
+if ($addx) {
+$code.=<<___;
+ jmp .Lsqr_tail
+
+.align 32
+.Loop_sqrx:
+ movl $times,128+8(%rsp)
+ movq $out, %xmm0 # off-load
+ movq %rbp, %xmm1 # off-load
+#first iteration
+ mulx %rax, %r8, %r9
+
+ mulx 16($inp), %rcx, %r10
+ xor %rbp, %rbp # cf=0, of=0
+
+ mulx 24($inp), %rax, %r11
+ adcx %rcx, %r9
+
+ mulx 32($inp), %rcx, %r12
+ adcx %rax, %r10
+
+ mulx 40($inp), %rax, %r13
+ adcx %rcx, %r11
+
+ .byte 0xc4,0x62,0xf3,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($inp), %rcx, %r14
+ adcx %rax, %r12
+ adcx %rcx, %r13
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xbe,0x38,0x00,0x00,0x00 # mulx 56($inp), %rax, %r15
+ adcx %rax, %r14
+ adcx %rbp, %r15 # %rbp is 0
+
+ mov %r9, %rcx
+ shld \$1, %r8, %r9
+ shl \$1, %r8
+
+ xor %ebp, %ebp
+ mulx %rdx, %rax, %rdx
+ adcx %rdx, %r8
+ mov 8($inp), %rdx
+ adcx %rbp, %r9
+
+ mov %rax, (%rsp)
+ mov %r8, 8(%rsp)
+
+#second iteration
+ mulx 16($inp), %rax, %rbx
+ adox %rax, %r10
+ adcx %rbx, %r11
+
+ .byte 0xc4,0x62,0xc3,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r8
+ adox $out, %r11
+ adcx %r8, %r12
+
+ mulx 32($inp), %rax, %rbx
+ adox %rax, %r12
+ adcx %rbx, %r13
+
+ mulx 40($inp), $out, %r8
+ adox $out, %r13
+ adcx %r8, %r14
+
+ .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 48($inp), %rax, %rbx
+ adox %rax, %r14
+ adcx %rbx, %r15
+
+ .byte 0xc4,0x62,0xc3,0xf6,0x86,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r8
+ adox $out, %r15
+ adcx %rbp, %r8
+ adox %rbp, %r8
+
+ mov %r11, %rbx
+ shld \$1, %r10, %r11
+ shld \$1, %rcx, %r10
+
+ xor %ebp,%ebp
+ mulx %rdx, %rax, %rcx
+ mov 16($inp), %rdx
+ adcx %rax, %r9
+ adcx %rcx, %r10
+ adcx %rbp, %r11
+
+ mov %r9, 16(%rsp)
+ .byte 0x4c,0x89,0x94,0x24,0x18,0x00,0x00,0x00 # mov %r10, 24(%rsp)
+
+#third iteration
+ .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r9
+ adox $out, %r12
+ adcx %r9, %r13
+
+ mulx 32($inp), %rax, %rcx
+ adox %rax, %r13
+ adcx %rcx, %r14
+
+ mulx 40($inp), $out, %r9
+ adox $out, %r14
+ adcx %r9, %r15
+
+ .byte 0xc4,0xe2,0xfb,0xf6,0x8e,0x30,0x00,0x00,0x00 # mulx 48($inp), %rax, %rcx
+ adox %rax, %r15
+ adcx %rcx, %r8
+
+ .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r9
+ adox $out, %r8
+ adcx %rbp, %r9
+ adox %rbp, %r9
+
+ mov %r13, %rcx
+ shld \$1, %r12, %r13
+ shld \$1, %rbx, %r12
+
+ xor %ebp, %ebp
+ mulx %rdx, %rax, %rdx
+ adcx %rax, %r11
+ adcx %rdx, %r12
+ mov 24($inp), %rdx
+ adcx %rbp, %r13
+
+ mov %r11, 32(%rsp)
+ .byte 0x4c,0x89,0xa4,0x24,0x28,0x00,0x00,0x00 # mov %r12, 40(%rsp)
+
+#fourth iteration
+ .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x20,0x00,0x00,0x00 # mulx 32($inp), %rax, %rbx
+ adox %rax, %r14
+ adcx %rbx, %r15
+
+ mulx 40($inp), $out, %r10
+ adox $out, %r15
+ adcx %r10, %r8
+
+ mulx 48($inp), %rax, %rbx
+ adox %rax, %r8
+ adcx %rbx, %r9
+
+ mulx 56($inp), $out, %r10
+ adox $out, %r9
+ adcx %rbp, %r10
+ adox %rbp, %r10
+
+ .byte 0x66
+ mov %r15, %rbx
+ shld \$1, %r14, %r15
+ shld \$1, %rcx, %r14
+
+ xor %ebp, %ebp
+ mulx %rdx, %rax, %rdx
+ adcx %rax, %r13
+ adcx %rdx, %r14
+ mov 32($inp), %rdx
+ adcx %rbp, %r15
+
+ mov %r13, 48(%rsp)
+ mov %r14, 56(%rsp)
+
+#fifth iteration
+ .byte 0xc4,0x62,0xc3,0xf6,0x9e,0x28,0x00,0x00,0x00 # mulx 40($inp), $out, %r11
+ adox $out, %r8
+ adcx %r11, %r9
+
+ mulx 48($inp), %rax, %rcx
+ adox %rax, %r9
+ adcx %rcx, %r10
+
+ mulx 56($inp), $out, %r11
+ adox $out, %r10
+ adcx %rbp, %r11
+ adox %rbp, %r11
+
+ mov %r9, %rcx
+ shld \$1, %r8, %r9
+ shld \$1, %rbx, %r8
+
+ xor %ebp, %ebp
+ mulx %rdx, %rax, %rdx
+ adcx %rax, %r15
+ adcx %rdx, %r8
+ mov 40($inp), %rdx
+ adcx %rbp, %r9
+
+ mov %r15, 64(%rsp)
+ mov %r8, 72(%rsp)
+
+#sixth iteration
+ .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 48($inp), %rax, %rbx
+ adox %rax, %r10
+ adcx %rbx, %r11
+
+ .byte 0xc4,0x62,0xc3,0xf6,0xa6,0x38,0x00,0x00,0x00 # mulx 56($inp), $out, %r12
+ adox $out, %r11
+ adcx %rbp, %r12
+ adox %rbp, %r12
+
+ mov %r11, %rbx
+ shld \$1, %r10, %r11
+ shld \$1, %rcx, %r10
+
+ xor %ebp, %ebp
+ mulx %rdx, %rax, %rdx
+ adcx %rax, %r9
+ adcx %rdx, %r10
+ mov 48($inp), %rdx
+ adcx %rbp, %r11
+
+ mov %r9, 80(%rsp)
+ mov %r10, 88(%rsp)
+
+#seventh iteration
+ .byte 0xc4,0x62,0xfb,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 56($inp), %rax, %r13
+ adox %rax, %r12
+ adox %rbp, %r13
+
+ xor %r14, %r14
+ shld \$1, %r13, %r14
+ shld \$1, %r12, %r13
+ shld \$1, %rbx, %r12
+
+ xor %ebp, %ebp
+ mulx %rdx, %rax, %rdx
+ adcx %rax, %r11
+ adcx %rdx, %r12
+ mov 56($inp), %rdx
+ adcx %rbp, %r13
+
+ .byte 0x4c,0x89,0x9c,0x24,0x60,0x00,0x00,0x00 # mov %r11, 96(%rsp)
+ .byte 0x4c,0x89,0xa4,0x24,0x68,0x00,0x00,0x00 # mov %r12, 104(%rsp)
+
+#eighth iteration
+ mulx %rdx, %rax, %rdx
+ adox %rax, %r13
+ adox %rbp, %rdx
+
+ .byte 0x66
+ add %rdx, %r14
+
+ movq %r13, 112(%rsp)
+ movq %r14, 120(%rsp)
+ movq %xmm0, $out
+ movq %xmm1, %rbp
+
+ movq 128(%rsp), %rdx # pull $n0
+ movq (%rsp), %r8
+ movq 8(%rsp), %r9
+ movq 16(%rsp), %r10
+ movq 24(%rsp), %r11
+ movq 32(%rsp), %r12
+ movq 40(%rsp), %r13
+ movq 48(%rsp), %r14
+ movq 56(%rsp), %r15
+
+ call __rsaz_512_reducex
+
+ addq 64(%rsp), %r8
+ adcq 72(%rsp), %r9
+ adcq 80(%rsp), %r10
+ adcq 88(%rsp), %r11
+ adcq 96(%rsp), %r12
+ adcq 104(%rsp), %r13
+ adcq 112(%rsp), %r14
+ adcq 120(%rsp), %r15
+ sbbq %rcx, %rcx
+
+ call __rsaz_512_subtract
+
+ movq %r8, %rdx
+ movq %r9, %rax
+ movl 128+8(%rsp), $times
+ movq $out, $inp
+
+ decl $times
+ jnz .Loop_sqrx
+
+.Lsqr_tail:
+___
+}
+$code.=<<___;
+
+ leaq 128+24+48(%rsp), %rax
+ movq -48(%rax), %r15
+ movq -40(%rax), %r14
+ movq -32(%rax), %r13
+ movq -24(%rax), %r12
+ movq -16(%rax), %rbp
+ movq -8(%rax), %rbx
+ leaq (%rax), %rsp
+.Lsqr_epilogue:
+ ret
+.size rsaz_512_sqr,.-rsaz_512_sqr
+___
+}
+{
+my ($out,$ap,$bp,$mod,$n0) = ("%rdi","%rsi","%rdx","%rcx","%r8");
+$code.=<<___;
+.globl rsaz_512_mul
+.type rsaz_512_mul,\@function,5
+.align 32
+rsaz_512_mul:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ subq \$128+24, %rsp
+.Lmul_body:
+ movq $out, %xmm0 # off-load arguments
+ movq $mod, %xmm1
+ movq $n0, 128(%rsp)
+___
+$code.=<<___ if ($addx);
+ movl \$0x80100,%r11d
+ andl OPENSSL_ia32cap_P+8(%rip),%r11d
+	cmpl	\$0x80100,%r11d		# check for MULX and ADCX/ADOX
+ je .Lmulx
+___
+$code.=<<___;
+ movq ($bp), %rbx # pass b[0]
+ movq $bp, %rbp # pass argument
+ call __rsaz_512_mul
+
+ movq %xmm0, $out
+ movq %xmm1, %rbp
+
+ movq (%rsp), %r8
+ movq 8(%rsp), %r9
+ movq 16(%rsp), %r10
+ movq 24(%rsp), %r11
+ movq 32(%rsp), %r12
+ movq 40(%rsp), %r13
+ movq 48(%rsp), %r14
+ movq 56(%rsp), %r15
+
+ call __rsaz_512_reduce
+___
+$code.=<<___ if ($addx);
+ jmp .Lmul_tail
+
+.align 32
+.Lmulx:
+ movq $bp, %rbp # pass argument
+ movq ($bp), %rdx # pass b[0]
+ call __rsaz_512_mulx
+
+ movq %xmm0, $out
+ movq %xmm1, %rbp
+
+ movq 128(%rsp), %rdx # pull $n0
+ movq (%rsp), %r8
+ movq 8(%rsp), %r9
+ movq 16(%rsp), %r10
+ movq 24(%rsp), %r11
+ movq 32(%rsp), %r12
+ movq 40(%rsp), %r13
+ movq 48(%rsp), %r14
+ movq 56(%rsp), %r15
+
+ call __rsaz_512_reducex
+.Lmul_tail:
+___
+$code.=<<___;
+ addq 64(%rsp), %r8
+ adcq 72(%rsp), %r9
+ adcq 80(%rsp), %r10
+ adcq 88(%rsp), %r11
+ adcq 96(%rsp), %r12
+ adcq 104(%rsp), %r13
+ adcq 112(%rsp), %r14
+ adcq 120(%rsp), %r15
+ sbbq %rcx, %rcx
+
+ call __rsaz_512_subtract
+
+ leaq 128+24+48(%rsp), %rax
+ movq -48(%rax), %r15
+ movq -40(%rax), %r14
+ movq -32(%rax), %r13
+ movq -24(%rax), %r12
+ movq -16(%rax), %rbp
+ movq -8(%rax), %rbx
+ leaq (%rax), %rsp
+.Lmul_epilogue:
+ ret
+.size rsaz_512_mul,.-rsaz_512_mul
+___
+}
+{
+my ($out,$ap,$bp,$mod,$n0,$pwr) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
+$code.=<<___;
+.globl rsaz_512_mul_gather4
+.type rsaz_512_mul_gather4,\@function,6
+.align 32
+rsaz_512_mul_gather4:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ subq \$`128+24+($win64?0xb0:0)`, %rsp
+___
+$code.=<<___ if ($win64);
+ movaps %xmm6,0xa0(%rsp)
+ movaps %xmm7,0xb0(%rsp)
+ movaps %xmm8,0xc0(%rsp)
+ movaps %xmm9,0xd0(%rsp)
+ movaps %xmm10,0xe0(%rsp)
+ movaps %xmm11,0xf0(%rsp)
+ movaps %xmm12,0x100(%rsp)
+ movaps %xmm13,0x110(%rsp)
+ movaps %xmm14,0x120(%rsp)
+ movaps %xmm15,0x130(%rsp)
+___
+$code.=<<___;
+.Lmul_gather4_body:
+ movd $pwr,%xmm8
+ movdqa .Linc+16(%rip),%xmm1 # 00000002000000020000000200000002
+ movdqa .Linc(%rip),%xmm0 # 00000001000000010000000000000000
+
+ pshufd \$0,%xmm8,%xmm8 # broadcast $power
+ movdqa %xmm1,%xmm7
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..15 to $power
+#
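+# A small illustrative Perl helper (hypothetical, never called by this
+# generator) modelling the constant-time selection the gather code below
+# performs: every table slot is ANDed with an all-ones/all-zeroes mask
+# derived from comparing its index to $power, and the masked slots are
+# ORed together, so the selected index never steers a load address.
+sub _gather_sketch {
+	my ($tbl, $power) = @_;	# $tbl - ref to 16 integers, $power - 0..15
+	my $acc = 0;
+	for my $i (0 .. 15) {
+		# pcmpeqd builds this all-ones/all-zeroes mask branchlessly
+		my $mask = ($i == $power) ? ~0 : 0;
+		$acc |= $tbl->[$i] & $mask;
+	}
+	return $acc;
+}
+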
+for($i=0;$i<4;$i++) {
+$code.=<<___;
+ paddd %xmm`$i`,%xmm`$i+1`
+ pcmpeqd %xmm8,%xmm`$i`
+ movdqa %xmm7,%xmm`$i+3`
+___
+}
+for(;$i<7;$i++) {
+$code.=<<___;
+ paddd %xmm`$i`,%xmm`$i+1`
+ pcmpeqd %xmm8,%xmm`$i`
+___
+}
+$code.=<<___;
+ pcmpeqd %xmm8,%xmm7
+
+ movdqa 16*0($bp),%xmm8
+ movdqa 16*1($bp),%xmm9
+ movdqa 16*2($bp),%xmm10
+ movdqa 16*3($bp),%xmm11
+ pand %xmm0,%xmm8
+ movdqa 16*4($bp),%xmm12
+ pand %xmm1,%xmm9
+ movdqa 16*5($bp),%xmm13
+ pand %xmm2,%xmm10
+ movdqa 16*6($bp),%xmm14
+ pand %xmm3,%xmm11
+ movdqa 16*7($bp),%xmm15
+ leaq 128($bp), %rbp
+ pand %xmm4,%xmm12
+ pand %xmm5,%xmm13
+ pand %xmm6,%xmm14
+ pand %xmm7,%xmm15
+ por %xmm10,%xmm8
+ por %xmm11,%xmm9
+ por %xmm12,%xmm8
+ por %xmm13,%xmm9
+ por %xmm14,%xmm8
+ por %xmm15,%xmm9
+
+ por %xmm9,%xmm8
+ pshufd \$0x4e,%xmm8,%xmm9
+ por %xmm9,%xmm8
+___
+$code.=<<___ if ($addx);
+ movl \$0x80100,%r11d
+ andl OPENSSL_ia32cap_P+8(%rip),%r11d
+	cmpl	\$0x80100,%r11d		# check for MULX and ADCX/ADOX
+ je .Lmulx_gather
+___
+$code.=<<___;
+ movq %xmm8,%rbx
+
+ movq $n0, 128(%rsp) # off-load arguments
+ movq $out, 128+8(%rsp)
+ movq $mod, 128+16(%rsp)
+
+ movq ($ap), %rax
+ movq 8($ap), %rcx
+ mulq %rbx # 0 iteration
+ movq %rax, (%rsp)
+ movq %rcx, %rax
+ movq %rdx, %r8
+
+ mulq %rbx
+ addq %rax, %r8
+ movq 16($ap), %rax
+ movq %rdx, %r9
+ adcq \$0, %r9
+
+ mulq %rbx
+ addq %rax, %r9
+ movq 24($ap), %rax
+ movq %rdx, %r10
+ adcq \$0, %r10
+
+ mulq %rbx
+ addq %rax, %r10
+ movq 32($ap), %rax
+ movq %rdx, %r11
+ adcq \$0, %r11
+
+ mulq %rbx
+ addq %rax, %r11
+ movq 40($ap), %rax
+ movq %rdx, %r12
+ adcq \$0, %r12
+
+ mulq %rbx
+ addq %rax, %r12
+ movq 48($ap), %rax
+ movq %rdx, %r13
+ adcq \$0, %r13
+
+ mulq %rbx
+ addq %rax, %r13
+ movq 56($ap), %rax
+ movq %rdx, %r14
+ adcq \$0, %r14
+
+ mulq %rbx
+ addq %rax, %r14
+ movq ($ap), %rax
+ movq %rdx, %r15
+ adcq \$0, %r15
+
+ leaq 8(%rsp), %rdi
+ movl \$7, %ecx
+ jmp .Loop_mul_gather
+
+.align 32
+.Loop_mul_gather:
+ movdqa 16*0(%rbp),%xmm8
+ movdqa 16*1(%rbp),%xmm9
+ movdqa 16*2(%rbp),%xmm10
+ movdqa 16*3(%rbp),%xmm11
+ pand %xmm0,%xmm8
+ movdqa 16*4(%rbp),%xmm12
+ pand %xmm1,%xmm9
+ movdqa 16*5(%rbp),%xmm13
+ pand %xmm2,%xmm10
+ movdqa 16*6(%rbp),%xmm14
+ pand %xmm3,%xmm11
+ movdqa 16*7(%rbp),%xmm15
+ leaq 128(%rbp), %rbp
+ pand %xmm4,%xmm12
+ pand %xmm5,%xmm13
+ pand %xmm6,%xmm14
+ pand %xmm7,%xmm15
+ por %xmm10,%xmm8
+ por %xmm11,%xmm9
+ por %xmm12,%xmm8
+ por %xmm13,%xmm9
+ por %xmm14,%xmm8
+ por %xmm15,%xmm9
+
+ por %xmm9,%xmm8
+ pshufd \$0x4e,%xmm8,%xmm9
+ por %xmm9,%xmm8
+ movq %xmm8,%rbx
+
+ mulq %rbx
+ addq %rax, %r8
+ movq 8($ap), %rax
+ movq %r8, (%rdi)
+ movq %rdx, %r8
+ adcq \$0, %r8
+
+ mulq %rbx
+ addq %rax, %r9
+ movq 16($ap), %rax
+ adcq \$0, %rdx
+ addq %r9, %r8
+ movq %rdx, %r9
+ adcq \$0, %r9
+
+ mulq %rbx
+ addq %rax, %r10
+ movq 24($ap), %rax
+ adcq \$0, %rdx
+ addq %r10, %r9
+ movq %rdx, %r10
+ adcq \$0, %r10
+
+ mulq %rbx
+ addq %rax, %r11
+ movq 32($ap), %rax
+ adcq \$0, %rdx
+ addq %r11, %r10
+ movq %rdx, %r11
+ adcq \$0, %r11
+
+ mulq %rbx
+ addq %rax, %r12
+ movq 40($ap), %rax
+ adcq \$0, %rdx
+ addq %r12, %r11
+ movq %rdx, %r12
+ adcq \$0, %r12
+
+ mulq %rbx
+ addq %rax, %r13
+ movq 48($ap), %rax
+ adcq \$0, %rdx
+ addq %r13, %r12
+ movq %rdx, %r13
+ adcq \$0, %r13
+
+ mulq %rbx
+ addq %rax, %r14
+ movq 56($ap), %rax
+ adcq \$0, %rdx
+ addq %r14, %r13
+ movq %rdx, %r14
+ adcq \$0, %r14
+
+ mulq %rbx
+ addq %rax, %r15
+ movq ($ap), %rax
+ adcq \$0, %rdx
+ addq %r15, %r14
+ movq %rdx, %r15
+ adcq \$0, %r15
+
+ leaq 8(%rdi), %rdi
+
+ decl %ecx
+ jnz .Loop_mul_gather
+
+ movq %r8, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r13, 40(%rdi)
+ movq %r14, 48(%rdi)
+ movq %r15, 56(%rdi)
+
+ movq 128+8(%rsp), $out
+ movq 128+16(%rsp), %rbp
+
+ movq (%rsp), %r8
+ movq 8(%rsp), %r9
+ movq 16(%rsp), %r10
+ movq 24(%rsp), %r11
+ movq 32(%rsp), %r12
+ movq 40(%rsp), %r13
+ movq 48(%rsp), %r14
+ movq 56(%rsp), %r15
+
+ call __rsaz_512_reduce
+___
+$code.=<<___ if ($addx);
+ jmp .Lmul_gather_tail
+
+.align 32
+.Lmulx_gather:
+ movq %xmm8,%rdx
+
+ mov $n0, 128(%rsp) # off-load arguments
+ mov $out, 128+8(%rsp)
+ mov $mod, 128+16(%rsp)
+
+ mulx ($ap), %rbx, %r8 # 0 iteration
+ mov %rbx, (%rsp)
+ xor %edi, %edi # cf=0, of=0
+
+ mulx 8($ap), %rax, %r9
+
+ mulx 16($ap), %rbx, %r10
+ adcx %rax, %r8
+
+ mulx 24($ap), %rax, %r11
+ adcx %rbx, %r9
+
+ mulx 32($ap), %rbx, %r12
+ adcx %rax, %r10
+
+ mulx 40($ap), %rax, %r13
+ adcx %rbx, %r11
+
+ mulx 48($ap), %rbx, %r14
+ adcx %rax, %r12
+
+ mulx 56($ap), %rax, %r15
+ adcx %rbx, %r13
+ adcx %rax, %r14
+ .byte 0x67
+ mov %r8, %rbx
+ adcx %rdi, %r15 # %rdi is 0
+
+ mov \$-7, %rcx
+ jmp .Loop_mulx_gather
+
+.align 32
+.Loop_mulx_gather:
+ movdqa 16*0(%rbp),%xmm8
+ movdqa 16*1(%rbp),%xmm9
+ movdqa 16*2(%rbp),%xmm10
+ movdqa 16*3(%rbp),%xmm11
+ pand %xmm0,%xmm8
+ movdqa 16*4(%rbp),%xmm12
+ pand %xmm1,%xmm9
+ movdqa 16*5(%rbp),%xmm13
+ pand %xmm2,%xmm10
+ movdqa 16*6(%rbp),%xmm14
+ pand %xmm3,%xmm11
+ movdqa 16*7(%rbp),%xmm15
+ leaq 128(%rbp), %rbp
+ pand %xmm4,%xmm12
+ pand %xmm5,%xmm13
+ pand %xmm6,%xmm14
+ pand %xmm7,%xmm15
+ por %xmm10,%xmm8
+ por %xmm11,%xmm9
+ por %xmm12,%xmm8
+ por %xmm13,%xmm9
+ por %xmm14,%xmm8
+ por %xmm15,%xmm9
+
+ por %xmm9,%xmm8
+ pshufd \$0x4e,%xmm8,%xmm9
+ por %xmm9,%xmm8
+ movq %xmm8,%rdx
+
+ .byte 0xc4,0x62,0xfb,0xf6,0x86,0x00,0x00,0x00,0x00 # mulx ($ap), %rax, %r8
+ adcx %rax, %rbx
+ adox %r9, %r8
+
+ mulx 8($ap), %rax, %r9
+ adcx %rax, %r8
+ adox %r10, %r9
+
+ mulx 16($ap), %rax, %r10
+ adcx %rax, %r9
+ adox %r11, %r10
+
+ .byte 0xc4,0x62,0xfb,0xf6,0x9e,0x18,0x00,0x00,0x00 # mulx 24($ap), %rax, %r11
+ adcx %rax, %r10
+ adox %r12, %r11
+
+ mulx 32($ap), %rax, %r12
+ adcx %rax, %r11
+ adox %r13, %r12
+
+ mulx 40($ap), %rax, %r13
+ adcx %rax, %r12
+ adox %r14, %r13
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($ap), %rax, %r14
+ adcx %rax, %r13
+ .byte 0x67
+ adox %r15, %r14
+
+ mulx 56($ap), %rax, %r15
+ mov %rbx, 64(%rsp,%rcx,8)
+ adcx %rax, %r14
+ adox %rdi, %r15
+ mov %r8, %rbx
+ adcx %rdi, %r15 # cf=0
+
+ inc %rcx # of=0
+ jnz .Loop_mulx_gather
+
+ mov %r8, 64(%rsp)
+ mov %r9, 64+8(%rsp)
+ mov %r10, 64+16(%rsp)
+ mov %r11, 64+24(%rsp)
+ mov %r12, 64+32(%rsp)
+ mov %r13, 64+40(%rsp)
+ mov %r14, 64+48(%rsp)
+ mov %r15, 64+56(%rsp)
+
+ mov 128(%rsp), %rdx # pull arguments
+ mov 128+8(%rsp), $out
+ mov 128+16(%rsp), %rbp
+
+ mov (%rsp), %r8
+ mov 8(%rsp), %r9
+ mov 16(%rsp), %r10
+ mov 24(%rsp), %r11
+ mov 32(%rsp), %r12
+ mov 40(%rsp), %r13
+ mov 48(%rsp), %r14
+ mov 56(%rsp), %r15
+
+ call __rsaz_512_reducex
+
+.Lmul_gather_tail:
+___
+$code.=<<___;
+ addq 64(%rsp), %r8
+ adcq 72(%rsp), %r9
+ adcq 80(%rsp), %r10
+ adcq 88(%rsp), %r11
+ adcq 96(%rsp), %r12
+ adcq 104(%rsp), %r13
+ adcq 112(%rsp), %r14
+ adcq 120(%rsp), %r15
+ sbbq %rcx, %rcx
+
+ call __rsaz_512_subtract
+
+ leaq 128+24+48(%rsp), %rax
+___
+$code.=<<___ if ($win64);
+ movaps 0xa0-0xc8(%rax),%xmm6
+ movaps 0xb0-0xc8(%rax),%xmm7
+ movaps 0xc0-0xc8(%rax),%xmm8
+ movaps 0xd0-0xc8(%rax),%xmm9
+ movaps 0xe0-0xc8(%rax),%xmm10
+ movaps 0xf0-0xc8(%rax),%xmm11
+ movaps 0x100-0xc8(%rax),%xmm12
+ movaps 0x110-0xc8(%rax),%xmm13
+ movaps 0x120-0xc8(%rax),%xmm14
+ movaps 0x130-0xc8(%rax),%xmm15
+ lea 0xb0(%rax),%rax
+___
+$code.=<<___;
+ movq -48(%rax), %r15
+ movq -40(%rax), %r14
+ movq -32(%rax), %r13
+ movq -24(%rax), %r12
+ movq -16(%rax), %rbp
+ movq -8(%rax), %rbx
+ leaq (%rax), %rsp
+.Lmul_gather4_epilogue:
+ ret
+.size rsaz_512_mul_gather4,.-rsaz_512_mul_gather4
+___
+}
+{
+my ($out,$ap,$mod,$n0,$tbl,$pwr) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
+$code.=<<___;
+.globl rsaz_512_mul_scatter4
+.type rsaz_512_mul_scatter4,\@function,6
+.align 32
+rsaz_512_mul_scatter4:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov $pwr, $pwr
+ subq \$128+24, %rsp
+.Lmul_scatter4_body:
+ leaq ($tbl,$pwr,8), $tbl
+ movq $out, %xmm0 # off-load arguments
+ movq $mod, %xmm1
+ movq $tbl, %xmm2
+ movq $n0, 128(%rsp)
+
+ movq $out, %rbp
+___
+$code.=<<___ if ($addx);
+ movl \$0x80100,%r11d
+ andl OPENSSL_ia32cap_P+8(%rip),%r11d
+	cmpl	\$0x80100,%r11d		# check for MULX and ADCX/ADOX
+ je .Lmulx_scatter
+___
+$code.=<<___;
+ movq ($out),%rbx # pass b[0]
+ call __rsaz_512_mul
+
+ movq %xmm0, $out
+ movq %xmm1, %rbp
+
+ movq (%rsp), %r8
+ movq 8(%rsp), %r9
+ movq 16(%rsp), %r10
+ movq 24(%rsp), %r11
+ movq 32(%rsp), %r12
+ movq 40(%rsp), %r13
+ movq 48(%rsp), %r14
+ movq 56(%rsp), %r15
+
+ call __rsaz_512_reduce
+___
+$code.=<<___ if ($addx);
+ jmp .Lmul_scatter_tail
+
+.align 32
+.Lmulx_scatter:
+ movq ($out), %rdx # pass b[0]
+ call __rsaz_512_mulx
+
+ movq %xmm0, $out
+ movq %xmm1, %rbp
+
+ movq 128(%rsp), %rdx # pull $n0
+ movq (%rsp), %r8
+ movq 8(%rsp), %r9
+ movq 16(%rsp), %r10
+ movq 24(%rsp), %r11
+ movq 32(%rsp), %r12
+ movq 40(%rsp), %r13
+ movq 48(%rsp), %r14
+ movq 56(%rsp), %r15
+
+ call __rsaz_512_reducex
+
+.Lmul_scatter_tail:
+___
+$code.=<<___;
+ addq 64(%rsp), %r8
+ adcq 72(%rsp), %r9
+ adcq 80(%rsp), %r10
+ adcq 88(%rsp), %r11
+ adcq 96(%rsp), %r12
+ adcq 104(%rsp), %r13
+ adcq 112(%rsp), %r14
+ adcq 120(%rsp), %r15
+ movq %xmm2, $inp
+ sbbq %rcx, %rcx
+
+ call __rsaz_512_subtract
+
+ movq %r8, 128*0($inp) # scatter
+ movq %r9, 128*1($inp)
+ movq %r10, 128*2($inp)
+ movq %r11, 128*3($inp)
+ movq %r12, 128*4($inp)
+ movq %r13, 128*5($inp)
+ movq %r14, 128*6($inp)
+ movq %r15, 128*7($inp)
+
+ leaq 128+24+48(%rsp), %rax
+ movq -48(%rax), %r15
+ movq -40(%rax), %r14
+ movq -32(%rax), %r13
+ movq -24(%rax), %r12
+ movq -16(%rax), %rbp
+ movq -8(%rax), %rbx
+ leaq (%rax), %rsp
+.Lmul_scatter4_epilogue:
+ ret
+.size rsaz_512_mul_scatter4,.-rsaz_512_mul_scatter4
+___
+}
+{
+my ($out,$inp,$mod,$n0) = ("%rdi","%rsi","%rdx","%rcx");
+$code.=<<___;
+.globl rsaz_512_mul_by_one
+.type rsaz_512_mul_by_one,\@function,4
+.align 32
+rsaz_512_mul_by_one:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ subq \$128+24, %rsp
+.Lmul_by_one_body:
+___
+$code.=<<___ if ($addx);
+ movl OPENSSL_ia32cap_P+8(%rip),%eax
+___
+$code.=<<___;
+ movq $mod, %rbp # reassign argument
+ movq $n0, 128(%rsp)
+
+ movq ($inp), %r8
+ pxor %xmm0, %xmm0
+ movq 8($inp), %r9
+ movq 16($inp), %r10
+ movq 24($inp), %r11
+ movq 32($inp), %r12
+ movq 40($inp), %r13
+ movq 48($inp), %r14
+ movq 56($inp), %r15
+
+ movdqa %xmm0, (%rsp)
+ movdqa %xmm0, 16(%rsp)
+ movdqa %xmm0, 32(%rsp)
+ movdqa %xmm0, 48(%rsp)
+ movdqa %xmm0, 64(%rsp)
+ movdqa %xmm0, 80(%rsp)
+ movdqa %xmm0, 96(%rsp)
+___
+$code.=<<___ if ($addx);
+ andl \$0x80100,%eax
+	cmpl	\$0x80100,%eax		# check for MULX and ADCX/ADOX
+ je .Lby_one_callx
+___
+$code.=<<___;
+ call __rsaz_512_reduce
+___
+$code.=<<___ if ($addx);
+ jmp .Lby_one_tail
+.align 32
+.Lby_one_callx:
+ movq 128(%rsp), %rdx # pull $n0
+ call __rsaz_512_reducex
+.Lby_one_tail:
+___
+$code.=<<___;
+ movq %r8, ($out)
+ movq %r9, 8($out)
+ movq %r10, 16($out)
+ movq %r11, 24($out)
+ movq %r12, 32($out)
+ movq %r13, 40($out)
+ movq %r14, 48($out)
+ movq %r15, 56($out)
+
+ leaq 128+24+48(%rsp), %rax
+ movq -48(%rax), %r15
+ movq -40(%rax), %r14
+ movq -32(%rax), %r13
+ movq -24(%rax), %r12
+ movq -16(%rax), %rbp
+ movq -8(%rax), %rbx
+ leaq (%rax), %rsp
+.Lmul_by_one_epilogue:
+ ret
+.size rsaz_512_mul_by_one,.-rsaz_512_mul_by_one
+___
+}
+{ # __rsaz_512_reduce
+ #
+ # input: %r8-%r15, %rbp - mod, 128(%rsp) - n0
+ # output: %r8-%r15
+ # clobbers: everything except %rbp and %rdi
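+# A rough plain-Perl sketch (hypothetical, never called by the generator)
+# of the word-by-word Montgomery reduction idea, using toy 16-bit limbs so
+# native integers suffice; the routine below applies the same idea to eight
+# 64-bit limbs, with its caller folding in the upper half of the product
+# and doing the final conditional subtraction.
+sub _mont_reduce_sketch {
+	my ($t, $n, $n0) = @_;	# $t - 16 limbs LSW first, $n - 8 modulus limbs, $n0 = -N^-1 mod 2^16
+	my @tp = (@$t, 0);	# one spare limb for the last carry
+	for my $i (0 .. 7) {
+		my $m = ($tp[$i] * $n0) & 0xffff;	# chosen so that limb $i becomes zero
+		my $carry = 0;
+		for my $j (0 .. 7) {
+			my $acc = $tp[$i + $j] + $m * $n->[$j] + $carry;
+			$tp[$i + $j] = $acc & 0xffff;
+			$carry = $acc >> 16;
+		}
+		for (my $k = $i + 8; $carry; $k++) {	# propagate the carry upwards
+			my $acc = $tp[$k] + $carry;
+			$tp[$k] = $acc & 0xffff;
+			$carry = $acc >> 16;
+		}
+	}
+	return [ @tp[8 .. 16] ];	# eight result limbs plus a possible top carry
+}
+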
+$code.=<<___;
+.type __rsaz_512_reduce,\@abi-omnipotent
+.align 32
+__rsaz_512_reduce:
+ movq %r8, %rbx
+ imulq 128+8(%rsp), %rbx
+ movq 0(%rbp), %rax
+ movl \$8, %ecx
+ jmp .Lreduction_loop
+
+.align 32
+.Lreduction_loop:
+ mulq %rbx
+ movq 8(%rbp), %rax
+ negq %r8
+ movq %rdx, %r8
+ adcq \$0, %r8
+
+ mulq %rbx
+ addq %rax, %r9
+ movq 16(%rbp), %rax
+ adcq \$0, %rdx
+ addq %r9, %r8
+ movq %rdx, %r9
+ adcq \$0, %r9
+
+ mulq %rbx
+ addq %rax, %r10
+ movq 24(%rbp), %rax
+ adcq \$0, %rdx
+ addq %r10, %r9
+ movq %rdx, %r10
+ adcq \$0, %r10
+
+ mulq %rbx
+ addq %rax, %r11
+ movq 32(%rbp), %rax
+ adcq \$0, %rdx
+ addq %r11, %r10
+ movq 128+8(%rsp), %rsi
+ #movq %rdx, %r11
+ #adcq \$0, %r11
+ adcq \$0, %rdx
+ movq %rdx, %r11
+
+ mulq %rbx
+ addq %rax, %r12
+ movq 40(%rbp), %rax
+ adcq \$0, %rdx
+ imulq %r8, %rsi
+ addq %r12, %r11
+ movq %rdx, %r12
+ adcq \$0, %r12
+
+ mulq %rbx
+ addq %rax, %r13
+ movq 48(%rbp), %rax
+ adcq \$0, %rdx
+ addq %r13, %r12
+ movq %rdx, %r13
+ adcq \$0, %r13
+
+ mulq %rbx
+ addq %rax, %r14
+ movq 56(%rbp), %rax
+ adcq \$0, %rdx
+ addq %r14, %r13
+ movq %rdx, %r14
+ adcq \$0, %r14
+
+ mulq %rbx
+ movq %rsi, %rbx
+ addq %rax, %r15
+ movq 0(%rbp), %rax
+ adcq \$0, %rdx
+ addq %r15, %r14
+ movq %rdx, %r15
+ adcq \$0, %r15
+
+ decl %ecx
+ jne .Lreduction_loop
+
+ ret
+.size __rsaz_512_reduce,.-__rsaz_512_reduce
+___
+}
+if ($addx) {
+ # __rsaz_512_reducex
+ #
+ # input: %r8-%r15, %rbp - mod, 128(%rsp) - n0
+ # output: %r8-%r15
+ # clobbers: everything except %rbp and %rdi
+$code.=<<___;
+.type __rsaz_512_reducex,\@abi-omnipotent
+.align 32
+__rsaz_512_reducex:
+ #movq 128+8(%rsp), %rdx # pull $n0
+ imulq %r8, %rdx
+ xorq %rsi, %rsi # cf=0,of=0
+ movl \$8, %ecx
+ jmp .Lreduction_loopx
+
+.align 32
+.Lreduction_loopx:
+ mov %r8, %rbx
+ mulx 0(%rbp), %rax, %r8
+ adcx %rbx, %rax
+ adox %r9, %r8
+
+ mulx 8(%rbp), %rax, %r9
+ adcx %rax, %r8
+ adox %r10, %r9
+
+ mulx 16(%rbp), %rbx, %r10
+ adcx %rbx, %r9
+ adox %r11, %r10
+
+ mulx 24(%rbp), %rbx, %r11
+ adcx %rbx, %r10
+ adox %r12, %r11
+
+ .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 32(%rbp), %rbx, %r12
+ mov %rdx, %rax
+ mov %r8, %rdx
+ adcx %rbx, %r11
+ adox %r13, %r12
+
+ mulx 128+8(%rsp), %rbx, %rdx
+ mov %rax, %rdx
+
+ mulx 40(%rbp), %rax, %r13
+ adcx %rax, %r12
+ adox %r14, %r13
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xb5,0x30,0x00,0x00,0x00 # mulx 48(%rbp), %rax, %r14
+ adcx %rax, %r13
+ adox %r15, %r14
+
+ mulx 56(%rbp), %rax, %r15
+ mov %rbx, %rdx
+ adcx %rax, %r14
+ adox %rsi, %r15 # %rsi is 0
+ adcx %rsi, %r15 # cf=0
+
+ decl %ecx # of=0
+ jne .Lreduction_loopx
+
+ ret
+.size __rsaz_512_reducex,.-__rsaz_512_reducex
+___
+}
+{ # __rsaz_512_subtract
+ # input: %r8-%r15, %rdi - $out, %rbp - $mod, %rcx - mask
+ # output:
+ # clobbers: everything but %rdi, %rsi and %rbp
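+# Hypothetical single-limb Perl illustration (never called by the generator)
+# of the branch-free conditional subtraction below: the modulus' two's
+# complement is ANDed with an all-ones/all-zeroes mask before being added,
+# so whether the subtraction happens does not depend on a branch.
+sub _cond_sub_sketch {
+	my ($x, $mod, $mask) = @_;	# $mask is 0 (keep $x) or 0xffff (return $x - $mod)
+	my $neg = (~$mod + 1) & 0xffff;	# two's complement of the toy 16-bit modulus
+	return ($x + ($neg & $mask)) & 0xffff;
+}
+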
+$code.=<<___;
+.type __rsaz_512_subtract,\@abi-omnipotent
+.align 32
+__rsaz_512_subtract:
+ movq %r8, ($out)
+ movq %r9, 8($out)
+ movq %r10, 16($out)
+ movq %r11, 24($out)
+ movq %r12, 32($out)
+ movq %r13, 40($out)
+ movq %r14, 48($out)
+ movq %r15, 56($out)
+
+ movq 0($mod), %r8
+ movq 8($mod), %r9
+ negq %r8
+ notq %r9
+ andq %rcx, %r8
+ movq 16($mod), %r10
+ andq %rcx, %r9
+ notq %r10
+ movq 24($mod), %r11
+ andq %rcx, %r10
+ notq %r11
+ movq 32($mod), %r12
+ andq %rcx, %r11
+ notq %r12
+ movq 40($mod), %r13
+ andq %rcx, %r12
+ notq %r13
+ movq 48($mod), %r14
+ andq %rcx, %r13
+ notq %r14
+ movq 56($mod), %r15
+ andq %rcx, %r14
+ notq %r15
+ andq %rcx, %r15
+
+ addq ($out), %r8
+ adcq 8($out), %r9
+ adcq 16($out), %r10
+ adcq 24($out), %r11
+ adcq 32($out), %r12
+ adcq 40($out), %r13
+ adcq 48($out), %r14
+ adcq 56($out), %r15
+
+ movq %r8, ($out)
+ movq %r9, 8($out)
+ movq %r10, 16($out)
+ movq %r11, 24($out)
+ movq %r12, 32($out)
+ movq %r13, 40($out)
+ movq %r14, 48($out)
+ movq %r15, 56($out)
+
+ ret
+.size __rsaz_512_subtract,.-__rsaz_512_subtract
+___
+}
+{ # __rsaz_512_mul
+ #
+ # input: %rsi - ap, %rbp - bp
+ # output:
+ # clobbers: everything
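+# For reference only (hypothetical helper, never called): the routine below
+# is a schoolbook 8x8-limb multiplication, i.e. the same computation as this
+# toy 16-bit-limb Perl loop, just fully unrolled and carefully scheduled.
+sub _schoolbook_mul_sketch {
+	my ($a, $b) = @_;	# refs to 8 limbs each, LSW first
+	my @r = (0) x 16;
+	for my $i (0 .. 7) {
+		my $carry = 0;
+		for my $j (0 .. 7) {
+			my $acc = $r[$i + $j] + $a->[$j] * $b->[$i] + $carry;
+			$r[$i + $j] = $acc & 0xffff;
+			$carry = $acc >> 16;
+		}
+		$r[$i + 8] = $carry;
+	}
+	return \@r;	# 16-limb product, as the real code leaves at 8(%rsp)
+}
+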
+my ($ap,$bp) = ("%rsi","%rbp");
+$code.=<<___;
+.type __rsaz_512_mul,\@abi-omnipotent
+.align 32
+__rsaz_512_mul:
+ leaq 8(%rsp), %rdi
+
+ movq ($ap), %rax
+ mulq %rbx
+ movq %rax, (%rdi)
+ movq 8($ap), %rax
+ movq %rdx, %r8
+
+ mulq %rbx
+ addq %rax, %r8
+ movq 16($ap), %rax
+ movq %rdx, %r9
+ adcq \$0, %r9
+
+ mulq %rbx
+ addq %rax, %r9
+ movq 24($ap), %rax
+ movq %rdx, %r10
+ adcq \$0, %r10
+
+ mulq %rbx
+ addq %rax, %r10
+ movq 32($ap), %rax
+ movq %rdx, %r11
+ adcq \$0, %r11
+
+ mulq %rbx
+ addq %rax, %r11
+ movq 40($ap), %rax
+ movq %rdx, %r12
+ adcq \$0, %r12
+
+ mulq %rbx
+ addq %rax, %r12
+ movq 48($ap), %rax
+ movq %rdx, %r13
+ adcq \$0, %r13
+
+ mulq %rbx
+ addq %rax, %r13
+ movq 56($ap), %rax
+ movq %rdx, %r14
+ adcq \$0, %r14
+
+ mulq %rbx
+ addq %rax, %r14
+ movq ($ap), %rax
+ movq %rdx, %r15
+ adcq \$0, %r15
+
+ leaq 8($bp), $bp
+ leaq 8(%rdi), %rdi
+
+ movl \$7, %ecx
+ jmp .Loop_mul
+
+.align 32
+.Loop_mul:
+ movq ($bp), %rbx
+ mulq %rbx
+ addq %rax, %r8
+ movq 8($ap), %rax
+ movq %r8, (%rdi)
+ movq %rdx, %r8
+ adcq \$0, %r8
+
+ mulq %rbx
+ addq %rax, %r9
+ movq 16($ap), %rax
+ adcq \$0, %rdx
+ addq %r9, %r8
+ movq %rdx, %r9
+ adcq \$0, %r9
+
+ mulq %rbx
+ addq %rax, %r10
+ movq 24($ap), %rax
+ adcq \$0, %rdx
+ addq %r10, %r9
+ movq %rdx, %r10
+ adcq \$0, %r10
+
+ mulq %rbx
+ addq %rax, %r11
+ movq 32($ap), %rax
+ adcq \$0, %rdx
+ addq %r11, %r10
+ movq %rdx, %r11
+ adcq \$0, %r11
+
+ mulq %rbx
+ addq %rax, %r12
+ movq 40($ap), %rax
+ adcq \$0, %rdx
+ addq %r12, %r11
+ movq %rdx, %r12
+ adcq \$0, %r12
+
+ mulq %rbx
+ addq %rax, %r13
+ movq 48($ap), %rax
+ adcq \$0, %rdx
+ addq %r13, %r12
+ movq %rdx, %r13
+ adcq \$0, %r13
+
+ mulq %rbx
+ addq %rax, %r14
+ movq 56($ap), %rax
+ adcq \$0, %rdx
+ addq %r14, %r13
+ movq %rdx, %r14
+ leaq 8($bp), $bp
+ adcq \$0, %r14
+
+ mulq %rbx
+ addq %rax, %r15
+ movq ($ap), %rax
+ adcq \$0, %rdx
+ addq %r15, %r14
+ movq %rdx, %r15
+ adcq \$0, %r15
+
+ leaq 8(%rdi), %rdi
+
+ decl %ecx
+ jnz .Loop_mul
+
+ movq %r8, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r13, 40(%rdi)
+ movq %r14, 48(%rdi)
+ movq %r15, 56(%rdi)
+
+ ret
+.size __rsaz_512_mul,.-__rsaz_512_mul
+___
+}
+if ($addx) {
+ # __rsaz_512_mulx
+ #
+ # input: %rsi - ap, %rbp - bp
+ # output:
+ # clobbers: everything
+my ($ap,$bp,$zero) = ("%rsi","%rbp","%rdi");
+$code.=<<___;
+.type __rsaz_512_mulx,\@abi-omnipotent
+.align 32
+__rsaz_512_mulx:
+ mulx ($ap), %rbx, %r8 # initial %rdx preloaded by caller
+ mov \$-6, %rcx
+
+ mulx 8($ap), %rax, %r9
+ movq %rbx, 8(%rsp)
+
+ mulx 16($ap), %rbx, %r10
+ adc %rax, %r8
+
+ mulx 24($ap), %rax, %r11
+ adc %rbx, %r9
+
+ mulx 32($ap), %rbx, %r12
+ adc %rax, %r10
+
+ mulx 40($ap), %rax, %r13
+ adc %rbx, %r11
+
+ mulx 48($ap), %rbx, %r14
+ adc %rax, %r12
+
+ mulx 56($ap), %rax, %r15
+ mov 8($bp), %rdx
+ adc %rbx, %r13
+ adc %rax, %r14
+ adc \$0, %r15
+
+ xor $zero, $zero # cf=0,of=0
+ jmp .Loop_mulx
+
+.align 32
+.Loop_mulx:
+ movq %r8, %rbx
+ mulx ($ap), %rax, %r8
+ adcx %rax, %rbx
+ adox %r9, %r8
+
+ mulx 8($ap), %rax, %r9
+ adcx %rax, %r8
+ adox %r10, %r9
+
+ mulx 16($ap), %rax, %r10
+ adcx %rax, %r9
+ adox %r11, %r10
+
+ mulx 24($ap), %rax, %r11
+ adcx %rax, %r10
+ adox %r12, %r11
+
+ .byte 0x3e,0xc4,0x62,0xfb,0xf6,0xa6,0x20,0x00,0x00,0x00 # mulx 32($ap), %rax, %r12
+ adcx %rax, %r11
+ adox %r13, %r12
+
+ mulx 40($ap), %rax, %r13
+ adcx %rax, %r12
+ adox %r14, %r13
+
+ mulx 48($ap), %rax, %r14
+ adcx %rax, %r13
+ adox %r15, %r14
+
+ mulx 56($ap), %rax, %r15
+ movq 64($bp,%rcx,8), %rdx
+ movq %rbx, 8+64-8(%rsp,%rcx,8)
+ adcx %rax, %r14
+ adox $zero, %r15
+ adcx $zero, %r15 # cf=0
+
+ inc %rcx # of=0
+ jnz .Loop_mulx
+
+ movq %r8, %rbx
+ mulx ($ap), %rax, %r8
+ adcx %rax, %rbx
+ adox %r9, %r8
+
+ .byte 0xc4,0x62,0xfb,0xf6,0x8e,0x08,0x00,0x00,0x00 # mulx 8($ap), %rax, %r9
+ adcx %rax, %r8
+ adox %r10, %r9
+
+ .byte 0xc4,0x62,0xfb,0xf6,0x96,0x10,0x00,0x00,0x00 # mulx 16($ap), %rax, %r10
+ adcx %rax, %r9
+ adox %r11, %r10
+
+ mulx 24($ap), %rax, %r11
+ adcx %rax, %r10
+ adox %r12, %r11
+
+ mulx 32($ap), %rax, %r12
+ adcx %rax, %r11
+ adox %r13, %r12
+
+ mulx 40($ap), %rax, %r13
+ adcx %rax, %r12
+ adox %r14, %r13
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xb6,0x30,0x00,0x00,0x00 # mulx 48($ap), %rax, %r14
+ adcx %rax, %r13
+ adox %r15, %r14
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xbe,0x38,0x00,0x00,0x00 # mulx 56($ap), %rax, %r15
+ adcx %rax, %r14
+ adox $zero, %r15
+ adcx $zero, %r15
+
+ mov %rbx, 8+64-8(%rsp)
+ mov %r8, 8+64(%rsp)
+ mov %r9, 8+64+8(%rsp)
+ mov %r10, 8+64+16(%rsp)
+ mov %r11, 8+64+24(%rsp)
+ mov %r12, 8+64+32(%rsp)
+ mov %r13, 8+64+40(%rsp)
+ mov %r14, 8+64+48(%rsp)
+ mov %r15, 8+64+56(%rsp)
+
+ ret
+.size __rsaz_512_mulx,.-__rsaz_512_mulx
+___
+}
+{
+my ($out,$inp,$power)= $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
+$code.=<<___;
+.globl rsaz_512_scatter4
+.type rsaz_512_scatter4,\@abi-omnipotent
+.align 16
+rsaz_512_scatter4:
+ leaq ($out,$power,8), $out
+ movl \$8, %r9d
+ jmp .Loop_scatter
+.align 16
+.Loop_scatter:
+ movq ($inp), %rax
+ leaq 8($inp), $inp
+ movq %rax, ($out)
+ leaq 128($out), $out
+ decl %r9d
+ jnz .Loop_scatter
+ ret
+.size rsaz_512_scatter4,.-rsaz_512_scatter4
+
+.globl rsaz_512_gather4
+.type rsaz_512_gather4,\@abi-omnipotent
+.align 16
+rsaz_512_gather4:
+___
+$code.=<<___ if ($win64);
+.LSEH_begin_rsaz_512_gather4:
+ .byte 0x48,0x81,0xec,0xa8,0x00,0x00,0x00 # sub $0xa8,%rsp
+ .byte 0x0f,0x29,0x34,0x24 # movaps %xmm6,(%rsp)
+ .byte 0x0f,0x29,0x7c,0x24,0x10 # movaps %xmm7,0x10(%rsp)
+ .byte 0x44,0x0f,0x29,0x44,0x24,0x20 # movaps %xmm8,0x20(%rsp)
+ .byte 0x44,0x0f,0x29,0x4c,0x24,0x30 # movaps %xmm9,0x30(%rsp)
+ .byte 0x44,0x0f,0x29,0x54,0x24,0x40 # movaps %xmm10,0x40(%rsp)
+ .byte 0x44,0x0f,0x29,0x5c,0x24,0x50 # movaps %xmm11,0x50(%rsp)
+ .byte 0x44,0x0f,0x29,0x64,0x24,0x60 # movaps %xmm12,0x60(%rsp)
+ .byte 0x44,0x0f,0x29,0x6c,0x24,0x70 # movaps %xmm13,0x70(%rsp)
+ .byte 0x44,0x0f,0x29,0xb4,0x24,0x80,0,0,0 # movaps %xmm14,0x80(%rsp)
+ .byte 0x44,0x0f,0x29,0xbc,0x24,0x90,0,0,0 # movaps %xmm15,0x90(%rsp)
+___
+$code.=<<___;
+ movd $power,%xmm8
+ movdqa .Linc+16(%rip),%xmm1 # 00000002000000020000000200000002
+ movdqa .Linc(%rip),%xmm0 # 00000001000000010000000000000000
+
+ pshufd \$0,%xmm8,%xmm8 # broadcast $power
+ movdqa %xmm1,%xmm7
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..15 to $power
+#
+for($i=0;$i<4;$i++) {
+$code.=<<___;
+ paddd %xmm`$i`,%xmm`$i+1`
+ pcmpeqd %xmm8,%xmm`$i`
+ movdqa %xmm7,%xmm`$i+3`
+___
+}
+for(;$i<7;$i++) {
+$code.=<<___;
+ paddd %xmm`$i`,%xmm`$i+1`
+ pcmpeqd %xmm8,%xmm`$i`
+___
+}
+$code.=<<___;
+ pcmpeqd %xmm8,%xmm7
+ movl \$8, %r9d
+ jmp .Loop_gather
+.align 16
+.Loop_gather:
+ movdqa 16*0($inp),%xmm8
+ movdqa 16*1($inp),%xmm9
+ movdqa 16*2($inp),%xmm10
+ movdqa 16*3($inp),%xmm11
+ pand %xmm0,%xmm8
+ movdqa 16*4($inp),%xmm12
+ pand %xmm1,%xmm9
+ movdqa 16*5($inp),%xmm13
+ pand %xmm2,%xmm10
+ movdqa 16*6($inp),%xmm14
+ pand %xmm3,%xmm11
+ movdqa 16*7($inp),%xmm15
+ leaq 128($inp), $inp
+ pand %xmm4,%xmm12
+ pand %xmm5,%xmm13
+ pand %xmm6,%xmm14
+ pand %xmm7,%xmm15
+ por %xmm10,%xmm8
+ por %xmm11,%xmm9
+ por %xmm12,%xmm8
+ por %xmm13,%xmm9
+ por %xmm14,%xmm8
+ por %xmm15,%xmm9
+
+ por %xmm9,%xmm8
+ pshufd \$0x4e,%xmm8,%xmm9
+ por %xmm9,%xmm8
+ movq %xmm8,($out)
+ leaq 8($out), $out
+ decl %r9d
+ jnz .Loop_gather
+___
+$code.=<<___ if ($win64);
+ movaps 0x00(%rsp),%xmm6
+ movaps 0x10(%rsp),%xmm7
+ movaps 0x20(%rsp),%xmm8
+ movaps 0x30(%rsp),%xmm9
+ movaps 0x40(%rsp),%xmm10
+ movaps 0x50(%rsp),%xmm11
+ movaps 0x60(%rsp),%xmm12
+ movaps 0x70(%rsp),%xmm13
+ movaps 0x80(%rsp),%xmm14
+ movaps 0x90(%rsp),%xmm15
+ add \$0xa8,%rsp
+___
+$code.=<<___;
+ ret
+.LSEH_end_rsaz_512_gather4:
+.size rsaz_512_gather4,.-rsaz_512_gather4
+
+.align 64
+.Linc:
+ .long 0,0, 1,1
+ .long 2,2, 2,2
+___
+}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ lea 128+24+48(%rax),%rax
+
+ lea .Lmul_gather4_epilogue(%rip),%rbx
+ cmp %r10,%rbx
+ jne .Lse_not_in_mul_gather4
+
+ lea 0xb0(%rax),%rax
+
+ lea -48-0xa8(%rax),%rsi
+ lea 512($context),%rdi
+ mov \$20,%ecx
+ .long 0xa548f3fc # cld; rep movsq
+
+.Lse_not_in_mul_gather4:
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_rsaz_512_sqr
+ .rva .LSEH_end_rsaz_512_sqr
+ .rva .LSEH_info_rsaz_512_sqr
+
+ .rva .LSEH_begin_rsaz_512_mul
+ .rva .LSEH_end_rsaz_512_mul
+ .rva .LSEH_info_rsaz_512_mul
+
+ .rva .LSEH_begin_rsaz_512_mul_gather4
+ .rva .LSEH_end_rsaz_512_mul_gather4
+ .rva .LSEH_info_rsaz_512_mul_gather4
+
+ .rva .LSEH_begin_rsaz_512_mul_scatter4
+ .rva .LSEH_end_rsaz_512_mul_scatter4
+ .rva .LSEH_info_rsaz_512_mul_scatter4
+
+ .rva .LSEH_begin_rsaz_512_mul_by_one
+ .rva .LSEH_end_rsaz_512_mul_by_one
+ .rva .LSEH_info_rsaz_512_mul_by_one
+
+ .rva .LSEH_begin_rsaz_512_gather4
+ .rva .LSEH_end_rsaz_512_gather4
+ .rva .LSEH_info_rsaz_512_gather4
+
+.section .xdata
+.align 8
+.LSEH_info_rsaz_512_sqr:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lsqr_body,.Lsqr_epilogue # HandlerData[]
+.LSEH_info_rsaz_512_mul:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
+.LSEH_info_rsaz_512_mul_gather4:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lmul_gather4_body,.Lmul_gather4_epilogue # HandlerData[]
+.LSEH_info_rsaz_512_mul_scatter4:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lmul_scatter4_body,.Lmul_scatter4_epilogue # HandlerData[]
+.LSEH_info_rsaz_512_mul_by_one:
+ .byte 9,0,0,0
+ .rva se_handler
+ .rva .Lmul_by_one_body,.Lmul_by_one_epilogue # HandlerData[]
+.LSEH_info_rsaz_512_gather4:
+ .byte 0x01,0x46,0x16,0x00
+ .byte 0x46,0xf8,0x09,0x00 # vmovaps 0x90(rsp),xmm15
+ .byte 0x3d,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14
+ .byte 0x34,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13
+ .byte 0x2e,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12
+ .byte 0x28,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11
+ .byte 0x22,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10
+ .byte 0x1c,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9
+ .byte 0x16,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8
+ .byte 0x10,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7
+ .byte 0x0b,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6
+ .byte 0x07,0x01,0x15,0x00 # sub rsp,0xa8
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/s390x-gf2m.pl b/openssl-1.1.0h/crypto/bn/asm/s390x-gf2m.pl
new file mode 100644
index 0000000..cbd16f4
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/s390x-gf2m.pl
@@ -0,0 +1,228 @@
+#! /usr/bin/env perl
+# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's a kind of low-hanging, mechanical port from C for
+# the time being... gcc 4.3 appeared to generate poor code, hence
+# the effort. And indeed, the module delivers a 55%-90%(*) improvement
+# on the heaviest ECDSA verify and ECDH benchmarks for 163- and 571-bit
+# key lengths on z990, 30%-55%(*) on z10, and 70%-110%(*) on z196.
+# This is for the 64-bit build. In the 32-bit "highgprs" case the
+# improvement is even higher, for example on z990 it was measured at
+# 80%-150%. ECDSA sign is a modest 9%-12% faster. Keep in mind that
+# these coefficients are not the ones for bn_GF2m_mul_2x2 itself, as
+# not all CPU time is burnt in it...
+#
+# (*) gcc 4.1 was observed to deliver better results than gcc 4.3,
+# so that improvement coefficients can vary from one specific
+# setup to another.
+
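+# For orientation, a hypothetical plain-Perl sketch (never called by this
+# generator) of the 4-bit-window carry-less multiplication idea behind
+# _mul_1x1 below, shown as a toy 32x32->64-bit multiply and ignoring the
+# top-bit corrections the assembly applies to cover full 64-bit operands.
+sub _clmul_sketch {
+	my ($a, $b) = @_;
+	my @tab = (0) x 16;
+	for my $i (1 .. 15) {			# tab[i] = carry-less product a*i
+		$tab[$i] = $tab[$i >> 1] << 1;
+		$tab[$i] ^= $a if $i & 1;
+	}
+	my $r = 0;
+	for (my $k = 28; $k >= 0; $k -= 4) {	# consume b one 4-bit nibble at a time
+		$r = ($r << 4) ^ $tab[($b >> $k) & 0xf];
+	}
+	return $r;
+}
+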
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
+$rp="%r2";
+$a1="%r3";
+$a0="%r4";
+$b1="%r5";
+$b0="%r6";
+
+$ra="%r14";
+$sp="%r15";
+
+@T=("%r0","%r1");
+@i=("%r12","%r13");
+
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(6..11));
+($lo,$hi,$b)=map("%r$_",(3..5)); $a=$lo; $mask=$a8;
+
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@function
+.align 16
+_mul_1x1:
+ lgr $a1,$a
+ sllg $a2,$a,1
+ sllg $a4,$a,2
+ sllg $a8,$a,3
+
+ srag $lo,$a1,63 # broadcast 63rd bit
+ nihh $a1,0x1fff
+ srag @i[0],$a2,63 # broadcast 62nd bit
+ nihh $a2,0x3fff
+ srag @i[1],$a4,63 # broadcast 61st bit
+ nihh $a4,0x7fff
+ ngr $lo,$b
+ ngr @i[0],$b
+ ngr @i[1],$b
+
+ lghi @T[0],0
+ lgr $a12,$a1
+ stg @T[0],`$stdframe+0*8`($sp) # tab[0]=0
+ xgr $a12,$a2
+ stg $a1,`$stdframe+1*8`($sp) # tab[1]=a1
+ lgr $a48,$a4
+ stg $a2,`$stdframe+2*8`($sp) # tab[2]=a2
+ xgr $a48,$a8
+ stg $a12,`$stdframe+3*8`($sp) # tab[3]=a1^a2
+ xgr $a1,$a4
+
+ stg $a4,`$stdframe+4*8`($sp) # tab[4]=a4
+ xgr $a2,$a4
+ stg $a1,`$stdframe+5*8`($sp) # tab[5]=a1^a4
+ xgr $a12,$a4
+ stg $a2,`$stdframe+6*8`($sp) # tab[6]=a2^a4
+ xgr $a1,$a48
+ stg $a12,`$stdframe+7*8`($sp) # tab[7]=a1^a2^a4
+ xgr $a2,$a48
+
+ stg $a8,`$stdframe+8*8`($sp) # tab[8]=a8
+ xgr $a12,$a48
+ stg $a1,`$stdframe+9*8`($sp) # tab[9]=a1^a8
+ xgr $a1,$a4
+ stg $a2,`$stdframe+10*8`($sp) # tab[10]=a2^a8
+ xgr $a2,$a4
+ stg $a12,`$stdframe+11*8`($sp) # tab[11]=a1^a2^a8
+
+ xgr $a12,$a4
+ stg $a48,`$stdframe+12*8`($sp) # tab[12]=a4^a8
+ srlg $hi,$lo,1
+ stg $a1,`$stdframe+13*8`($sp) # tab[13]=a1^a4^a8
+ sllg $lo,$lo,63
+ stg $a2,`$stdframe+14*8`($sp) # tab[14]=a2^a4^a8
+ srlg @T[0],@i[0],2
+ stg $a12,`$stdframe+15*8`($sp) # tab[15]=a1^a2^a4^a8
+
+ lghi $mask,`0xf<<3`
+ sllg $a1,@i[0],62
+ sllg @i[0],$b,3
+ srlg @T[1],@i[1],3
+ ngr @i[0],$mask
+ sllg $a2,@i[1],61
+ srlg @i[1],$b,4-3
+ xgr $hi,@T[0]
+ ngr @i[1],$mask
+ xgr $lo,$a1
+ xgr $hi,@T[1]
+ xgr $lo,$a2
+
+ xg $lo,$stdframe(@i[0],$sp)
+ srlg @i[0],$b,8-3
+ ngr @i[0],$mask
+___
+for($n=1;$n<14;$n++) {
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ srlg @i[1],$b,`($n+2)*4`-3
+ sllg @T[0],@T[1],`$n*4`
+ ngr @i[1],$mask
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+___
+ push(@i,shift(@i)); push(@T,shift(@T));
+}
+$code.=<<___;
+ lg @T[1],$stdframe(@i[1],$sp)
+ sllg @T[0],@T[1],`$n*4`
+ srlg @T[1],@T[1],`64-$n*4`
+ xgr $lo,@T[0]
+ xgr $hi,@T[1]
+
+ lg @T[0],$stdframe(@i[0],$sp)
+ sllg @T[1],@T[0],`($n+1)*4`
+ srlg @T[0],@T[0],`64-($n+1)*4`
+ xgr $lo,@T[1]
+ xgr $hi,@T[0]
+
+ br $ra
+.size _mul_1x1,.-_mul_1x1
+
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@function
+.align 16
+bn_GF2m_mul_2x2:
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
+
+ lghi %r1,-$stdframe-128
+ la %r0,0($sp)
+ la $sp,0(%r1,$sp) # alloca
+ st${g} %r0,0($sp) # back chain
+___
+if ($SIZE_T==8) {
+my @r=map("%r$_",(6..9));
+$code.=<<___;
+ bras $ra,_mul_1x1 # a1·b1
+ stmg $lo,$hi,16($rp)
+
+ lg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # a0·b0
+ stmg $lo,$hi,0($rp)
+
+ lg $a,`$stdframe+128+3*$SIZE_T`($sp)
+ lg $b,`$stdframe+128+5*$SIZE_T`($sp)
+ xg $a,`$stdframe+128+4*$SIZE_T`($sp)
+ xg $b,`$stdframe+128+6*$SIZE_T`($sp)
+ bras $ra,_mul_1x1 # (a0+a1)·(b0+b1)
+ lmg @r[0],@r[3],0($rp)
+
+ xgr $lo,$hi
+ xgr $hi,@r[1]
+ xgr $lo,@r[0]
+ xgr $hi,@r[2]
+ xgr $lo,@r[3]
+ xgr $hi,@r[3]
+ xgr $lo,$hi
+ stg $hi,16($rp)
+ stg $lo,8($rp)
+___
+} else {
+$code.=<<___;
+ sllg %r3,%r3,32
+ sllg %r5,%r5,32
+ or %r3,%r4
+ or %r5,%r6
+ bras $ra,_mul_1x1
+ rllg $lo,$lo,32
+ rllg $hi,$hi,32
+ stmg $lo,$hi,0($rp)
+___
+}
+$code.=<<___;
+ lm${g} %r6,%r15,`$stdframe+128+6*$SIZE_T`($sp)
+ br $ra
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.string "GF(2^m) Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/s390x-mont.pl b/openssl-1.1.0h/crypto/bn/asm/s390x-mont.pl
new file mode 100644
index 0000000..2205bc2
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/s390x-mont.pl
@@ -0,0 +1,284 @@
+#! /usr/bin/env perl
+# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# April 2007.
+#
+# Performance improvement over vanilla C code varies from 85% to 45%
+# depending on key length and benchmark. Unfortunately in this context
+# these are not very impressive results [for code that utilizes "wide"
+# 64x64=128-bit multiplication, which is not commonly available to C
+# programmers]; at least a hand-coded bn_asm.c replacement is known to
+# provide 30-40% better results for the longest keys. Well, on second
+# thought it's not very surprising, because z-CPUs are single-issue
+# and _strictly_ in-order, while bn_mul_mont is more or less
+# dependent on the CPU's ability to pipeline instructions and keep
+# several of them "in-flight" at the same time. While other methods,
+# for example Karatsuba, aim to minimize the number of multiplications
+# at the cost of an increase in other operations, bn_mul_mont aims to
+# neatly "overlap" multiplications and the other operations [and on
+# most platforms even minimize the amount of the other operations, in
+# particular references to memory]. But it's possible to improve this
+# module's performance by implementing a dedicated squaring code path
+# and possibly by unrolling loops...
+
+# January 2009.
+#
+# Reschedule to minimize/avoid Address Generation Interlock hazard,
+# make inner loops counter-based.
+
+# November 2010.
+#
+# Adapt for the -m31 build. If the kernel supports what's called the
+# "highgprs" feature on Linux [see /proc/cpuinfo], it's possible to use
+# 64-bit instructions and achieve "64-bit" performance even in a 31-bit
+# legacy application context. The feature is not specific to any
+# particular processor, as long as it's a "z-CPU". The latter implies
+# that the code remains z/Architecture specific. Compatibility with
+# 32-bit BN_ULONG is achieved by swapping words after 64-bit loads,
+# follow the _dswap-s.
+# On z990 it was measured to perform 2.6-2.2 times better than
+# compiler-generated code, less for longer keys...
+
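+# A hypothetical plain-Perl model (never called by this generator) of the
+# word-wise Montgomery multiplication that bn_mul_mont below schedules for
+# z/Architecture; it uses toy 16-bit limbs so native integers are enough and
+# mirrors the .L1st/.Louter/.Linner structure only loosely.
+sub _bn_mul_mont_sketch {
+	my ($ap, $bp, $np, $n0, $num) = @_;	# limb array refs, LSW first; $n0 = -N^-1 mod 2^16
+	my @tp = (0) x ($num + 1);
+	for my $i (0 .. $num - 1) {
+		my $m = (($tp[0] + $ap->[0] * $bp->[$i]) * $n0) & 0xffff;
+		my $carry = 0;
+		for my $j (0 .. $num - 1) {
+			my $acc = $tp[$j] + $ap->[$j] * $bp->[$i] + $m * $np->[$j] + $carry;
+			$carry = $acc >> 16;
+			$tp[$j - 1] = $acc & 0xffff if $j;	# limb 0 is zero by choice of $m
+		}
+		my $top = $tp[$num] + $carry;
+		$tp[$num - 1] = $top & 0xffff;
+		$tp[$num] = $top >> 16;			# the "upmost overflow bit"
+	}
+	my ($borrow, @rp) = (0);
+	for my $j (0 .. $num - 1) {			# rp = tp - N, remembering the borrow
+		my $d = $tp[$j] - $np->[$j] - $borrow;
+		$borrow = $d < 0 ? 1 : 0;
+		push @rp, $d & 0xffff;
+	}
+	return ($tp[$num] || !$borrow) ? \@rp : [ @tp[0 .. $num - 1] ];
+}
+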
+$flavour = shift;
+
+if ($flavour =~ /3[12]/) {
+ $SIZE_T=4;
+ $g="";
+} else {
+ $SIZE_T=8;
+ $g="g";
+}
+
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+$stdframe=16*$SIZE_T+4*8;
+
+$mn0="%r0";
+$num="%r1";
+
+# int bn_mul_mont(
+$rp="%r2"; # BN_ULONG *rp,
+$ap="%r3"; # const BN_ULONG *ap,
+$bp="%r4"; # const BN_ULONG *bp,
+$np="%r5"; # const BN_ULONG *np,
+$n0="%r6"; # const BN_ULONG *n0,
+#$num="160(%r15)" # int num);
+
+$bi="%r2"; # zaps rp
+$j="%r7";
+
+$ahi="%r8";
+$alo="%r9";
+$nhi="%r10";
+$nlo="%r11";
+$AHI="%r12";
+$NHI="%r13";
+$count="%r14";
+$sp="%r15";
+
+$code.=<<___;
+.text
+.globl bn_mul_mont
+.type bn_mul_mont,\@function
+bn_mul_mont:
+ lgf $num,`$stdframe+$SIZE_T-4`($sp) # pull $num
+ sla $num,`log($SIZE_T)/log(2)` # $num to enumerate bytes
+ la $bp,0($num,$bp)
+
+ st${g} %r2,2*$SIZE_T($sp)
+
+ cghi $num,16 #
+ lghi %r2,0 #
+ blr %r14 # if($num<16) return 0;
+___
+$code.=<<___ if ($flavour =~ /3[12]/);
+ tmll $num,4
+ bnzr %r14 # if ($num&1) return 0;
+___
+$code.=<<___ if ($flavour !~ /3[12]/);
+ cghi $num,96 #
+ bhr %r14 # if($num>96) return 0;
+___
+$code.=<<___;
+ stm${g} %r3,%r15,3*$SIZE_T($sp)
+
+ lghi $rp,-$stdframe-8 # leave room for carry bit
+ lcgr $j,$num # -$num
+ lgr %r0,$sp
+ la $rp,0($rp,$sp)
+ la $sp,0($j,$rp) # alloca
+ st${g} %r0,0($sp) # back chain
+
+ sra $num,3 # restore $num
+ la $bp,0($j,$bp) # restore $bp
+ ahi $num,-1 # adjust $num for inner loop
+ lg $n0,0($n0) # pull n0
+ _dswap $n0
+
+ lg $bi,0($bp)
+ _dswap $bi
+ lg $alo,0($ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[0]*bp[0]
+ lgr $AHI,$ahi
+
+ lgr $mn0,$alo # "tp[0]"*n0
+ msgr $mn0,$n0
+
+ lg $nlo,0($np) #
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[0]*m1
+ algr $nlo,$alo # +="tp[0]"
+ lghi $NHI,0
+ alcgr $NHI,$nhi
+
+ la $j,8(%r0) # j=1
+ lr $count,$num
+
+.align 16
+.L1st:
+ lg $alo,0($j,$ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[j]*bp[0]
+ algr $alo,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$ahi
+
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[j]*m1
+ algr $nlo,$NHI
+ lghi $NHI,0
+ alcgr $nhi,$NHI # +="tp[j]"
+ algr $nlo,$alo
+ alcgr $NHI,$nhi
+
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
+ la $j,8($j) # j++
+ brct $count,.L1st
+
+ algr $NHI,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$AHI # upmost overflow bit
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
+ la $bp,8($bp) # bp++
+
+.Louter:
+ lg $bi,0($bp) # bp[i]
+ _dswap $bi
+ lg $alo,0($ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[0]*bp[i]
+ alg $alo,$stdframe($sp) # +=tp[0]
+ lghi $AHI,0
+ alcgr $AHI,$ahi
+
+ lgr $mn0,$alo
+ msgr $mn0,$n0 # tp[0]*n0
+
+ lg $nlo,0($np) # np[0]
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[0]*m1
+ algr $nlo,$alo # +="tp[0]"
+ lghi $NHI,0
+ alcgr $NHI,$nhi
+
+ la $j,8(%r0) # j=1
+ lr $count,$num
+
+.align 16
+.Linner:
+ lg $alo,0($j,$ap)
+ _dswap $alo
+ mlgr $ahi,$bi # ap[j]*bp[i]
+ algr $alo,$AHI
+ lghi $AHI,0
+ alcgr $ahi,$AHI
+ alg $alo,$stdframe($j,$sp)# +=tp[j]
+ alcgr $AHI,$ahi
+
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ mlgr $nhi,$mn0 # np[j]*m1
+ algr $nlo,$NHI
+ lghi $NHI,0
+ alcgr $nhi,$NHI
+ algr $nlo,$alo # +="tp[j]"
+ alcgr $NHI,$nhi
+
+ stg $nlo,$stdframe-8($j,$sp) # tp[j-1]=
+ la $j,8($j) # j++
+ brct $count,.Linner
+
+ algr $NHI,$AHI
+ lghi $AHI,0
+ alcgr $AHI,$AHI
+ alg $NHI,$stdframe($j,$sp)# accumulate previous upmost overflow bit
+ lghi $ahi,0
+ alcgr $AHI,$ahi # new upmost overflow bit
+ stg $NHI,$stdframe-8($j,$sp)
+ stg $AHI,$stdframe($j,$sp)
+
+ la $bp,8($bp) # bp++
+ cl${g} $bp,`$stdframe+8+4*$SIZE_T`($j,$sp) # compare to &bp[num]
+ jne .Louter
+
+ l${g} $rp,`$stdframe+8+2*$SIZE_T`($j,$sp) # reincarnate rp
+ la $ap,$stdframe($sp)
+ ahi $num,1 # restore $num, incidentally clears "borrow"
+
+ la $j,0(%r0)
+ lr $count,$num
+.Lsub: lg $alo,0($j,$ap)
+ lg $nlo,0($j,$np)
+ _dswap $nlo
+ slbgr $alo,$nlo
+ stg $alo,0($j,$rp)
+ la $j,8($j)
+ brct $count,.Lsub
+ lghi $ahi,0
+ slbgr $AHI,$ahi # handle upmost carry
+
+ ngr $ap,$AHI
+ lghi $np,-1
+ xgr $np,$AHI
+ ngr $np,$rp
+ ogr $ap,$np # ap=borrow?tp:rp
+
+ la $j,0(%r0)
+ lgr $count,$num
+.Lcopy: lg $alo,0($j,$ap) # copy or in-place refresh
+ _dswap $alo
+ stg $j,$stdframe($j,$sp) # zap tp
+ stg $alo,0($j,$rp)
+ la $j,8($j)
+ brct $count,.Lcopy
+
+ la %r1,`$stdframe+8+6*$SIZE_T`($j,$sp)
+ lm${g} %r6,%r15,0(%r1)
+ lghi %r2,1 # signal "processed"
+ br %r14
+.size bn_mul_mont,.-bn_mul_mont
+.string "Montgomery Multiplication for s390x, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+ s/_dswap\s+(%r[0-9]+)/sprintf("rllg\t%s,%s,32",$1,$1) if($SIZE_T==4)/e;
+ print $_,"\n";
+}
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/s390x.S b/openssl-1.1.0h/crypto/bn/asm/s390x.S
new file mode 100644
index 0000000..292a7a9
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/s390x.S
@@ -0,0 +1,713 @@
+.ident "s390x.S, version 1.1"
+// ====================================================================
+// Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
+//
+// Licensed under the OpenSSL license (the "License"). You may not use
+// this file except in compliance with the License. You can obtain a copy
+// in the file LICENSE in the source distribution or at
+// https://www.openssl.org/source/license.html
+// ====================================================================
+
+.text
+
+#define zero %r0
+
+// BN_ULONG bn_mul_add_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
+.globl bn_mul_add_words
+.type bn_mul_add_words,@function
+.align 4
+bn_mul_add_words:
+ lghi zero,0 // zero = 0
+ la %r1,0(%r2) // put rp aside [to give way to]
+ lghi %r2,0 // return value
+ ltgfr %r4,%r4
+ bler %r14 // if (len<=0) return 0;
+
+ stmg %r6,%r13,48(%r15)
+ lghi %r2,3
+ lghi %r12,0 // carry = 0
+ slgr %r1,%r3 // rp-=ap
+ nr %r2,%r4 // len%4
+ sra %r4,2 // cnt=len/4
+ jz .Loop1_madd // carry is incidentally cleared if branch taken
+ algr zero,zero // clear carry
+
+ lg %r7,0(%r3) // ap[0]
+ lg %r9,8(%r3) // ap[1]
+ mlgr %r6,%r5 // *=w
+ brct %r4,.Loop4_madd
+ j .Loop4_madd_tail
+
+.Loop4_madd:
+ mlgr %r8,%r5
+ lg %r11,16(%r3) // ap[i+2]
+ alcgr %r7,%r12 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r3,%r1) // +=rp[i]
+ stg %r7,0(%r3,%r1) // rp[i]=
+
+ mlgr %r10,%r5
+ lg %r13,24(%r3)
+ alcgr %r9,%r6
+ alcgr %r8,zero
+ alg %r9,8(%r3,%r1)
+ stg %r9,8(%r3,%r1)
+
+ mlgr %r12,%r5
+ lg %r7,32(%r3)
+ alcgr %r11,%r8
+ alcgr %r10,zero
+ alg %r11,16(%r3,%r1)
+ stg %r11,16(%r3,%r1)
+
+ mlgr %r6,%r5
+ lg %r9,40(%r3)
+ alcgr %r13,%r10
+ alcgr %r12,zero
+ alg %r13,24(%r3,%r1)
+ stg %r13,24(%r3,%r1)
+
+ la %r3,32(%r3) // i+=4
+ brct %r4,.Loop4_madd
+
+.Loop4_madd_tail:
+ mlgr %r8,%r5
+ lg %r11,16(%r3)
+ alcgr %r7,%r12 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r3,%r1) // +=rp[i]
+ stg %r7,0(%r3,%r1) // rp[i]=
+
+ mlgr %r10,%r5
+ lg %r13,24(%r3)
+ alcgr %r9,%r6
+ alcgr %r8,zero
+ alg %r9,8(%r3,%r1)
+ stg %r9,8(%r3,%r1)
+
+ mlgr %r12,%r5
+ alcgr %r11,%r8
+ alcgr %r10,zero
+ alg %r11,16(%r3,%r1)
+ stg %r11,16(%r3,%r1)
+
+ alcgr %r13,%r10
+ alcgr %r12,zero
+ alg %r13,24(%r3,%r1)
+ stg %r13,24(%r3,%r1)
+
+ la %r3,32(%r3) // i+=4
+
+ la %r2,1(%r2) // see if len%4 is zero ...
+ brct %r2,.Loop1_madd // without touching condition code:-)
+
+.Lend_madd:
+ lgr %r2,zero // return value
+ alcgr %r2,%r12 // collect even carry bit
+ lmg %r6,%r13,48(%r15)
+ br %r14
+
+.Loop1_madd:
+ lg %r7,0(%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r12 // +=carry
+ alcgr %r6,zero
+ alg %r7,0(%r3,%r1) // +=rp[i]
+ stg %r7,0(%r3,%r1) // rp[i]=
+
+ lgr %r12,%r6
+ la %r3,8(%r3) // i++
+ brct %r2,.Loop1_madd
+
+ j .Lend_madd
+.size bn_mul_add_words,.-bn_mul_add_words
+
+// BN_ULONG bn_mul_words(BN_ULONG *r2,BN_ULONG *r3,int r4,BN_ULONG r5);
+.globl bn_mul_words
+.type bn_mul_words,@function
+.align 4
+bn_mul_words:
+ lghi zero,0 // zero = 0
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0;
+ ltgfr %r4,%r4
+ bler %r14 // if (len<=0) return 0;
+
+ stmg %r6,%r10,48(%r15)
+ lghi %r10,3
+ lghi %r8,0 // carry = 0
+ nr %r10,%r4 // len%4
+ sra %r4,2 // cnt=len/4
+ jz .Loop1_mul // carry is incidentally cleared if branch taken
+ algr zero,zero // clear carry
+
+.Loop4_mul:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lg %r9,8(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ stg %r9,8(%r2,%r1)
+
+ lg %r7,16(%r2,%r3)
+ mlgr %r6,%r5
+ alcgr %r7,%r8
+ stg %r7,16(%r2,%r1)
+
+ lg %r9,24(%r2,%r3)
+ mlgr %r8,%r5
+ alcgr %r9,%r6
+ stg %r9,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r4,.Loop4_mul
+
+ la %r10,1(%r10) // see if len%4 is zero ...
+ brct %r10,.Loop1_mul // without touching condition code:-)
+
+.Lend_mul:
+ alcgr %r8,zero // collect carry bit
+ lgr %r2,%r8
+ lmg %r6,%r10,48(%r15)
+ br %r14
+
+.Loop1_mul:
+ lg %r7,0(%r2,%r3) // ap[i]
+ mlgr %r6,%r5 // *=w
+ alcgr %r7,%r8 // +=carry
+ stg %r7,0(%r2,%r1) // rp[i]=
+
+ lgr %r8,%r6
+ la %r2,8(%r2) // i++
+ brct %r10,.Loop1_mul
+
+ j .Lend_mul
+.size bn_mul_words,.-bn_mul_words
+
+// void bn_sqr_words(BN_ULONG *r2,BN_ULONG *r3,int r4)
+.globl bn_sqr_words
+.type bn_sqr_words,@function
+.align 4
+bn_sqr_words:
+ ltgfr %r4,%r4
+ bler %r14
+
+ stmg %r6,%r7,48(%r15)
+ srag %r1,%r4,2 // cnt=len/4
+ jz .Loop1_sqr
+
+.Loop4_sqr:
+ lg %r7,0(%r3)
+ mlgr %r6,%r7
+ stg %r7,0(%r2)
+ stg %r6,8(%r2)
+
+ lg %r7,8(%r3)
+ mlgr %r6,%r7
+ stg %r7,16(%r2)
+ stg %r6,24(%r2)
+
+ lg %r7,16(%r3)
+ mlgr %r6,%r7
+ stg %r7,32(%r2)
+ stg %r6,40(%r2)
+
+ lg %r7,24(%r3)
+ mlgr %r6,%r7
+ stg %r7,48(%r2)
+ stg %r6,56(%r2)
+
+ la %r3,32(%r3)
+ la %r2,64(%r2)
+ brct %r1,.Loop4_sqr
+
+ lghi %r1,3
+ nr %r4,%r1 // cnt=len%4
+ jz .Lend_sqr
+
+.Loop1_sqr:
+ lg %r7,0(%r3)
+ mlgr %r6,%r7
+ stg %r7,0(%r2)
+ stg %r6,8(%r2)
+
+ la %r3,8(%r3)
+ la %r2,16(%r2)
+ brct %r4,.Loop1_sqr
+
+.Lend_sqr:
+ lmg %r6,%r7,48(%r15)
+ br %r14
+.size bn_sqr_words,.-bn_sqr_words
+
+// BN_ULONG bn_div_words(BN_ULONG h,BN_ULONG l,BN_ULONG d);
+.globl bn_div_words
+.type bn_div_words,@function
+.align 4
+bn_div_words:
+ dlgr %r2,%r4
+ lgr %r2,%r3
+ br %r14
+.size bn_div_words,.-bn_div_words
+
+// BN_ULONG bn_add_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
+.globl bn_add_words
+.type bn_add_words,@function
+.align 4
+bn_add_words:
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0
+ ltgfr %r5,%r5
+ bler %r14 // if (len<=0) return 0;
+
+ stg %r6,48(%r15)
+ lghi %r6,3
+ nr %r6,%r5 // len%4
+ sra %r5,2 // len/4, use sra because it sets condition code
+ jz .Loop1_add // carry is incidentally cleared if branch taken
+ algr %r2,%r2 // clear carry
+
+.Loop4_add:
+ lg %r0,0(%r2,%r3)
+ alcg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+ lg %r0,8(%r2,%r3)
+ alcg %r0,8(%r2,%r4)
+ stg %r0,8(%r2,%r1)
+ lg %r0,16(%r2,%r3)
+ alcg %r0,16(%r2,%r4)
+ stg %r0,16(%r2,%r1)
+ lg %r0,24(%r2,%r3)
+ alcg %r0,24(%r2,%r4)
+ stg %r0,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r5,.Loop4_add
+
+ la %r6,1(%r6) // see if len%4 is zero ...
+ brct %r6,.Loop1_add // without touching condition code:-)
+
+.Lexit_add:
+ lghi %r2,0
+ alcgr %r2,%r2
+ lg %r6,48(%r15)
+ br %r14
+
+.Loop1_add:
+ lg %r0,0(%r2,%r3)
+ alcg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+
+ la %r2,8(%r2) // i++
+ brct %r6,.Loop1_add
+
+ j .Lexit_add
+.size bn_add_words,.-bn_add_words
+
+// BN_ULONG bn_sub_words(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4,int r5);
+.globl bn_sub_words
+.type bn_sub_words,@function
+.align 4
+bn_sub_words:
+ la %r1,0(%r2) // put rp aside
+ lghi %r2,0 // i=0
+ ltgfr %r5,%r5
+ bler %r14 // if (len<=0) return 0;
+
+ stg %r6,48(%r15)
+ lghi %r6,3
+ nr %r6,%r5 // len%4
+ sra %r5,2 // len/4, use sra because it sets condition code
+ jnz .Loop4_sub // borrow is incidentally cleared if branch taken
+ slgr %r2,%r2 // clear borrow
+
+.Loop1_sub:
+ lg %r0,0(%r2,%r3)
+ slbg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+
+ la %r2,8(%r2) // i++
+ brct %r6,.Loop1_sub
+ j .Lexit_sub
+
+.Loop4_sub:
+ lg %r0,0(%r2,%r3)
+ slbg %r0,0(%r2,%r4)
+ stg %r0,0(%r2,%r1)
+ lg %r0,8(%r2,%r3)
+ slbg %r0,8(%r2,%r4)
+ stg %r0,8(%r2,%r1)
+ lg %r0,16(%r2,%r3)
+ slbg %r0,16(%r2,%r4)
+ stg %r0,16(%r2,%r1)
+ lg %r0,24(%r2,%r3)
+ slbg %r0,24(%r2,%r4)
+ stg %r0,24(%r2,%r1)
+
+ la %r2,32(%r2) // i+=4
+ brct %r5,.Loop4_sub
+
+ la %r6,1(%r6) // see if len%4 is zero ...
+ brct %r6,.Loop1_sub // without touching condition code:-)
+
+.Lexit_sub:
+ lghi %r2,0
+ slbgr %r2,%r2
+ lcgr %r2,%r2
+ lg %r6,48(%r15)
+ br %r14
+.size bn_sub_words,.-bn_sub_words
+
+#define c1 %r1
+#define c2 %r5
+#define c3 %r8
+
+#define mul_add_c(ai,bi,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlg %r6,bi*8(%r4); \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
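The macro above accumulates the 128-bit product ap[ai]*bp[bi] into the three-word column accumulator (c1,c2,c3), letting the carry out of each 64-bit addition ripple upwards. A minimal C sketch of one such column step follows, assuming 64-bit limbs and unsigned __int128 support; mul_add_c_ref is a hypothetical name, for illustration only.

#include <stdint.h>

/* One comba column step, equivalent to mul_add_c(ai,bi,c1,c2,c3) above.
 * Illustrative only; assumes unsigned __int128 support. */
static void mul_add_c_ref(const uint64_t *ap, const uint64_t *bp,
                          int ai, int bi,
                          uint64_t *c1, uint64_t *c2, uint64_t *c3)
{
    unsigned __int128 t = (unsigned __int128)ap[ai] * bp[bi];
    uint64_t lo = (uint64_t)t, hi = (uint64_t)(t >> 64);

    *c1 += lo;               /* algr  c1,%r7                              */
    hi  += (*c1 < lo);       /* fold the carry into hi (hi <= 2^64-2)     */
    *c2 += hi;               /* alcgr c2,%r6                              */
    *c3 += (*c2 < hi);       /* alcgr c3,zero                             */
}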
+
+// void bn_mul_comba8(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
+.globl bn_mul_comba8
+.type bn_mul_comba8,@function
+.align 4
+bn_mul_comba8:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ mul_add_c(0,0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,1,c2,c3,c1);
+ mul_add_c(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,0,c3,c1,c2);
+ mul_add_c(1,1,c3,c1,c2);
+ mul_add_c(0,2,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ mul_add_c(0,3,c1,c2,c3);
+ mul_add_c(1,2,c1,c2,c3);
+ mul_add_c(2,1,c1,c2,c3);
+ mul_add_c(3,0,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ mul_add_c(4,0,c2,c3,c1);
+ mul_add_c(3,1,c2,c3,c1);
+ mul_add_c(2,2,c2,c3,c1);
+ mul_add_c(1,3,c2,c3,c1);
+ mul_add_c(0,4,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ mul_add_c(0,5,c3,c1,c2);
+ mul_add_c(1,4,c3,c1,c2);
+ mul_add_c(2,3,c3,c1,c2);
+ mul_add_c(3,2,c3,c1,c2);
+ mul_add_c(4,1,c3,c1,c2);
+ mul_add_c(5,0,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ mul_add_c(6,0,c1,c2,c3);
+ mul_add_c(5,1,c1,c2,c3);
+ mul_add_c(4,2,c1,c2,c3);
+ mul_add_c(3,3,c1,c2,c3);
+ mul_add_c(2,4,c1,c2,c3);
+ mul_add_c(1,5,c1,c2,c3);
+ mul_add_c(0,6,c1,c2,c3);
+ stg c1,6*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,7,c2,c3,c1);
+ mul_add_c(1,6,c2,c3,c1);
+ mul_add_c(2,5,c2,c3,c1);
+ mul_add_c(3,4,c2,c3,c1);
+ mul_add_c(4,3,c2,c3,c1);
+ mul_add_c(5,2,c2,c3,c1);
+ mul_add_c(6,1,c2,c3,c1);
+ mul_add_c(7,0,c2,c3,c1);
+ stg c2,7*8(%r2)
+ lghi c2,0
+
+ mul_add_c(7,1,c3,c1,c2);
+ mul_add_c(6,2,c3,c1,c2);
+ mul_add_c(5,3,c3,c1,c2);
+ mul_add_c(4,4,c3,c1,c2);
+ mul_add_c(3,5,c3,c1,c2);
+ mul_add_c(2,6,c3,c1,c2);
+ mul_add_c(1,7,c3,c1,c2);
+ stg c3,8*8(%r2)
+ lghi c3,0
+
+ mul_add_c(2,7,c1,c2,c3);
+ mul_add_c(3,6,c1,c2,c3);
+ mul_add_c(4,5,c1,c2,c3);
+ mul_add_c(5,4,c1,c2,c3);
+ mul_add_c(6,3,c1,c2,c3);
+ mul_add_c(7,2,c1,c2,c3);
+ stg c1,9*8(%r2)
+ lghi c1,0
+
+ mul_add_c(7,3,c2,c3,c1);
+ mul_add_c(6,4,c2,c3,c1);
+ mul_add_c(5,5,c2,c3,c1);
+ mul_add_c(4,6,c2,c3,c1);
+ mul_add_c(3,7,c2,c3,c1);
+ stg c2,10*8(%r2)
+ lghi c2,0
+
+ mul_add_c(4,7,c3,c1,c2);
+ mul_add_c(5,6,c3,c1,c2);
+ mul_add_c(6,5,c3,c1,c2);
+ mul_add_c(7,4,c3,c1,c2);
+ stg c3,11*8(%r2)
+ lghi c3,0
+
+ mul_add_c(7,5,c1,c2,c3);
+ mul_add_c(6,6,c1,c2,c3);
+ mul_add_c(5,7,c1,c2,c3);
+ stg c1,12*8(%r2)
+ lghi c1,0
+
+
+ mul_add_c(6,7,c2,c3,c1);
+ mul_add_c(7,6,c2,c3,c1);
+ stg c2,13*8(%r2)
+ lghi c2,0
+
+ mul_add_c(7,7,c3,c1,c2);
+ stg c3,14*8(%r2)
+ stg c1,15*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_mul_comba8,.-bn_mul_comba8
+
+// void bn_mul_comba4(BN_ULONG *r2,BN_ULONG *r3,BN_ULONG *r4);
+.globl bn_mul_comba4
+.type bn_mul_comba4,@function
+.align 4
+bn_mul_comba4:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ mul_add_c(0,0,c1,c2,c3);
+	stg	c1,0*8(%r2)
+ lghi c1,0
+
+ mul_add_c(0,1,c2,c3,c1);
+ mul_add_c(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,0,c3,c1,c2);
+ mul_add_c(1,1,c3,c1,c2);
+ mul_add_c(0,2,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ mul_add_c(0,3,c1,c2,c3);
+ mul_add_c(1,2,c1,c2,c3);
+ mul_add_c(2,1,c1,c2,c3);
+ mul_add_c(3,0,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ mul_add_c(3,1,c2,c3,c1);
+ mul_add_c(2,2,c2,c3,c1);
+ mul_add_c(1,3,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ mul_add_c(2,3,c3,c1,c2);
+ mul_add_c(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ mul_add_c(3,3,c1,c2,c3);
+ stg c1,6*8(%r2)
+ stg c2,7*8(%r2)
+
+	lmg	%r6,%r8,48(%r15)
+ br %r14
+.size bn_mul_comba4,.-bn_mul_comba4
+
+#define sqr_add_c(ai,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlgr %r6,%r7; \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
+
+#define sqr_add_c2(ai,aj,c1,c2,c3) \
+ lg %r7,ai*8(%r3); \
+ mlg %r6,aj*8(%r3); \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero; \
+ algr c1,%r7; \
+ alcgr c2,%r6; \
+ alcgr c3,zero
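sqr_add_c2 differs from sqr_add_c only in that the cross product a[ai]*a[aj] is accumulated twice, since each off-diagonal term appears twice in a square. A sketch of the same step in C, under the usual assumptions (64-bit limbs, unsigned __int128 support; sqr_add_c2_ref is a hypothetical name):

#include <stdint.h>

/* Off-diagonal squaring step, equivalent to sqr_add_c2(ai,aj,c1,c2,c3):
 * the product is added twice into the column. Illustrative only. */
static void sqr_add_c2_ref(const uint64_t *a, int ai, int aj, uint64_t c[3])
{
    unsigned __int128 t = (unsigned __int128)a[ai] * a[aj];
    for (int k = 0; k < 2; k++) {            /* cross term counts twice */
        uint64_t lo = (uint64_t)t, hi = (uint64_t)(t >> 64);
        c[0] += lo;  hi   += (c[0] < lo);
        c[1] += hi;  c[2] += (c[1] < hi);
    }
}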
+
+// void bn_sqr_comba8(BN_ULONG *r2,BN_ULONG *r3);
+.globl bn_sqr_comba8
+.type bn_sqr_comba8,@function
+.align 4
+bn_sqr_comba8:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ sqr_add_c(0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(1,c3,c1,c2);
+ sqr_add_c2(2,0,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(3,0,c1,c2,c3);
+ sqr_add_c2(2,1,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(2,c2,c3,c1);
+ sqr_add_c2(3,1,c2,c3,c1);
+ sqr_add_c2(4,0,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(5,0,c3,c1,c2);
+ sqr_add_c2(4,1,c3,c1,c2);
+ sqr_add_c2(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(3,c1,c2,c3);
+ sqr_add_c2(4,2,c1,c2,c3);
+ sqr_add_c2(5,1,c1,c2,c3);
+ sqr_add_c2(6,0,c1,c2,c3);
+ stg c1,6*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(7,0,c2,c3,c1);
+ sqr_add_c2(6,1,c2,c3,c1);
+ sqr_add_c2(5,2,c2,c3,c1);
+ sqr_add_c2(4,3,c2,c3,c1);
+ stg c2,7*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(4,c3,c1,c2);
+ sqr_add_c2(5,3,c3,c1,c2);
+ sqr_add_c2(6,2,c3,c1,c2);
+ sqr_add_c2(7,1,c3,c1,c2);
+ stg c3,8*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(7,2,c1,c2,c3);
+ sqr_add_c2(6,3,c1,c2,c3);
+ sqr_add_c2(5,4,c1,c2,c3);
+ stg c1,9*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(5,c2,c3,c1);
+ sqr_add_c2(6,4,c2,c3,c1);
+ sqr_add_c2(7,3,c2,c3,c1);
+ stg c2,10*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(7,4,c3,c1,c2);
+ sqr_add_c2(6,5,c3,c1,c2);
+ stg c3,11*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(6,c1,c2,c3);
+ sqr_add_c2(7,5,c1,c2,c3);
+ stg c1,12*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(7,6,c2,c3,c1);
+ stg c2,13*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(7,c3,c1,c2);
+ stg c3,14*8(%r2)
+ stg c1,15*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_sqr_comba8,.-bn_sqr_comba8
+
+// void bn_sqr_comba4(BN_ULONG *r2,BN_ULONG *r3);
+.globl bn_sqr_comba4
+.type bn_sqr_comba4,@function
+.align 4
+bn_sqr_comba4:
+ stmg %r6,%r8,48(%r15)
+
+ lghi c1,0
+ lghi c2,0
+ lghi c3,0
+ lghi zero,0
+
+ sqr_add_c(0,c1,c2,c3);
+ stg c1,0*8(%r2)
+ lghi c1,0
+
+ sqr_add_c2(1,0,c2,c3,c1);
+ stg c2,1*8(%r2)
+ lghi c2,0
+
+ sqr_add_c(1,c3,c1,c2);
+ sqr_add_c2(2,0,c3,c1,c2);
+ stg c3,2*8(%r2)
+ lghi c3,0
+
+ sqr_add_c2(3,0,c1,c2,c3);
+ sqr_add_c2(2,1,c1,c2,c3);
+ stg c1,3*8(%r2)
+ lghi c1,0
+
+ sqr_add_c(2,c2,c3,c1);
+ sqr_add_c2(3,1,c2,c3,c1);
+ stg c2,4*8(%r2)
+ lghi c2,0
+
+ sqr_add_c2(3,2,c3,c1,c2);
+ stg c3,5*8(%r2)
+ lghi c3,0
+
+ sqr_add_c(3,c1,c2,c3);
+ stg c1,6*8(%r2)
+ stg c2,7*8(%r2)
+
+ lmg %r6,%r8,48(%r15)
+ br %r14
+.size bn_sqr_comba4,.-bn_sqr_comba4
diff --git a/openssl-1.1.0h/crypto/bn/asm/sparct4-mont.pl b/openssl-1.1.0h/crypto/bn/asm/sparct4-mont.pl
new file mode 100755
index 0000000..4faf66f
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/sparct4-mont.pl
@@ -0,0 +1,1232 @@
+#! /usr/bin/env perl
+# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by David S. Miller <davem@devemloft.net> and Andy Polyakov
+# <appro@openssl.org>. The module is licensed under 2-clause BSD
+# license. November 2012. All rights reserved.
+# ====================================================================
+
+######################################################################
+# Montgomery squaring-n-multiplication module for SPARC T4.
+#
+# The module consists of three parts:
+#
+# 1) a collection of "single-op" subroutines that perform a single
+#    operation, Montgomery squaring or multiplication, on 512-,
+#    1024-, 1536- and 2048-bit operands;
+# 2) a collection of "multi-op" subroutines that perform 5 squaring
+#    operations and 1 multiplication on operands of the above lengths;
+# 3) fall-back and helper VIS3 subroutines.
+#
+# RSA sign is dominated by the multi-op subroutine, while RSA verify
+# and DSA are dominated by the single-op ones. A special note about
+# the 4096-bit RSA verify result: the operands are too long for the
+# dedicated hardware and are handled by the VIS3 code, which is why
+# no improvement is seen there. It is surely possible to improve it
+# [by deploying the 'mpmul' instruction], maybe in the future...
+#
+# Performance improvement.
+#
+# 64-bit process, VIS3:
+# sign verify sign/s verify/s
+# rsa 1024 bits 0.000628s 0.000028s 1592.4 35434.4
+# rsa 2048 bits 0.003282s 0.000106s 304.7 9438.3
+# rsa 4096 bits 0.025866s 0.000340s 38.7 2940.9
+# dsa 1024 bits 0.000301s 0.000332s 3323.7 3013.9
+# dsa 2048 bits 0.001056s 0.001233s 946.9 810.8
+#
+# 64-bit process, this module:
+# sign verify sign/s verify/s
+# rsa 1024 bits 0.000256s 0.000016s 3904.4 61411.9
+# rsa 2048 bits 0.000946s 0.000029s 1056.8 34292.7
+# rsa 4096 bits 0.005061s 0.000340s 197.6 2940.5
+# dsa 1024 bits 0.000176s 0.000195s 5674.7 5130.5
+# dsa 2048 bits 0.000296s 0.000354s 3383.2 2827.6
+#
+######################################################################
+# 32-bit process, VIS3:
+# sign verify sign/s verify/s
+# rsa 1024 bits 0.000665s 0.000028s 1504.8 35233.3
+# rsa 2048 bits 0.003349s 0.000106s 298.6 9433.4
+# rsa 4096 bits 0.025959s 0.000341s 38.5 2934.8
+# dsa 1024 bits 0.000320s 0.000341s 3123.3 2929.6
+# dsa 2048 bits 0.001101s 0.001260s 908.2 793.4
+#
+# 32-bit process, this module:
+# sign verify sign/s verify/s
+# rsa 1024 bits 0.000301s 0.000017s 3317.1 60240.0
+# rsa 2048 bits 0.001034s 0.000030s 966.9 33812.7
+# rsa 4096 bits 0.005244s 0.000341s 190.7 2935.4
+# dsa 1024 bits 0.000201s 0.000205s 4976.1 4879.2
+# dsa 2048 bits 0.000328s 0.000360s 3051.1 2774.2
+#
+# 32-bit code is prone to performance degradation as the interrupt
+# rate dispatched to the CPU executing the code grows. This is because
+# standard interrupt handling in a 32-bit process context zeroes the
+# upper halves of most integer registers used as input or output. That
+# renders the result invalid, and the operation has to be re-run. If
+# the CPU is "bothered" with timer interrupts only, the penalty is
+# hardly measurable. But to mitigate this problem at higher interrupt
+# rates, the contemporary Linux kernel recognizes a biased stack even
+# in a 32-bit process context and preserves full register contents.
+# See http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=517ffce4e1a03aea979fe3a18a3dd1761a24fafb
+# for details.
+
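For orientation, all of the routines generated below compute the same word-level operation: rp = ap*bp*R^-1 mod np with R = 2^(64*num). The following CIOS-style C sketch shows that operation; it is illustrative only (not the OpenSSL fall-back code) and assumes 64-bit limbs, unsigned __int128 support, reduced inputs, and n0 = -np[0]^-1 mod 2^64. mont_mul_ref and its parameter names are hypothetical.

#include <stdint.h>
#include <string.h>

/* CIOS-style Montgomery multiplication sketch: rp = ap*bp*R^-1 mod np,
 * R = 2^(64*num). Illustrative only; assumes unsigned __int128. */
static void mont_mul_ref(uint64_t *rp, const uint64_t *ap, const uint64_t *bp,
                         const uint64_t *np, uint64_t n0, int num)
{
    uint64_t t[num + 2];
    memset(t, 0, sizeof(t));

    for (int i = 0; i < num; i++) {
        unsigned __int128 acc;
        uint64_t carry = 0;
        for (int j = 0; j < num; j++) {          /* t += ap * bp[i] */
            acc = (unsigned __int128)ap[j] * bp[i] + t[j] + carry;
            t[j] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);
        }
        acc = (unsigned __int128)t[num] + carry;
        t[num] = (uint64_t)acc;
        t[num + 1] = (uint64_t)(acc >> 64);

        uint64_t m = t[0] * n0;                  /* n0 = -np[0]^-1 mod 2^64 */
        acc = (unsigned __int128)m * np[0] + t[0];
        carry = (uint64_t)(acc >> 64);
        for (int j = 1; j < num; j++) {          /* t = (t + m*np) / 2^64 */
            acc = (unsigned __int128)m * np[j] + t[j] + carry;
            t[j - 1] = (uint64_t)acc;
            carry = (uint64_t)(acc >> 64);
        }
        acc = (unsigned __int128)t[num] + carry;
        t[num - 1] = (uint64_t)acc;
        t[num] = t[num + 1] + (uint64_t)(acc >> 64);
    }

    /* final conditional subtraction: with ap,bp < np the value in t is < 2*np */
    uint64_t borrow = 0;
    for (int j = 0; j < num; j++) {
        unsigned __int128 d = (unsigned __int128)t[j] - np[j] - borrow;
        rp[j] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    if (t[num] < borrow)                         /* t < np: keep t as is */
        memcpy(rp, t, num * sizeof(uint64_t));
}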
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "sparcv9_modes.pl";
+
+$output = pop;
+open STDOUT,">$output";
+
+$code.=<<___;
+#include "sparc_arch.h"
+
+#ifdef __arch64__
+.register %g2,#scratch
+.register %g3,#scratch
+#endif
+
+.section ".text",#alloc,#execinstr
+
+#ifdef __PIC__
+SPARC_PIC_THUNK(%g1)
+#endif
+___
+
+########################################################################
+# Register layout for mont[mul|sqr] instructions.
+# For details see "Oracle SPARC Architecture 2011" manual at
+# http://www.oracle.com/technetwork/server-storage/sun-sparc-enterprise/documentation/.
+#
+my @R=map("%f".2*$_,(0..11,30,31,12..29));
+my @N=(map("%l$_",(0..7)),map("%o$_",(0..5))); @N=(@N,@N,@N[0..3]);
+my @A=(@N[0..13],@R[14..31]);
+my @B=(map("%i$_",(0..5)),map("%l$_",(0..7))); @B=(@B,@B,map("%o$_",(0..3)));
+
+########################################################################
+# int bn_mul_mont_t4_$NUM(u64 *rp,const u64 *ap,const u64 *bp,
+# const u64 *np,const BN_ULONG *n0);
+#
+sub generate_bn_mul_mont_t4() {
+my $NUM=shift;
+my ($rp,$ap,$bp,$np,$sentinel)=map("%g$_",(1..5));
+
+$code.=<<___;
+.globl bn_mul_mont_t4_$NUM
+.align 32
+bn_mul_mont_t4_$NUM:
+#ifdef __arch64__
+ mov 0,$sentinel
+ mov -128,%g4
+#elif defined(SPARCV9_64BIT_STACK)
+ SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
+ ld [%g1+0],%g1 ! OPENSSL_sparcv9_P[0]
+ mov -2047,%g4
+ and %g1,SPARCV9_64BIT_STACK,%g1
+ movrz %g1,0,%g4
+ mov -1,$sentinel
+ add %g4,-128,%g4
+#else
+ mov -1,$sentinel
+ mov -128,%g4
+#endif
+ sllx $sentinel,32,$sentinel
+ save %sp,%g4,%sp
+#ifndef __arch64__
+ save %sp,-128,%sp ! warm it up
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ restore
+ restore
+ restore
+ restore
+ restore
+ restore
+#endif
+ and %sp,1,%g4
+ or $sentinel,%fp,%fp
+ or %g4,$sentinel,$sentinel
+
+ ! copy arguments to global registers
+ mov %i0,$rp
+ mov %i1,$ap
+ mov %i2,$bp
+ mov %i3,$np
+ ld [%i4+0],%f1 ! load *n0
+ ld [%i4+4],%f0
+ fsrc2 %f0,%f60
+___
+
+# load ap[$NUM] ########################################################
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for($i=0; $i<14 && $i<$NUM; $i++) {
+my $lo=$i<13?@A[$i+1]:"%o7";
+$code.=<<___;
+ ld [$ap+$i*8+0],$lo
+ ld [$ap+$i*8+4],@A[$i]
+ sllx @A[$i],32,@A[$i]
+ or $lo,@A[$i],@A[$i]
+___
+}
+for(; $i<$NUM; $i++) {
+my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1));
+$code.=<<___;
+ ld [$ap+$i*8+0],$lo
+ ld [$ap+$i*8+4],$hi
+ fsrc2 $hi,@A[$i]
+___
+}
+# load np[$NUM] ########################################################
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for($i=0; $i<14 && $i<$NUM; $i++) {
+my $lo=$i<13?@N[$i+1]:"%o7";
+$code.=<<___;
+ ld [$np+$i*8+0],$lo
+ ld [$np+$i*8+4],@N[$i]
+ sllx @N[$i],32,@N[$i]
+ or $lo,@N[$i],@N[$i]
+___
+}
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for(; $i<28 && $i<$NUM; $i++) {
+my $lo=$i<27?@N[$i+1]:"%o7";
+$code.=<<___;
+ ld [$np+$i*8+0],$lo
+ ld [$np+$i*8+4],@N[$i]
+ sllx @N[$i],32,@N[$i]
+ or $lo,@N[$i],@N[$i]
+___
+}
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for(; $i<$NUM; $i++) {
+my $lo=($i<$NUM-1)?@N[$i+1]:"%o7";
+$code.=<<___;
+ ld [$np+$i*8+0],$lo
+ ld [$np+$i*8+4],@N[$i]
+ sllx @N[$i],32,@N[$i]
+ or $lo,@N[$i],@N[$i]
+___
+}
+$code.=<<___;
+ cmp $ap,$bp
+ be SIZE_T_CC,.Lmsquare_$NUM
+ nop
+___
+
+# load bp[$NUM] ########################################################
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for($i=0; $i<14 && $i<$NUM; $i++) {
+my $lo=$i<13?@B[$i+1]:"%o7";
+$code.=<<___;
+ ld [$bp+$i*8+0],$lo
+ ld [$bp+$i*8+4],@B[$i]
+ sllx @B[$i],32,@B[$i]
+ or $lo,@B[$i],@B[$i]
+___
+}
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for(; $i<$NUM; $i++) {
+my $lo=($i<$NUM-1)?@B[$i+1]:"%o7";
+$code.=<<___;
+ ld [$bp+$i*8+0],$lo
+ ld [$bp+$i*8+4],@B[$i]
+ sllx @B[$i],32,@B[$i]
+ or $lo,@B[$i],@B[$i]
+___
+}
+# magic ################################################################
+$code.=<<___;
+ .word 0x81b02920+$NUM-1 ! montmul $NUM-1
+.Lmresume_$NUM:
+ fbu,pn %fcc3,.Lmabort_$NUM
+#ifndef __arch64__
+ and %fp,$sentinel,$sentinel
+ brz,pn $sentinel,.Lmabort_$NUM
+#endif
+ nop
+#ifdef __arch64__
+ restore
+ restore
+ restore
+ restore
+ restore
+#else
+ restore; and %fp,$sentinel,$sentinel
+ restore; and %fp,$sentinel,$sentinel
+ restore; and %fp,$sentinel,$sentinel
+ restore; and %fp,$sentinel,$sentinel
+ brz,pn $sentinel,.Lmabort1_$NUM
+ restore
+#endif
+___
+
+# save tp[$NUM] ########################################################
+for($i=0; $i<14 && $i<$NUM; $i++) {
+$code.=<<___;
+ movxtod @A[$i],@R[$i]
+___
+}
+$code.=<<___;
+#ifdef __arch64__
+ restore
+#else
+ and %fp,$sentinel,$sentinel
+ restore
+ and $sentinel,1,%o7
+ and %fp,$sentinel,$sentinel
+ srl %fp,0,%fp ! just in case?
+ or %o7,$sentinel,$sentinel
+ brz,a,pn $sentinel,.Lmdone_$NUM
+ mov 0,%i0 ! return failure
+#endif
+___
+for($i=0; $i<12 && $i<$NUM; $i++) {
+@R[$i] =~ /%f([0-9]+)/;
+my $lo = "%f".($1+1);
+$code.=<<___;
+ st $lo,[$rp+$i*8+0]
+ st @R[$i],[$rp+$i*8+4]
+___
+}
+for(; $i<$NUM; $i++) {
+my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1));
+$code.=<<___;
+ fsrc2 @R[$i],$hi
+ st $lo,[$rp+$i*8+0]
+ st $hi,[$rp+$i*8+4]
+___
+}
+$code.=<<___;
+ mov 1,%i0 ! return success
+.Lmdone_$NUM:
+ ret
+ restore
+
+.Lmabort_$NUM:
+ restore
+ restore
+ restore
+ restore
+ restore
+.Lmabort1_$NUM:
+ restore
+
+ mov 0,%i0 ! return failure
+ ret
+ restore
+
+.align 32
+.Lmsquare_$NUM:
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+ .word 0x81b02940+$NUM-1 ! montsqr $NUM-1
+ ba .Lmresume_$NUM
+ nop
+.type bn_mul_mont_t4_$NUM, #function
+.size bn_mul_mont_t4_$NUM, .-bn_mul_mont_t4_$NUM
+___
+}
+
+for ($i=8;$i<=32;$i+=8) {
+ &generate_bn_mul_mont_t4($i);
+}
+
+########################################################################
+#
+sub load_ccr {
+my ($ptbl,$pwr,$ccr,$skip_wr)=@_;
+$code.=<<___;
+ srl $pwr, 2, %o4
+ and $pwr, 3, %o5
+ and %o4, 7, %o4
+ sll %o5, 3, %o5 ! offset within first cache line
+ add %o5, $ptbl, $ptbl ! of the pwrtbl
+ or %g0, 1, %o5
+ sll %o5, %o4, $ccr
+___
+$code.=<<___ if (!$skip_wr);
+ wr $ccr, %g0, %ccr
+___
+}
+sub load_b_pair {
+my ($pwrtbl,$B0,$B1)=@_;
+
+$code.=<<___;
+ ldx [$pwrtbl+0*32], $B0
+ ldx [$pwrtbl+8*32], $B1
+ ldx [$pwrtbl+1*32], %o4
+ ldx [$pwrtbl+9*32], %o5
+ movvs %icc, %o4, $B0
+ ldx [$pwrtbl+2*32], %o4
+ movvs %icc, %o5, $B1
+ ldx [$pwrtbl+10*32],%o5
+ move %icc, %o4, $B0
+ ldx [$pwrtbl+3*32], %o4
+ move %icc, %o5, $B1
+ ldx [$pwrtbl+11*32],%o5
+ movneg %icc, %o4, $B0
+ ldx [$pwrtbl+4*32], %o4
+ movneg %icc, %o5, $B1
+ ldx [$pwrtbl+12*32],%o5
+ movcs %xcc, %o4, $B0
+ ldx [$pwrtbl+5*32],%o4
+ movcs %xcc, %o5, $B1
+ ldx [$pwrtbl+13*32],%o5
+ movvs %xcc, %o4, $B0
+ ldx [$pwrtbl+6*32], %o4
+ movvs %xcc, %o5, $B1
+ ldx [$pwrtbl+14*32],%o5
+ move %xcc, %o4, $B0
+ ldx [$pwrtbl+7*32], %o4
+ move %xcc, %o5, $B1
+ ldx [$pwrtbl+15*32],%o5
+ movneg %xcc, %o4, $B0
+ add $pwrtbl,16*32, $pwrtbl
+ movneg %xcc, %o5, $B1
+___
+}
+sub load_b {
+my ($pwrtbl,$Bi)=@_;
+
+$code.=<<___;
+ ldx [$pwrtbl+0*32], $Bi
+ ldx [$pwrtbl+1*32], %o4
+ ldx [$pwrtbl+2*32], %o5
+ movvs %icc, %o4, $Bi
+ ldx [$pwrtbl+3*32], %o4
+ move %icc, %o5, $Bi
+ ldx [$pwrtbl+4*32], %o5
+ movneg %icc, %o4, $Bi
+ ldx [$pwrtbl+5*32], %o4
+ movcs %xcc, %o5, $Bi
+ ldx [$pwrtbl+6*32], %o5
+ movvs %xcc, %o4, $Bi
+ ldx [$pwrtbl+7*32], %o4
+ move %xcc, %o5, $Bi
+ add $pwrtbl,8*32, $pwrtbl
+ movneg %xcc, %o4, $Bi
+___
+}
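The load_ccr/load_b/load_b_pair helpers gather one multiplicand word out of the 32-entry power table without forming a secret-dependent cache-line address: load_ccr folds the low bits of the window value into a fixed sub-cache-line offset and turns the high bits into a single bit of %ccr, and load_b then reads eight candidate slots at a fixed 32-byte stride and keeps exactly one of them via conditional moves. Below is a hedged C sketch of the same idea, selecting by masking rather than by conditional moves; gather32_ct is a hypothetical name, illustrative only.

#include <stdint.h>

/* Pick entry `pwr` out of 32 candidates by touching every candidate and
 * masking, so the access pattern does not depend on the secret index.
 * Illustrative only; the assembly above uses %ccr-keyed conditional moves. */
static uint64_t gather32_ct(const uint64_t tbl[32], unsigned pwr)
{
    uint64_t r = 0;
    for (unsigned k = 0; k < 32; k++) {
        uint64_t mask = (uint64_t)0 - (uint64_t)(k == pwr); /* all-ones iff k==pwr */
        r |= tbl[k] & mask;
    }
    return r;
}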
+
+########################################################################
+# int bn_pwr5_mont_t4_$NUM(u64 *tp,const u64 *np,const BN_ULONG *n0,
+# const u64 *pwrtbl,int pwr,int stride);
+#
+sub generate_bn_pwr5_mont_t4() {
+my $NUM=shift;
+my ($tp,$np,$pwrtbl,$pwr,$sentinel)=map("%g$_",(1..5));
+
+$code.=<<___;
+.globl bn_pwr5_mont_t4_$NUM
+.align 32
+bn_pwr5_mont_t4_$NUM:
+#ifdef __arch64__
+ mov 0,$sentinel
+ mov -128,%g4
+#elif defined(SPARCV9_64BIT_STACK)
+ SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
+ ld [%g1+0],%g1 ! OPENSSL_sparcv9_P[0]
+ mov -2047,%g4
+ and %g1,SPARCV9_64BIT_STACK,%g1
+ movrz %g1,0,%g4
+ mov -1,$sentinel
+ add %g4,-128,%g4
+#else
+ mov -1,$sentinel
+ mov -128,%g4
+#endif
+ sllx $sentinel,32,$sentinel
+ save %sp,%g4,%sp
+#ifndef __arch64__
+ save %sp,-128,%sp ! warm it up
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ save %sp,-128,%sp
+ restore
+ restore
+ restore
+ restore
+ restore
+ restore
+#endif
+ and %sp,1,%g4
+ or $sentinel,%fp,%fp
+ or %g4,$sentinel,$sentinel
+
+ ! copy arguments to global registers
+ mov %i0,$tp
+ mov %i1,$np
+ ld [%i2+0],%f1 ! load *n0
+ ld [%i2+4],%f0
+ mov %i3,$pwrtbl
+ srl %i4,%g0,%i4 ! pack last arguments
+ sllx %i5,32,$pwr
+ or %i4,$pwr,$pwr
+ fsrc2 %f0,%f60
+___
+
+# load tp[$NUM] ########################################################
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for($i=0; $i<14 && $i<$NUM; $i++) {
+$code.=<<___;
+ ldx [$tp+$i*8],@A[$i]
+___
+}
+for(; $i<$NUM; $i++) {
+$code.=<<___;
+ ldd [$tp+$i*8],@A[$i]
+___
+}
+# load np[$NUM] ########################################################
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for($i=0; $i<14 && $i<$NUM; $i++) {
+$code.=<<___;
+ ldx [$np+$i*8],@N[$i]
+___
+}
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for(; $i<28 && $i<$NUM; $i++) {
+$code.=<<___;
+ ldx [$np+$i*8],@N[$i]
+___
+}
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for(; $i<$NUM; $i++) {
+$code.=<<___;
+ ldx [$np+$i*8],@N[$i]
+___
+}
+# load pwrtbl[pwr] ########################################################
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+
+ srlx $pwr, 32, %o4 ! unpack $pwr
+ srl $pwr, %g0, %o5
+ sub %o4, 5, %o4
+ mov $pwrtbl, %o7
+ sllx %o4, 32, $pwr ! re-pack $pwr
+ or %o5, $pwr, $pwr
+ srl %o5, %o4, %o5
+___
+ &load_ccr("%o7","%o5","%o4");
+$code.=<<___;
+ b .Lstride_$NUM
+ nop
+.align 16
+.Lstride_$NUM:
+___
+for($i=0; $i<14 && $i<$NUM; $i+=2) {
+ &load_b_pair("%o7",@B[$i],@B[$i+1]);
+}
+$code.=<<___;
+ save %sp,-128,%sp; or $sentinel,%fp,%fp
+___
+for(; $i<$NUM; $i+=2) {
+ &load_b_pair("%i7",@B[$i],@B[$i+1]);
+}
+$code.=<<___;
+ srax $pwr, 32, %o4 ! unpack $pwr
+ srl $pwr, %g0, %o5
+ sub %o4, 5, %o4
+ mov $pwrtbl, %i7
+ sllx %o4, 32, $pwr ! re-pack $pwr
+ or %o5, $pwr, $pwr
+ srl %o5, %o4, %o5
+___
+ &load_ccr("%i7","%o5","%o4",1);
+
+# magic ################################################################
+for($i=0; $i<5; $i++) {
+$code.=<<___;
+ .word 0x81b02940+$NUM-1 ! montsqr $NUM-1
+ fbu,pn %fcc3,.Labort_$NUM
+#ifndef __arch64__
+ and %fp,$sentinel,$sentinel
+ brz,pn $sentinel,.Labort_$NUM
+#endif
+ nop
+___
+}
+$code.=<<___;
+ wr %o4, %g0, %ccr
+ .word 0x81b02920+$NUM-1 ! montmul $NUM-1
+ fbu,pn %fcc3,.Labort_$NUM
+#ifndef __arch64__
+ and %fp,$sentinel,$sentinel
+ brz,pn $sentinel,.Labort_$NUM
+#endif
+
+ srax $pwr, 32, %o4
+#ifdef __arch64__
+ brgez %o4,.Lstride_$NUM
+ restore
+ restore
+ restore
+ restore
+ restore
+#else
+ brgez %o4,.Lstride_$NUM
+ restore; and %fp,$sentinel,$sentinel
+ restore; and %fp,$sentinel,$sentinel
+ restore; and %fp,$sentinel,$sentinel
+ restore; and %fp,$sentinel,$sentinel
+ brz,pn $sentinel,.Labort1_$NUM
+ restore
+#endif
+___
+
+# save tp[$NUM] ########################################################
+for($i=0; $i<14 && $i<$NUM; $i++) {
+$code.=<<___;
+ movxtod @A[$i],@R[$i]
+___
+}
+$code.=<<___;
+#ifdef __arch64__
+ restore
+#else
+ and %fp,$sentinel,$sentinel
+ restore
+ and $sentinel,1,%o7
+ and %fp,$sentinel,$sentinel
+ srl %fp,0,%fp ! just in case?
+ or %o7,$sentinel,$sentinel
+ brz,a,pn $sentinel,.Ldone_$NUM
+ mov 0,%i0 ! return failure
+#endif
+___
+for($i=0; $i<$NUM; $i++) {
+$code.=<<___;
+ std @R[$i],[$tp+$i*8]
+___
+}
+$code.=<<___;
+ mov 1,%i0 ! return success
+.Ldone_$NUM:
+ ret
+ restore
+
+.Labort_$NUM:
+ restore
+ restore
+ restore
+ restore
+ restore
+.Labort1_$NUM:
+ restore
+
+ mov 0,%i0 ! return failure
+ ret
+ restore
+.type bn_pwr5_mont_t4_$NUM, #function
+.size bn_pwr5_mont_t4_$NUM, .-bn_pwr5_mont_t4_$NUM
+___
+}
+
+for ($i=8;$i<=32;$i+=8) {
+ &generate_bn_pwr5_mont_t4($i);
+}
+
+{
+########################################################################
+# Fall-back subroutines
+#
+# copy of bn_mul_mont_vis3 adjusted for vectors of 64-bit values
+#
+($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
+ (map("%g$_",(1..5)),map("%o$_",(0..5,7)));
+
+# int bn_mul_mont(
+$rp="%o0"; # u64 *rp,
+$ap="%o1"; # const u64 *ap,
+$bp="%o2"; # const u64 *bp,
+$np="%o3"; # const u64 *np,
+$n0p="%o4"; # const BN_ULONG *n0,
+$num="%o5"; # int num); # caller ensures that num is >=3
+$code.=<<___;
+.globl bn_mul_mont_t4
+.align 32
+bn_mul_mont_t4:
+ add %sp, STACK_BIAS, %g4 ! real top of stack
+ sll $num, 3, $num ! size in bytes
+ add $num, 63, %g1
+ andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes
+ sub %g4, %g1, %g1
+ andn %g1, 63, %g1 ! align at 64 byte
+ sub %g1, STACK_FRAME, %g1 ! new top of stack
+ sub %g1, %g4, %g1
+
+ save %sp, %g1, %sp
+___
+# +-------------------------------+<----- %sp
+# . .
+# +-------------------------------+<----- aligned at 64 bytes
+# | __int64 tmp[0] |
+# +-------------------------------+
+# . .
+# . .
+# +-------------------------------+<----- aligned at 64 bytes
+# . .
+($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
+($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz)=map("%l$_",(0..7));
+($ovf,$i)=($t0,$t1);
+$code.=<<___;
+ ld [$n0p+0], $t0 ! pull n0[0..1] value
+ ld [$n0p+4], $t1
+ add %sp, STACK_BIAS+STACK_FRAME, $tp
+ ldx [$bp+0], $m0 ! m0=bp[0]
+ sllx $t1, 32, $n0
+ add $bp, 8, $bp
+ or $t0, $n0, $n0
+
+ ldx [$ap+0], $aj ! ap[0]
+
+ mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
+ umulxhi $aj, $m0, $hi0
+
+ ldx [$ap+8], $aj ! ap[1]
+ add $ap, 16, $ap
+ ldx [$np+0], $nj ! np[0]
+
+ mulx $lo0, $n0, $m1 ! "tp[0]"*n0
+
+ mulx $aj, $m0, $alo ! ap[1]*bp[0]
+ umulxhi $aj, $m0, $aj ! ahi=aj
+
+ mulx $nj, $m1, $lo1 ! np[0]*m1
+ umulxhi $nj, $m1, $hi1
+
+ ldx [$np+8], $nj ! np[1]
+
+ addcc $lo0, $lo1, $lo1
+ add $np, 16, $np
+ addxc %g0, $hi1, $hi1
+
+ mulx $nj, $m1, $nlo ! np[1]*m1
+ umulxhi $nj, $m1, $nj ! nhi=nj
+
+ ba .L1st
+ sub $num, 24, $cnt ! cnt=num-3
+
+.align 16
+.L1st:
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0
+
+ ldx [$ap+0], $aj ! ap[j]
+ addcc $nlo, $hi1, $lo1
+ add $ap, 8, $ap
+ addxc $nj, %g0, $hi1 ! nhi=nj
+
+ ldx [$np+0], $nj ! np[j]
+ mulx $aj, $m0, $alo ! ap[j]*bp[0]
+ add $np, 8, $np
+ umulxhi $aj, $m0, $aj ! ahi=aj
+
+ mulx $nj, $m1, $nlo ! np[j]*m1
+ addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
+ umulxhi $nj, $m1, $nj ! nhi=nj
+ addxc %g0, $hi1, $hi1
+ stxa $lo1, [$tp]0xe2 ! tp[j-1]
+ add $tp, 8, $tp ! tp++
+
+ brnz,pt $cnt, .L1st
+ sub $cnt, 8, $cnt ! j--
+!.L1st
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0 ! ahi=aj
+
+ addcc $nlo, $hi1, $lo1
+ addxc $nj, %g0, $hi1
+ addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
+ addxc %g0, $hi1, $hi1
+ stxa $lo1, [$tp]0xe2 ! tp[j-1]
+ add $tp, 8, $tp
+
+ addcc $hi0, $hi1, $hi1
+ addxc %g0, %g0, $ovf ! upmost overflow bit
+ stxa $hi1, [$tp]0xe2
+ add $tp, 8, $tp
+
+ ba .Louter
+ sub $num, 16, $i ! i=num-2
+
+.align 16
+.Louter:
+ ldx [$bp+0], $m0 ! m0=bp[i]
+ add $bp, 8, $bp
+
+ sub $ap, $num, $ap ! rewind
+ sub $np, $num, $np
+ sub $tp, $num, $tp
+
+ ldx [$ap+0], $aj ! ap[0]
+ ldx [$np+0], $nj ! np[0]
+
+ mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
+ ldx [$tp], $tj ! tp[0]
+ umulxhi $aj, $m0, $hi0
+ ldx [$ap+8], $aj ! ap[1]
+ addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
+ mulx $aj, $m0, $alo ! ap[1]*bp[i]
+ addxc %g0, $hi0, $hi0
+ mulx $lo0, $n0, $m1 ! tp[0]*n0
+ umulxhi $aj, $m0, $aj ! ahi=aj
+ mulx $nj, $m1, $lo1 ! np[0]*m1
+ add $ap, 16, $ap
+ umulxhi $nj, $m1, $hi1
+ ldx [$np+8], $nj ! np[1]
+ add $np, 16, $np
+ addcc $lo1, $lo0, $lo1
+ mulx $nj, $m1, $nlo ! np[1]*m1
+ addxc %g0, $hi1, $hi1
+ umulxhi $nj, $m1, $nj ! nhi=nj
+
+ ba .Linner
+ sub $num, 24, $cnt ! cnt=num-3
+.align 16
+.Linner:
+ addcc $alo, $hi0, $lo0
+ ldx [$tp+8], $tj ! tp[j]
+ addxc $aj, %g0, $hi0 ! ahi=aj
+ ldx [$ap+0], $aj ! ap[j]
+ add $ap, 8, $ap
+ addcc $nlo, $hi1, $lo1
+ mulx $aj, $m0, $alo ! ap[j]*bp[i]
+ addxc $nj, %g0, $hi1 ! nhi=nj
+ ldx [$np+0], $nj ! np[j]
+ add $np, 8, $np
+ umulxhi $aj, $m0, $aj ! ahi=aj
+ addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
+ mulx $nj, $m1, $nlo ! np[j]*m1
+ addxc %g0, $hi0, $hi0
+ umulxhi $nj, $m1, $nj ! nhi=nj
+ addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+ add $tp, 8, $tp
+ brnz,pt $cnt, .Linner
+ sub $cnt, 8, $cnt
+!.Linner
+ ldx [$tp+8], $tj ! tp[j]
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0 ! ahi=aj
+ addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi0, $hi0
+
+ addcc $nlo, $hi1, $lo1
+ addxc $nj, %g0, $hi1 ! nhi=nj
+ addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+
+ subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
+ addxccc $hi1, $hi0, $hi1
+ addxc %g0, %g0, $ovf
+ stx $hi1, [$tp+8]
+ add $tp, 16, $tp
+
+ brnz,pt $i, .Louter
+ sub $i, 8, $i
+
+ sub $ap, $num, $ap ! rewind
+ sub $np, $num, $np
+ sub $tp, $num, $tp
+ ba .Lsub
+ subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
+
+.align 16
+.Lsub:
+ ldx [$tp], $tj
+ add $tp, 8, $tp
+ ldx [$np+0], $nj
+ add $np, 8, $np
+ subccc $tj, $nj, $t2 ! tp[j]-np[j]
+ srlx $tj, 32, $tj
+ srlx $nj, 32, $nj
+ subccc $tj, $nj, $t3
+ add $rp, 8, $rp
+ st $t2, [$rp-4] ! reverse order
+ st $t3, [$rp-8]
+ brnz,pt $cnt, .Lsub
+ sub $cnt, 8, $cnt
+
+ sub $np, $num, $np ! rewind
+ sub $tp, $num, $tp
+ sub $rp, $num, $rp
+
+ subc $ovf, %g0, $ovf ! handle upmost overflow bit
+ and $tp, $ovf, $ap
+ andn $rp, $ovf, $np
+ or $np, $ap, $ap ! ap=borrow?tp:rp
+ ba .Lcopy
+ sub $num, 8, $cnt
+
+.align 16
+.Lcopy: ! copy or in-place refresh
+ ldx [$ap+0], $t2
+ add $ap, 8, $ap
+ stx %g0, [$tp] ! zap
+ add $tp, 8, $tp
+ stx $t2, [$rp+0]
+ add $rp, 8, $rp
+ brnz $cnt, .Lcopy
+ sub $cnt, 8, $cnt
+
+ mov 1, %o0
+ ret
+ restore
+.type bn_mul_mont_t4, #function
+.size bn_mul_mont_t4, .-bn_mul_mont_t4
+___
+
+# int bn_mul_mont_gather5(
+$rp="%o0"; # u64 *rp,
+$ap="%o1"; # const u64 *ap,
+$bp="%o2"; # const u64 *pwrtbl,
+$np="%o3"; # const u64 *np,
+$n0p="%o4"; # const BN_ULONG *n0,
+$num="%o5"; # int num, # caller ensures that num is >=3
+ # int power);
+$code.=<<___;
+.globl bn_mul_mont_gather5_t4
+.align 32
+bn_mul_mont_gather5_t4:
+ add %sp, STACK_BIAS, %g4 ! real top of stack
+ sll $num, 3, $num ! size in bytes
+ add $num, 63, %g1
+ andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes
+ sub %g4, %g1, %g1
+ andn %g1, 63, %g1 ! align at 64 byte
+ sub %g1, STACK_FRAME, %g1 ! new top of stack
+ sub %g1, %g4, %g1
+ LDPTR [%sp+STACK_7thARG], %g4 ! load power, 7th argument
+
+ save %sp, %g1, %sp
+___
+# +-------------------------------+<----- %sp
+# . .
+# +-------------------------------+<----- aligned at 64 bytes
+# | __int64 tmp[0] |
+# +-------------------------------+
+# . .
+# . .
+# +-------------------------------+<----- aligned at 64 bytes
+# . .
+($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
+($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$ccr)=map("%l$_",(0..7));
+($ovf,$i)=($t0,$t1);
+ &load_ccr($bp,"%g4",$ccr);
+ &load_b($bp,$m0,"%o7"); # m0=bp[0]
+
+$code.=<<___;
+ ld [$n0p+0], $t0 ! pull n0[0..1] value
+ ld [$n0p+4], $t1
+ add %sp, STACK_BIAS+STACK_FRAME, $tp
+ sllx $t1, 32, $n0
+ or $t0, $n0, $n0
+
+ ldx [$ap+0], $aj ! ap[0]
+
+ mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
+ umulxhi $aj, $m0, $hi0
+
+ ldx [$ap+8], $aj ! ap[1]
+ add $ap, 16, $ap
+ ldx [$np+0], $nj ! np[0]
+
+ mulx $lo0, $n0, $m1 ! "tp[0]"*n0
+
+ mulx $aj, $m0, $alo ! ap[1]*bp[0]
+ umulxhi $aj, $m0, $aj ! ahi=aj
+
+ mulx $nj, $m1, $lo1 ! np[0]*m1
+ umulxhi $nj, $m1, $hi1
+
+ ldx [$np+8], $nj ! np[1]
+
+ addcc $lo0, $lo1, $lo1
+ add $np, 16, $np
+ addxc %g0, $hi1, $hi1
+
+ mulx $nj, $m1, $nlo ! np[1]*m1
+ umulxhi $nj, $m1, $nj ! nhi=nj
+
+ ba .L1st_g5
+ sub $num, 24, $cnt ! cnt=num-3
+
+.align 16
+.L1st_g5:
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0
+
+ ldx [$ap+0], $aj ! ap[j]
+ addcc $nlo, $hi1, $lo1
+ add $ap, 8, $ap
+ addxc $nj, %g0, $hi1 ! nhi=nj
+
+ ldx [$np+0], $nj ! np[j]
+ mulx $aj, $m0, $alo ! ap[j]*bp[0]
+ add $np, 8, $np
+ umulxhi $aj, $m0, $aj ! ahi=aj
+
+ mulx $nj, $m1, $nlo ! np[j]*m1
+ addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
+ umulxhi $nj, $m1, $nj ! nhi=nj
+ addxc %g0, $hi1, $hi1
+ stxa $lo1, [$tp]0xe2 ! tp[j-1]
+ add $tp, 8, $tp ! tp++
+
+ brnz,pt $cnt, .L1st_g5
+ sub $cnt, 8, $cnt ! j--
+!.L1st_g5
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0 ! ahi=aj
+
+ addcc $nlo, $hi1, $lo1
+ addxc $nj, %g0, $hi1
+ addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
+ addxc %g0, $hi1, $hi1
+ stxa $lo1, [$tp]0xe2 ! tp[j-1]
+ add $tp, 8, $tp
+
+ addcc $hi0, $hi1, $hi1
+ addxc %g0, %g0, $ovf ! upmost overflow bit
+ stxa $hi1, [$tp]0xe2
+ add $tp, 8, $tp
+
+ ba .Louter_g5
+ sub $num, 16, $i ! i=num-2
+
+.align 16
+.Louter_g5:
+ wr $ccr, %g0, %ccr
+___
+ &load_b($bp,$m0); # m0=bp[i]
+$code.=<<___;
+ sub $ap, $num, $ap ! rewind
+ sub $np, $num, $np
+ sub $tp, $num, $tp
+
+ ldx [$ap+0], $aj ! ap[0]
+ ldx [$np+0], $nj ! np[0]
+
+ mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
+ ldx [$tp], $tj ! tp[0]
+ umulxhi $aj, $m0, $hi0
+ ldx [$ap+8], $aj ! ap[1]
+ addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
+ mulx $aj, $m0, $alo ! ap[1]*bp[i]
+ addxc %g0, $hi0, $hi0
+ mulx $lo0, $n0, $m1 ! tp[0]*n0
+ umulxhi $aj, $m0, $aj ! ahi=aj
+ mulx $nj, $m1, $lo1 ! np[0]*m1
+ add $ap, 16, $ap
+ umulxhi $nj, $m1, $hi1
+ ldx [$np+8], $nj ! np[1]
+ add $np, 16, $np
+ addcc $lo1, $lo0, $lo1
+ mulx $nj, $m1, $nlo ! np[1]*m1
+ addxc %g0, $hi1, $hi1
+ umulxhi $nj, $m1, $nj ! nhi=nj
+
+ ba .Linner_g5
+ sub $num, 24, $cnt ! cnt=num-3
+.align 16
+.Linner_g5:
+ addcc $alo, $hi0, $lo0
+ ldx [$tp+8], $tj ! tp[j]
+ addxc $aj, %g0, $hi0 ! ahi=aj
+ ldx [$ap+0], $aj ! ap[j]
+ add $ap, 8, $ap
+ addcc $nlo, $hi1, $lo1
+ mulx $aj, $m0, $alo ! ap[j]*bp[i]
+ addxc $nj, %g0, $hi1 ! nhi=nj
+ ldx [$np+0], $nj ! np[j]
+ add $np, 8, $np
+ umulxhi $aj, $m0, $aj ! ahi=aj
+ addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
+ mulx $nj, $m1, $nlo ! np[j]*m1
+ addxc %g0, $hi0, $hi0
+ umulxhi $nj, $m1, $nj ! nhi=nj
+ addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+ add $tp, 8, $tp
+ brnz,pt $cnt, .Linner_g5
+ sub $cnt, 8, $cnt
+!.Linner_g5
+ ldx [$tp+8], $tj ! tp[j]
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0 ! ahi=aj
+ addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi0, $hi0
+
+ addcc $nlo, $hi1, $lo1
+ addxc $nj, %g0, $hi1 ! nhi=nj
+ addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+
+ subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
+ addxccc $hi1, $hi0, $hi1
+ addxc %g0, %g0, $ovf
+ stx $hi1, [$tp+8]
+ add $tp, 16, $tp
+
+ brnz,pt $i, .Louter_g5
+ sub $i, 8, $i
+
+ sub $ap, $num, $ap ! rewind
+ sub $np, $num, $np
+ sub $tp, $num, $tp
+ ba .Lsub_g5
+ subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
+
+.align 16
+.Lsub_g5:
+ ldx [$tp], $tj
+ add $tp, 8, $tp
+ ldx [$np+0], $nj
+ add $np, 8, $np
+ subccc $tj, $nj, $t2 ! tp[j]-np[j]
+ srlx $tj, 32, $tj
+ srlx $nj, 32, $nj
+ subccc $tj, $nj, $t3
+ add $rp, 8, $rp
+ st $t2, [$rp-4] ! reverse order
+ st $t3, [$rp-8]
+ brnz,pt $cnt, .Lsub_g5
+ sub $cnt, 8, $cnt
+
+ sub $np, $num, $np ! rewind
+ sub $tp, $num, $tp
+ sub $rp, $num, $rp
+
+ subc $ovf, %g0, $ovf ! handle upmost overflow bit
+ and $tp, $ovf, $ap
+ andn $rp, $ovf, $np
+ or $np, $ap, $ap ! ap=borrow?tp:rp
+ ba .Lcopy_g5
+ sub $num, 8, $cnt
+
+.align 16
+.Lcopy_g5: ! copy or in-place refresh
+ ldx [$ap+0], $t2
+ add $ap, 8, $ap
+ stx %g0, [$tp] ! zap
+ add $tp, 8, $tp
+ stx $t2, [$rp+0]
+ add $rp, 8, $rp
+ brnz $cnt, .Lcopy_g5
+ sub $cnt, 8, $cnt
+
+ mov 1, %o0
+ ret
+ restore
+.type bn_mul_mont_gather5_t4, #function
+.size bn_mul_mont_gather5_t4, .-bn_mul_mont_gather5_t4
+___
+}
+
+$code.=<<___;
+.globl bn_flip_t4
+.align 32
+bn_flip_t4:
+.Loop_flip:
+ ld [%o1+0], %o4
+ sub %o2, 1, %o2
+ ld [%o1+4], %o5
+ add %o1, 8, %o1
+ st %o5, [%o0+0]
+ st %o4, [%o0+4]
+ brnz %o2, .Loop_flip
+ add %o0, 8, %o0
+ retl
+ nop
+.type bn_flip_t4, #function
+.size bn_flip_t4, .-bn_flip_t4
+
+.globl bn_flip_n_scatter5_t4
+.align 32
+bn_flip_n_scatter5_t4:
+ sll %o3, 3, %o3
+ srl %o1, 1, %o1
+ add %o3, %o2, %o2 ! &pwrtbl[pwr]
+ sub %o1, 1, %o1
+.Loop_flip_n_scatter5:
+ ld [%o0+0], %o4 ! inp[i]
+ ld [%o0+4], %o5
+ add %o0, 8, %o0
+ sllx %o5, 32, %o5
+ or %o4, %o5, %o5
+ stx %o5, [%o2]
+ add %o2, 32*8, %o2
+ brnz %o1, .Loop_flip_n_scatter5
+ sub %o1, 1, %o1
+ retl
+ nop
+.type bn_flip_n_scatter5_t4, #function
+.size bn_flip_n_scatter5_t4, .-bn_flip_n_scatter5_t4
+
+.globl bn_gather5_t4
+.align 32
+bn_gather5_t4:
+___
+ &load_ccr("%o2","%o3","%g1");
+$code.=<<___;
+ sub %o1, 1, %o1
+.Loop_gather5:
+___
+ &load_b("%o2","%g1");
+$code.=<<___;
+ stx %g1, [%o0]
+ add %o0, 8, %o0
+ brnz %o1, .Loop_gather5
+ sub %o1, 1, %o1
+
+ retl
+ nop
+.type bn_gather5_t4, #function
+.size bn_gather5_t4, .-bn_gather5_t4
+
+.asciz "Montgomery Multiplication for SPARC T4, David S. Miller, Andy Polyakov"
+.align 4
+___
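As the scatter code above shows, bn_flip_n_scatter5_t4 places limb i of the table entry for window value pwr at pwrtbl[pwr + 32*i] (a 32*8-byte stride between limbs), which is the layout the load_b and bn_gather5_t4 readers expect. A minimal C sketch of that indexing, leaving aside the 32-bit half-word flipping, is given below; scatter5_ref is a hypothetical name.

#include <stdint.h>

/* Scatter one table entry with the pwrtbl layout used above:
 * limb i of window value `pwr` goes to pwrtbl[pwr + 32*i].
 * Illustrative only; assumes inp already holds 64-bit limbs. */
static void scatter5_ref(const uint64_t *inp, int num_limbs,
                         uint64_t *pwrtbl, int pwr)
{
    for (int i = 0; i < num_limbs; i++)
        pwrtbl[pwr + 32 * i] = inp[i];
}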
+
+&emit_assembler();
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/sparcv8.S b/openssl-1.1.0h/crypto/bn/asm/sparcv8.S
new file mode 100644
index 0000000..9c31073
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/sparcv8.S
@@ -0,0 +1,1458 @@
+.ident "sparcv8.s, Version 1.4"
+.ident "SPARC v8 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in SuperSPARC ISA replacement for the crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * See bn_asm.sparc.v8plus.S for more details.
+ */
+
+/*
+ * Revision history.
+ *
+ * 1.1 - new loop unrolling model(*);
+ * 1.2 - made gas friendly;
+ * 1.3 - fixed problem with /usr/ccs/lib/cpp;
+ * 1.4 - some retunes;
+ *
+ * (*) see bn_asm.sparc.v8plus.S for details
+ */
+
+.section ".text",#alloc,#execinstr
+.file "bn_asm.sparc.v8.S"
+
+.align 32
+
+.global bn_mul_add_words
+/*
+ * BN_ULONG bn_mul_add_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_add_words:
+ cmp %o2,0
+ bg,a .L_bn_mul_add_words_proceed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_mul_add_words_proceed:
+ andcc %o2,-4,%g0
+ bz .L_bn_mul_add_words_tail
+ clr %o5
+
+.L_bn_mul_add_words_loop:
+ ld [%o0],%o4
+ ld [%o1+4],%g3
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0]
+ addx %g1,0,%o5
+
+ ld [%o0+4],%o4
+ ld [%o1+8],%g2
+ umul %o3,%g3,%g3
+ dec 4,%o2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g3,%o4
+ st %o4,[%o0+4]
+ addx %g1,0,%o5
+
+ ld [%o0+8],%o4
+ ld [%o1+12],%g3
+ umul %o3,%g2,%g2
+ inc 16,%o1
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0+8]
+ addx %g1,0,%o5
+
+ ld [%o0+12],%o4
+ umul %o3,%g3,%g3
+ inc 16,%o0
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g3,%o4
+ st %o4,[%o0-4]
+ addx %g1,0,%o5
+ andcc %o2,-4,%g0
+ bnz,a .L_bn_mul_add_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ bnz,a .L_bn_mul_add_words_tail
+ ld [%o1],%g2
+.L_bn_mul_add_words_return:
+ retl
+ mov %o5,%o0
+ nop
+
+.L_bn_mul_add_words_tail:
+ ld [%o0],%o4
+ umul %o3,%g2,%g2
+ addcc %o4,%o5,%o4
+ rd %y,%g1
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_add_words_return
+ st %o4,[%o0]
+
+ ld [%o1+4],%g2
+ ld [%o0+4],%o4
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_add_words_return
+ st %o4,[%o0+4]
+
+ ld [%o1+8],%g2
+ ld [%o0+8],%o4
+ umul %o3,%g2,%g2
+ rd %y,%g1
+ addcc %o4,%o5,%o4
+ addx %g1,0,%g1
+ addcc %o4,%g2,%o4
+ st %o4,[%o0+8]
+ retl
+ addx %g1,0,%o0
+
+.type bn_mul_add_words,#function
+.size bn_mul_add_words,(.-bn_mul_add_words)
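For reference, the routine above computes the usual multiply-accumulate primitive over 32-bit words: rp[i] += ap[i]*w plus the running carry, returning the final carry. A hedged C sketch follows; bn_mul_add_words_ref is a hypothetical name, and since SPARC v8 BN_ULONG is 32 bits, a 64-bit intermediate suffices.

#include <stdint.h>

/* Sketch of what bn_mul_add_words computes on SPARC v8 (32-bit BN_ULONG);
 * illustrative only, not the OpenSSL C code. */
static uint32_t bn_mul_add_words_ref(uint32_t *rp, const uint32_t *ap,
                                     int num, uint32_t w)
{
    uint32_t carry = 0;
    for (int i = 0; i < num; i++) {
        uint64_t t = (uint64_t)ap[i] * w + rp[i] + carry;
        rp[i] = (uint32_t)t;          /* low word back into rp[i]  */
        carry = (uint32_t)(t >> 32);  /* high word carries forward */
    }
    return carry;                     /* final carry is the result */
}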
+
+.align 32
+
+.global bn_mul_words
+/*
+ * BN_ULONG bn_mul_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_words:
+ cmp %o2,0
+ bg,a .L_bn_mul_words_proceeed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_mul_words_proceeed:
+ andcc %o2,-4,%g0
+ bz .L_bn_mul_words_tail
+ clr %o5
+
+.L_bn_mul_words_loop:
+ ld [%o1+4],%g3
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ st %g2,[%o0]
+
+ ld [%o1+8],%g2
+ umul %o3,%g3,%g3
+ addcc %g3,%o5,%g3
+ rd %y,%g1
+ dec 4,%o2
+ addx %g1,0,%o5
+ st %g3,[%o0+4]
+
+ ld [%o1+12],%g3
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ inc 16,%o1
+ st %g2,[%o0+8]
+ addx %g1,0,%o5
+
+ umul %o3,%g3,%g3
+ addcc %g3,%o5,%g3
+ rd %y,%g1
+ inc 16,%o0
+ addx %g1,0,%o5
+ st %g3,[%o0-4]
+ andcc %o2,-4,%g0
+ nop
+ bnz,a .L_bn_mul_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ bnz,a .L_bn_mul_words_tail
+ ld [%o1],%g2
+.L_bn_mul_words_return:
+ retl
+ mov %o5,%o0
+ nop
+
+.L_bn_mul_words_tail:
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_words_return
+ st %g2,[%o0]
+ nop
+
+ ld [%o1+4],%g2
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ addx %g1,0,%o5
+ deccc %o2
+ bz .L_bn_mul_words_return
+ st %g2,[%o0+4]
+
+ ld [%o1+8],%g2
+ umul %o3,%g2,%g2
+ addcc %g2,%o5,%g2
+ rd %y,%g1
+ st %g2,[%o0+8]
+ retl
+ addx %g1,0,%o0
+
+.type bn_mul_words,#function
+.size bn_mul_words,(.-bn_mul_words)
+
+.align 32
+.global bn_sqr_words
+/*
+ * void bn_sqr_words(r,a,n)
+ * BN_ULONG *r,*a;
+ * int n;
+ */
+bn_sqr_words:
+ cmp %o2,0
+ bg,a .L_bn_sqr_words_proceeed
+ ld [%o1],%g2
+ retl
+ clr %o0
+
+.L_bn_sqr_words_proceeed:
+ andcc %o2,-4,%g0
+ bz .L_bn_sqr_words_tail
+ clr %o5
+
+.L_bn_sqr_words_loop:
+ ld [%o1+4],%g3
+ umul %g2,%g2,%o4
+ st %o4,[%o0]
+ rd %y,%o5
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%g2
+ umul %g3,%g3,%o4
+ dec 4,%o2
+ st %o4,[%o0+8]
+ rd %y,%o5
+ st %o5,[%o0+12]
+ nop
+
+ ld [%o1+12],%g3
+ umul %g2,%g2,%o4
+ st %o4,[%o0+16]
+ rd %y,%o5
+ inc 16,%o1
+ st %o5,[%o0+20]
+
+ umul %g3,%g3,%o4
+ inc 32,%o0
+ st %o4,[%o0-8]
+ rd %y,%o5
+ st %o5,[%o0-4]
+ andcc %o2,-4,%g2
+ bnz,a .L_bn_sqr_words_loop
+ ld [%o1],%g2
+
+ tst %o2
+ nop
+ bnz,a .L_bn_sqr_words_tail
+ ld [%o1],%g2
+.L_bn_sqr_words_return:
+ retl
+ clr %o0
+
+.L_bn_sqr_words_tail:
+ umul %g2,%g2,%o4
+ st %o4,[%o0]
+ deccc %o2
+ rd %y,%o5
+ bz .L_bn_sqr_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+4],%g2
+ umul %g2,%g2,%o4
+ st %o4,[%o0+8]
+ deccc %o2
+ rd %y,%o5
+ nop
+ bz .L_bn_sqr_words_return
+ st %o5,[%o0+12]
+
+ ld [%o1+8],%g2
+ umul %g2,%g2,%o4
+ st %o4,[%o0+16]
+ rd %y,%o5
+ st %o5,[%o0+20]
+ retl
+ clr %o0
+
+.type bn_sqr_words,#function
+.size bn_sqr_words,(.-bn_sqr_words)
+
+.align 32
+
+.global bn_div_words
+/*
+ * BN_ULONG bn_div_words(h,l,d)
+ * BN_ULONG h,l,d;
+ */
+bn_div_words:
+ wr %o0,%y
+ udiv %o1,%o2,%o0
+ retl
+ nop
+
+.type bn_div_words,#function
+.size bn_div_words,(.-bn_div_words)
+
+.align 32
+
+.global bn_add_words
+/*
+ * BN_ULONG bn_add_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_add_words:
+ cmp %o3,0
+ bg,a .L_bn_add_words_proceed
+ ld [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_add_words_proceed:
+ andcc %o3,-4,%g0
+ bz .L_bn_add_words_tail
+ clr %g1
+ ba .L_bn_add_words_warn_loop
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_add_words_loop:
+ ld [%o1],%o4
+.L_bn_add_words_warn_loop:
+ ld [%o2],%o5
+ ld [%o1+4],%g3
+ ld [%o2+4],%g4
+ dec 4,%o3
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0]
+
+ ld [%o1+8],%o4
+ ld [%o2+8],%o5
+ inc 16,%o1
+ addxcc %g3,%g4,%g3
+ st %g3,[%o0+4]
+
+ ld [%o1-4],%g3
+ ld [%o2+12],%g4
+ inc 16,%o2
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0+8]
+
+ inc 16,%o0
+ addxcc %g3,%g4,%g3
+ st %g3,[%o0-4]
+ addx %g0,0,%g1
+ andcc %o3,-4,%g0
+ bnz,a .L_bn_add_words_loop
+ addcc %g1,-1,%g0
+
+ tst %o3
+ bnz,a .L_bn_add_words_tail
+ ld [%o1],%o4
+.L_bn_add_words_return:
+ retl
+ mov %g1,%o0
+
+.L_bn_add_words_tail:
+ addcc %g1,-1,%g0
+ ld [%o2],%o5
+ addxcc %o5,%o4,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_add_words_return
+ st %o5,[%o0]
+
+ ld [%o1+4],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+4],%o5
+ addxcc %o5,%o4,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_add_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+8],%o5
+ addxcc %o5,%o4,%o5
+ st %o5,[%o0+8]
+ retl
+ addx %g0,0,%o0
+
+.type bn_add_words,#function
+.size bn_add_words,(.-bn_add_words)
+
+.align 32
+
+.global bn_sub_words
+/*
+ * BN_ULONG bn_sub_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_sub_words:
+ cmp %o3,0
+ bg,a .L_bn_sub_words_proceed
+ ld [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_sub_words_proceed:
+ andcc %o3,-4,%g0
+ bz .L_bn_sub_words_tail
+ clr %g1
+ ba .L_bn_sub_words_warm_loop
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_sub_words_loop:
+ ld [%o1],%o4
+.L_bn_sub_words_warm_loop:
+ ld [%o2],%o5
+ ld [%o1+4],%g3
+ ld [%o2+4],%g4
+ dec 4,%o3
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0]
+
+ ld [%o1+8],%o4
+ ld [%o2+8],%o5
+ inc 16,%o1
+ subxcc %g3,%g4,%g4
+ st %g4,[%o0+4]
+
+ ld [%o1-4],%g3
+ ld [%o2+12],%g4
+ inc 16,%o2
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0+8]
+
+ inc 16,%o0
+ subxcc %g3,%g4,%g4
+ st %g4,[%o0-4]
+ addx %g0,0,%g1
+ andcc %o3,-4,%g0
+ bnz,a .L_bn_sub_words_loop
+ addcc %g1,-1,%g0
+
+ tst %o3
+ nop
+ bnz,a .L_bn_sub_words_tail
+ ld [%o1],%o4
+.L_bn_sub_words_return:
+ retl
+ mov %g1,%o0
+
+.L_bn_sub_words_tail:
+ addcc %g1,-1,%g0
+ ld [%o2],%o5
+ subxcc %o4,%o5,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_sub_words_return
+ st %o5,[%o0]
+ nop
+
+ ld [%o1+4],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+4],%o5
+ subxcc %o4,%o5,%o5
+ addx %g0,0,%g1
+ deccc %o3
+ bz .L_bn_sub_words_return
+ st %o5,[%o0+4]
+
+ ld [%o1+8],%o4
+ addcc %g1,-1,%g0
+ ld [%o2+8],%o5
+ subxcc %o4,%o5,%o5
+ st %o5,[%o0+8]
+ retl
+ addx %g0,0,%o0
+
+.type bn_sub_words,#function
+.size bn_sub_words,(.-bn_sub_words)
+
+#define FRAME_SIZE -96
+
+/*
+ * Here is register usage map for *all* routines below.
+ */
+#define t_1 %o0
+#define t_2 %o1
+#define c_1 %o2
+#define c_2 %o3
+#define c_3 %o4
+
+#define ap(I) [%i1+4*I]
+#define bp(I) [%i2+4*I]
+#define rp(I) [%i0+4*I]
+
+#define a_0 %l0
+#define a_1 %l1
+#define a_2 %l2
+#define a_3 %l3
+#define a_4 %l4
+#define a_5 %l5
+#define a_6 %l6
+#define a_7 %l7
+
+#define b_0 %i3
+#define b_1 %i4
+#define b_2 %i5
+#define b_3 %o5
+#define b_4 %g1
+#define b_5 %g2
+#define b_6 %g3
+#define b_7 %g4
+
+.align 32
+.global bn_mul_comba8
+/*
+ * void bn_mul_comba8(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba8:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld bp(0),b_0
+ umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
+ ld bp(1),b_1
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
+ ld ap(1),a_1
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ ld ap(2),a_2
+ umul a_1,b_0,t_1 !mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(1) !r[1]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ ld bp(2),b_2
+ umul a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ ld bp(3),b_3
+ addx c_2,%g0,c_2 !=
+ umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ ld ap(3),a_3
+ umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ ld ap(4),a_4
+ umul a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ ld bp(4),b_4
+ umul a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ ld bp(5),b_5
+ umul a_0,b_4,t_1 !=!mul_add_c(a[0],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_2,b_3,t_1 !=!mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ ld ap(5),a_5
+ umul a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ ld ap(6),a_6
+ addx c_2,%g0,c_2 !=
+ umul a_5,b_0,t_1 !mul_add_c(a[5],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(5) !r[5]=c3;
+
+ umul a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,b_2,t_1 !mul_add_c(a[4],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_2,b_4,t_1 !mul_add_c(a[2],b[4],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ ld bp(6),b_6
+ addx c_3,%g0,c_3 !=
+ umul a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ ld bp(7),b_7
+ umul a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ st c_1,rp(6) !r[6]=c1;
+ addx c_3,%g0,c_3 !=
+
+ umul a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ umul a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_3,b_4,t_1 !=!mul_add_c(a[3],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ ld ap(7),a_7
+ umul a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_7,b_0,t_1 !mul_add_c(a[7],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ st c_2,rp(7) !r[7]=c2;
+
+ umul a_7,b_1,t_1 !mul_add_c(a[7],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_6,b_2,t_1 !=!mul_add_c(a[6],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ umul a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_2,b_6,t_1 !=!mul_add_c(a[2],b[6],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !
+ addx c_2,%g0,c_2
+ st c_3,rp(8) !r[8]=c3;
+
+ umul a_2,b_7,t_1 !mul_add_c(a[2],b[7],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ umul a_3,b_6,t_1 !=!mul_add_c(a[3],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ umul a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_7,b_2,t_1 !=!mul_add_c(a[7],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(9) !r[9]=c1;
+
+ umul a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_5,b_5,t_1 !=!mul_add_c(a[5],b[5],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ umul a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(10) !r[10]=c2;
+
+ umul a_4,b_7,t_1 !=!mul_add_c(a[4],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ umul a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ umul a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(11) !r[11]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ umul a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ st c_1,rp(12) !r[12]=c1;
+ addx c_3,%g0,c_3 !=
+
+ umul a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3 !=
+ addx %g0,%g0,c_1
+ umul a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(13) !r[13]=c2;
+
+ umul a_7,b_7,t_1 !=!mul_add_c(a[7],b[7],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ nop !=
+ st c_3,rp(14) !r[14]=c3;
+ st c_1,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba8,#function
+.size bn_mul_comba8,(.-bn_mul_comba8)
+
+.align 32
+
+.global bn_mul_comba4
+/*
+ * void bn_mul_comba4(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba4:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld bp(0),b_0
+ umul a_0,b_0,c_1 !=!mul_add_c(a[0],b[0],c1,c2,c3);
+ ld bp(1),b_1
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ umul a_0,b_1,t_1 !=!mul_add_c(a[0],b[1],c2,c3,c1);
+ ld ap(1),a_1
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1
+ ld ap(2),a_2
+ umul a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(1) !r[1]=c2;
+
+ umul a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ ld bp(2),b_2
+ umul a_1,b_1,t_1 !=!mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld bp(3),b_3
+ umul a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,b_3,t_1 !=!mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3 !=
+ umul a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ ld ap(3),a_3
+ umul a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ umul a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ umul a_1,b_3,t_1 !=!mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ umul a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(5) !r[5]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ st c_1,rp(6) !r[6]=c1;
+ st c_2,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba4,#function
+.size bn_mul_comba4,(.-bn_mul_comba4)
+
+.align 32
+
+.global bn_sqr_comba8
+bn_sqr_comba8:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ ld ap(1),a_1
+ umul a_0,a_0,c_1 !=!sqr_add_c(a,0,c1,c2,c3);
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ ld ap(2),a_2
+ umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1 !=
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(1) !r[1]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld ap(3),a_3
+ umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ st c_3,rp(2) !r[2]=c3;
+
+ umul a_0,a_3,t_1 !=!sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3 !=
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ ld ap(4),a_4
+ addx c_3,%g0,c_3 !=
+ umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ ld ap(5),a_5
+ umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ st c_2,rp(4) !r[4]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ umul a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ ld ap(6),a_6
+ umul a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ st c_3,rp(5) !r[5]=c3;
+
+ umul a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ umul a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
+ addcc c_1,t_1,c_1 !=
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1 !=
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3
+ ld ap(7),a_7
+ umul a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(6) !r[6]=c1;
+
+ umul a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ st c_2,rp(7) !r[7]=c2;
+
+ umul a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ addcc c_3,t_1,c_3 !=
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(8) !r[8]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(9) !r[9]=c1;
+
+ umul a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(10) !r[10]=c2;
+
+ umul a_4,a_7,t_1 !=!sqr_add_c2(a,7,4,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2
+ umul a_5,a_6,t_1 !=!sqr_add_c2(a,6,5,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx c_2,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ st c_3,rp(11) !r[11]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ st c_1,rp(12) !r[12]=c1;
+
+ umul a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
+ addcc c_2,t_1,c_2 !=
+ rd %y,t_2
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2 !=
+ addxcc c_3,t_2,c_3
+ st c_2,rp(13) !r[13]=c2;
+ addx c_1,%g0,c_1 !=
+
+ umul a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1 !=
+ st c_3,rp(14) !r[14]=c3;
+ st c_1,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba8,#function
+.size bn_sqr_comba8,(.-bn_sqr_comba8)
+
+.align 32
+
+.global bn_sqr_comba4
+/*
+ * void bn_sqr_comba4(r,a)
+ * BN_ULONG *r,*a;
+ */
+bn_sqr_comba4:
+ save %sp,FRAME_SIZE,%sp
+ ld ap(0),a_0
+ umul a_0,a_0,c_1 !sqr_add_c(a,0,c1,c2,c3);
+ ld ap(1),a_1 !=
+ rd %y,c_2
+ st c_1,rp(0) !r[0]=c1;
+
+ ld ap(2),a_2
+ umul a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2
+ addxcc %g0,t_2,c_3
+ addx %g0,%g0,c_1 !=
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1 !=
+ st c_2,rp(1) !r[1]=c2;
+
+ umul a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2 !=
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1 !=
+ addx c_2,%g0,c_2
+ ld ap(3),a_3
+ umul a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_3,t_1,c_3 !=
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ st c_3,rp(2) !r[2]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx %g0,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ umul a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ addx c_3,%g0,c_3
+ addcc c_1,t_1,c_1
+ addxcc c_2,t_2,c_2
+ addx c_3,%g0,c_3 !=
+ st c_1,rp(3) !r[3]=c1;
+
+ umul a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx %g0,%g0,c_1
+ addcc c_2,t_1,c_2
+ addxcc c_3,t_2,c_3 !=
+ addx c_1,%g0,c_1
+ umul a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_2,t_1,c_2
+ rd %y,t_2 !=
+ addxcc c_3,t_2,c_3
+ addx c_1,%g0,c_1
+ st c_2,rp(4) !r[4]=c2;
+
+ umul a_2,a_3,t_1 !=!sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_3,t_1,c_3
+ rd %y,t_2
+ addxcc c_1,t_2,c_1
+ addx %g0,%g0,c_2 !=
+ addcc c_3,t_1,c_3
+ addxcc c_1,t_2,c_1
+ st c_3,rp(5) !r[5]=c3;
+ addx c_2,%g0,c_2 !=
+
+ umul a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
+ addcc c_1,t_1,c_1
+ rd %y,t_2
+ addxcc c_2,t_2,c_2 !=
+ st c_1,rp(6) !r[6]=c1;
+ st c_2,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba4,#function
+.size bn_sqr_comba4,(.-bn_sqr_comba4)
+
+.align 32
diff --git a/openssl-1.1.0h/crypto/bn/asm/sparcv8plus.S b/openssl-1.1.0h/crypto/bn/asm/sparcv8plus.S
new file mode 100644
index 0000000..714a136
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/sparcv8plus.S
@@ -0,0 +1,1562 @@
+.ident "sparcv8plus.s, Version 1.4"
+.ident "SPARC v9 ISA artwork by Andy Polyakov <appro@fy.chalmers.se>"
+
+/*
+ * ====================================================================
+ * Copyright 1999-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ * ====================================================================
+ */
+
+/*
+ * This is my modest contribution to the OpenSSL project (see
+ * http://www.openssl.org/ for more information about it) and is
+ * a drop-in UltraSPARC ISA replacement for the crypto/bn/bn_asm.c
+ * module. For updates see http://fy.chalmers.se/~appro/hpe/.
+ *
+ * Questions-n-answers.
+ *
+ * Q. How to compile?
+ * A. With SC4.x/SC5.x:
+ *
+ * cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ * and with gcc:
+ *
+ * gcc -mcpu=ultrasparc -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ *
+ * or if the above fails (it does if you have gas installed):
+ *
+ * gcc -E bn_asm.sparc.v8plus.S | as -xarch=v8plus /dev/fd/0 -o bn_asm.o
+ *
+ * Quick-n-dirty way to fuse the module into the library.
+ * Provided that the library is already configured and built
+ * (in 0.9.2 case with no-asm option):
+ *
+ * # cd crypto/bn
+ * # cp /some/place/bn_asm.sparc.v8plus.S .
+ * # cc -xarch=v8plus -c bn_asm.sparc.v8plus.S -o bn_asm.o
+ * # make
+ * # cd ../..
+ * # make; make test
+ *
+ * Quick-n-dirty way to get rid of it:
+ *
+ * # cd crypto/bn
+ * # touch bn_asm.c
+ * # make
+ * # cd ../..
+ * # make; make test
+ *
+ * Q. V8plus architecture? What kind of beast is that?
+ * A. Well, it's more of a programming model than an architecture...
+ *    It's actually a v9-compliant CPU, i.e. *any* UltraSPARC, run under
+ *    special conditions, namely when the kernel doesn't preserve the
+ *    upper 32 bits of otherwise 64-bit registers during a context switch.
+ *
+ * Q. Why just UltraSPARC? What about SuperSPARC?
+ * A. The original release targeted UltraSPARC only. A SuperSPARC
+ *    version is now provided alongside it. Both versions share the
+ *    bn_*comba[48] implementations (see the comment later in the code
+ *    for an explanation). But what's so special about this UltraSPARC
+ *    implementation? Why didn't I let the compiler do the job? The
+ *    trouble is that most of the available compilers (well, SC5.0 is
+ *    the only exception) don't attempt to take advantage of
+ *    UltraSPARC's 64-bitness under 32-bit kernels even though it's
+ *    perfectly possible (see the next question).
+ *
+ * Q. 64-bit registers under 32-bit kernels? Didn't you just say it
+ * doesn't work?
+ * A. You can't address *all* registers as 64-bit wide:-( The catch is
+ *    that you may actually rely upon %o0-%o5 and %g1-%g4 being fully
+ *    preserved if you're in a leaf function, i.e. one that never calls
+ *    any other functions. All functions in this module are leaf and
+ *    10 registers is a handful. As a matter of fact, the non-"comba"
+ *    routines require even less than that, and I could even afford not
+ *    to allocate a stack frame of their own for 'em:-)
+ *
+ * Q. What about 64-bit kernels?
+ * A. What about 'em? Just kidding:-) A pure 64-bit version is currently
+ *    under evaluation and development...
+ *
+ * Q. What about shared libraries?
+ * A. What about 'em? Kidding again:-) The code does *not* contain any
+ *    position dependencies and it's safe to include it in a shared
+ *    library as is.
+ *
+ * Q. How much faster does it go?
+ * A. Do you have a good benchmark? In either case, below is what I
+ *    experience with the crypto/bn/expspeed.c test program:
+ *
+ * v8plus module on U10/300MHz against bn_asm.c compiled with:
+ *
+ * cc-5.0 -xarch=v8plus -xO5 -xdepend +7-12%
+ * cc-4.2 -xarch=v8plus -xO5 -xdepend +25-35%
+ * egcs-1.1.2 -mcpu=ultrasparc -O3 +35-45%
+ *
+ * v8 module on SS10/60MHz against bn_asm.c compiled with:
+ *
+ * cc-5.0 -xarch=v8 -xO5 -xdepend +7-10%
+ * cc-4.2 -xarch=v8 -xO5 -xdepend +10%
+ * egcs-1.1.2 -mv8 -O3 +35-45%
+ *
+ *    As you can see, it's damn hard to beat the new Sun C compiler,
+ *    and it's GNU C users, first and foremost, who will appreciate
+ *    this assembler implementation:-)
+ */
+
+/*
+ * Revision history.
+ *
+ * 1.0 - initial release;
+ * 1.1 - new loop unrolling model(*);
+ * - some more fine tuning;
+ * 1.2 - made gas friendly;
+ * - updates to documentation concerning v9;
+ * - new performance comparison matrix;
+ * 1.3 - fixed problem with /usr/ccs/lib/cpp;
+ * 1.4 - native V9 bn_*_comba[48] implementation (15% more efficient)
+ * resulting in slight overall performance kick;
+ * - some retunes;
+ * - support for GNU as added;
+ *
+ * (*) Originally the unrolled loop looked like this:
+ *	for (;;) {
+ *	    op(p+0); if (--n==0) break;
+ *	    op(p+1); if (--n==0) break;
+ *	    op(p+2); if (--n==0) break;
+ *	    op(p+3); if (--n==0) break;
+ *	    p+=4;
+ *	}
+ * I unroll according to the following:
+ *	while (n&~3) {
+ *	    op(p+0); op(p+1); op(p+2); op(p+3);
+ *	    p+=4; n-=4;
+ *	}
+ *	if (n) {
+ *	    op(p+0); if (--n==0) return;
+ *	    op(p+1); if (--n==0) return;
+ *	    op(p+2); return;
+ * }
+ */
+
+#ifdef OPENSSL_FIPSCANISTER
+#include <openssl/fipssyms.h>
+#endif
+
+#if defined(__SUNPRO_C) && defined(__sparcv9)
+ /* They've said -xarch=v9 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME_SIZE -192
+#elif defined(__GNUC__) && defined(__arch64__)
+ /* They've said -m64 at command line */
+ .register %g2,#scratch
+ .register %g3,#scratch
+# define FRAME_SIZE -192
+#else
+# define FRAME_SIZE -96
+#endif
+/*
+ * GNU assembler can't stand stuw:-(
+ */
+#define stuw st
+
+.section ".text",#alloc,#execinstr
+.file "bn_asm.sparc.v8plus.S"
+
+.align 32
+
+.global bn_mul_add_words
+/*
+ * BN_ULONG bn_mul_add_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_add_words:
+ sra %o2,%g0,%o2 ! signx %o2
+ brgz,a %o2,.L_bn_mul_add_words_proceed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_mul_add_words_proceed:
+ srl %o3,%g0,%o3 ! clruw %o3
+ andcc %o2,-4,%g0
+ bz,pn %icc,.L_bn_mul_add_words_tail
+ clr %o5
+
+.L_bn_mul_add_words_loop: ! wow! 32 aligned!
+ lduw [%o0],%g1
+ lduw [%o1+4],%g3
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ nop
+ add %o4,%g2,%o4
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+
+ lduw [%o0+4],%g1
+ lduw [%o1+8],%g2
+ mulx %o3,%g3,%g3
+ add %g1,%o5,%o4
+ dec 4,%o2
+ add %o4,%g3,%o4
+ stuw %o4,[%o0+4]
+ srlx %o4,32,%o5
+
+ lduw [%o0+8],%g1
+ lduw [%o1+12],%g3
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ inc 16,%o1
+ add %o4,%g2,%o4
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+
+ lduw [%o0+12],%g1
+ mulx %o3,%g3,%g3
+ add %g1,%o5,%o4
+ inc 16,%o0
+ add %o4,%g3,%o4
+ andcc %o2,-4,%g0
+ stuw %o4,[%o0-4]
+ srlx %o4,32,%o5
+ bnz,a,pt %icc,.L_bn_mul_add_words_loop
+ lduw [%o1],%g2
+
+ brnz,a,pn %o2,.L_bn_mul_add_words_tail
+ lduw [%o1],%g2
+.L_bn_mul_add_words_return:
+ retl
+ mov %o5,%o0
+
+.L_bn_mul_add_words_tail:
+ lduw [%o0],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ dec %o2
+ add %o4,%g2,%o4
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_add_words_return
+ stuw %o4,[%o0]
+
+ lduw [%o1+4],%g2
+ lduw [%o0+4],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ dec %o2
+ add %o4,%g2,%o4
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_add_words_return
+ stuw %o4,[%o0+4]
+
+ lduw [%o1+8],%g2
+ lduw [%o0+8],%g1
+ mulx %o3,%g2,%g2
+ add %g1,%o5,%o4
+ add %o4,%g2,%o4
+ stuw %o4,[%o0+8]
+ retl
+ srlx %o4,32,%o0
+
+.type bn_mul_add_words,#function
+.size bn_mul_add_words,(.-bn_mul_add_words)
+
+.align 32
+
+.global bn_mul_words
+/*
+ * BN_ULONG bn_mul_words(rp,ap,num,w)
+ * BN_ULONG *rp,*ap;
+ * int num;
+ * BN_ULONG w;
+ */
+bn_mul_words:
+ sra %o2,%g0,%o2 ! signx %o2
+ brgz,a %o2,.L_bn_mul_words_proceeed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_mul_words_proceeed:
+ srl %o3,%g0,%o3 ! clruw %o3
+ andcc %o2,-4,%g0
+ bz,pn %icc,.L_bn_mul_words_tail
+ clr %o5
+
+.L_bn_mul_words_loop: ! wow! 32 aligned!
+ lduw [%o1+4],%g3
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ nop
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+
+ lduw [%o1+8],%g2
+ mulx %o3,%g3,%g3
+ add %g3,%o5,%o4
+ dec 4,%o2
+ stuw %o4,[%o0+4]
+ srlx %o4,32,%o5
+
+ lduw [%o1+12],%g3
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ inc 16,%o1
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+
+ mulx %o3,%g3,%g3
+ add %g3,%o5,%o4
+ inc 16,%o0
+ stuw %o4,[%o0-4]
+ srlx %o4,32,%o5
+ andcc %o2,-4,%g0
+ bnz,a,pt %icc,.L_bn_mul_words_loop
+ lduw [%o1],%g2
+ nop
+ nop
+
+ brnz,a,pn %o2,.L_bn_mul_words_tail
+ lduw [%o1],%g2
+.L_bn_mul_words_return:
+ retl
+ mov %o5,%o0
+
+.L_bn_mul_words_tail:
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ dec %o2
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_words_return
+ stuw %o4,[%o0]
+
+ lduw [%o1+4],%g2
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ dec %o2
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_mul_words_return
+ stuw %o4,[%o0+4]
+
+ lduw [%o1+8],%g2
+ mulx %o3,%g2,%g2
+ add %g2,%o5,%o4
+ stuw %o4,[%o0+8]
+ retl
+ srlx %o4,32,%o0
+
+.type bn_mul_words,#function
+.size bn_mul_words,(.-bn_mul_words)
+
+.align 32
+.global bn_sqr_words
+/*
+ * void bn_sqr_words(r,a,n)
+ * BN_ULONG *r,*a;
+ * int n;
+ */
+bn_sqr_words:
+ sra %o2,%g0,%o2 ! signx %o2
+ brgz,a %o2,.L_bn_sqr_words_proceeed
+ lduw [%o1],%g2
+ retl
+ clr %o0
+ nop
+ nop
+ nop
+
+.L_bn_sqr_words_proceeed:
+ andcc %o2,-4,%g0
+ nop
+ bz,pn %icc,.L_bn_sqr_words_tail
+ nop
+
+.L_bn_sqr_words_loop: ! wow! 32 aligned!
+ lduw [%o1+4],%g3
+ mulx %g2,%g2,%o4
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+ stuw %o5,[%o0+4]
+ nop
+
+ lduw [%o1+8],%g2
+ mulx %g3,%g3,%o4
+ dec 4,%o2
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+ stuw %o5,[%o0+12]
+
+ lduw [%o1+12],%g3
+ mulx %g2,%g2,%o4
+ srlx %o4,32,%o5
+ stuw %o4,[%o0+16]
+ inc 16,%o1
+ stuw %o5,[%o0+20]
+
+ mulx %g3,%g3,%o4
+ inc 32,%o0
+ stuw %o4,[%o0-8]
+ srlx %o4,32,%o5
+ andcc %o2,-4,%g2
+ stuw %o5,[%o0-4]
+ bnz,a,pt %icc,.L_bn_sqr_words_loop
+ lduw [%o1],%g2
+ nop
+
+ brnz,a,pn %o2,.L_bn_sqr_words_tail
+ lduw [%o1],%g2
+.L_bn_sqr_words_return:
+ retl
+ clr %o0
+
+.L_bn_sqr_words_tail:
+ mulx %g2,%g2,%o4
+ dec %o2
+ stuw %o4,[%o0]
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_sqr_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+4],%g2
+ mulx %g2,%g2,%o4
+ dec %o2
+ stuw %o4,[%o0+8]
+ srlx %o4,32,%o5
+ brz,pt %o2,.L_bn_sqr_words_return
+ stuw %o5,[%o0+12]
+
+ lduw [%o1+8],%g2
+ mulx %g2,%g2,%o4
+ srlx %o4,32,%o5
+ stuw %o4,[%o0+16]
+ stuw %o5,[%o0+20]
+ retl
+ clr %o0
+
+.type bn_sqr_words,#function
+.size bn_sqr_words,(.-bn_sqr_words)
+
+.align 32
+.global bn_div_words
+/*
+ * BN_ULONG bn_div_words(h,l,d)
+ * BN_ULONG h,l,d;
+ */
+bn_div_words:
+ sllx %o0,32,%o0
+ or %o0,%o1,%o0
+ udivx %o0,%o2,%o0
+ retl
+ srl %o0,%g0,%o0 ! clruw %o0
+
+.type bn_div_words,#function
+.size bn_div_words,(.-bn_div_words)
+
+.align 32
+
+.global bn_add_words
+/*
+ * BN_ULONG bn_add_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_add_words:
+ sra %o3,%g0,%o3 ! signx %o3
+ brgz,a %o3,.L_bn_add_words_proceed
+ lduw [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_add_words_proceed:
+ andcc %o3,-4,%g0
+ bz,pn %icc,.L_bn_add_words_tail
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_add_words_loop: ! wow! 32 aligned!
+ dec 4,%o3
+ lduw [%o2],%o5
+ lduw [%o1+4],%g1
+ lduw [%o2+4],%g2
+ lduw [%o1+8],%g3
+ lduw [%o2+8],%g4
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0]
+
+ lduw [%o1+12],%o4
+ lduw [%o2+12],%o5
+ inc 16,%o1
+ addccc %g1,%g2,%g1
+ stuw %g1,[%o0+4]
+
+ inc 16,%o2
+ addccc %g3,%g4,%g3
+ stuw %g3,[%o0+8]
+
+ inc 16,%o0
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0-4]
+ and %o3,-4,%g1
+ brnz,a,pt %g1,.L_bn_add_words_loop
+ lduw [%o1],%o4
+
+ brnz,a,pn %o3,.L_bn_add_words_tail
+ lduw [%o1],%o4
+.L_bn_add_words_return:
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+ nop
+
+.L_bn_add_words_tail:
+ lduw [%o2],%o5
+ dec %o3
+ addccc %o5,%o4,%o5
+ brz,pt %o3,.L_bn_add_words_return
+ stuw %o5,[%o0]
+
+ lduw [%o1+4],%o4
+ lduw [%o2+4],%o5
+ dec %o3
+ addccc %o5,%o4,%o5
+ brz,pt %o3,.L_bn_add_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+8],%o4
+ lduw [%o2+8],%o5
+ addccc %o5,%o4,%o5
+ stuw %o5,[%o0+8]
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+
+.type bn_add_words,#function
+.size bn_add_words,(.-bn_add_words)
+
+.global bn_sub_words
+/*
+ * BN_ULONG bn_sub_words(rp,ap,bp,n)
+ * BN_ULONG *rp,*ap,*bp;
+ * int n;
+ */
+bn_sub_words:
+ sra %o3,%g0,%o3 ! signx %o3
+ brgz,a %o3,.L_bn_sub_words_proceed
+ lduw [%o1],%o4
+ retl
+ clr %o0
+
+.L_bn_sub_words_proceed:
+ andcc %o3,-4,%g0
+ bz,pn %icc,.L_bn_sub_words_tail
+ addcc %g0,0,%g0 ! clear carry flag
+
+.L_bn_sub_words_loop: ! wow! 32 aligned!
+ dec 4,%o3
+ lduw [%o2],%o5
+ lduw [%o1+4],%g1
+ lduw [%o2+4],%g2
+ lduw [%o1+8],%g3
+ lduw [%o2+8],%g4
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0]
+
+ lduw [%o1+12],%o4
+ lduw [%o2+12],%o5
+ inc 16,%o1
+ subccc %g1,%g2,%g2
+ stuw %g2,[%o0+4]
+
+ inc 16,%o2
+ subccc %g3,%g4,%g4
+ stuw %g4,[%o0+8]
+
+ inc 16,%o0
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0-4]
+ and %o3,-4,%g1
+ brnz,a,pt %g1,.L_bn_sub_words_loop
+ lduw [%o1],%o4
+
+ brnz,a,pn %o3,.L_bn_sub_words_tail
+ lduw [%o1],%o4
+.L_bn_sub_words_return:
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+ nop
+
+.L_bn_sub_words_tail: ! wow! 32 aligned!
+ lduw [%o2],%o5
+ dec %o3
+ subccc %o4,%o5,%o5
+ brz,pt %o3,.L_bn_sub_words_return
+ stuw %o5,[%o0]
+
+ lduw [%o1+4],%o4
+ lduw [%o2+4],%o5
+ dec %o3
+ subccc %o4,%o5,%o5
+ brz,pt %o3,.L_bn_sub_words_return
+ stuw %o5,[%o0+4]
+
+ lduw [%o1+8],%o4
+ lduw [%o2+8],%o5
+ subccc %o4,%o5,%o5
+ stuw %o5,[%o0+8]
+ clr %o0
+ retl
+ movcs %icc,1,%o0
+
+.type bn_sub_words,#function
+.size bn_sub_words,(.-bn_sub_words)
+
+/*
+ * The code below depends on the fact that the upper parts of the
+ * %l0-%l7 and %i0-%i7 registers are zeroed by the kernel after a
+ * context switch. In previous versions this comment stated that "the
+ * trouble is that it's not feasible to implement the mumbo-jumbo in
+ * fewer V9 instructions:-(", which apparently isn't true thanks to the
+ * 'bcs,a %xcc,.+8; inc %rd' pair. But the performance improvement
+ * results not from the shorter code, but from the elimination of the
+ * multicycle non-pairable 'rd %y,%rd' instructions.
+ *
+ * Andy.
+ */
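+
+/*
+ * For orientation, here is a rough C model of the mul_add_c() step that
+ * the !mul_add_c() annotations in the comba routines below refer to. It
+ * only illustrates the comba accumulation (add a[i]*b[j] into the
+ * three-word accumulator c0:c1:c2); the names and types here are
+ * illustrative, and the actual reference lives in crypto/bn/bn_asm.c.
+ *
+ *	typedef unsigned int u32;
+ *	typedef unsigned long long u64;
+ *
+ *	static void mul_add_c(u32 a, u32 b, u32 *c0, u32 *c1, u32 *c2)
+ *	{
+ *		u64 t = (u64)a * b + *c0;	// 32x32->64 product plus low word
+ *		*c0 = (u32)t;
+ *		t = (t >> 32) + *c1;		// carry into the middle word
+ *		*c1 = (u32)t;
+ *		*c2 += (u32)(t >> 32);		// and into the top word
+ *	}
+ *
+ * In the code below the c0:c1 pair lives in a single 64-bit register
+ * (c_12), and a carry out of the 64-bit addcc is recorded by the
+ * 'bcs,a %xcc,.+8; add c_3,t_2,c_3' pair and folded back in later,
+ * instead of being propagated explicitly as above.
+ */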
+
+/*
+ * Here is the register usage map for *all* routines below.
+ */
+#define t_1 %o0
+#define t_2 %o1
+#define c_12 %o2
+#define c_3 %o3
+
+#define ap(I) [%i1+4*I]
+#define bp(I) [%i2+4*I]
+#define rp(I) [%i0+4*I]
+
+#define a_0 %l0
+#define a_1 %l1
+#define a_2 %l2
+#define a_3 %l3
+#define a_4 %l4
+#define a_5 %l5
+#define a_6 %l6
+#define a_7 %l7
+
+#define b_0 %i3
+#define b_1 %i4
+#define b_2 %i5
+#define b_3 %o4
+#define b_4 %o5
+#define b_5 %o7
+#define b_6 %g1
+#define b_7 %g4
+
+.align 32
+.global bn_mul_comba8
+/*
+ * void bn_mul_comba8(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba8:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw bp(0),b_0 !=
+ lduw bp(1),b_1
+ mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !=!r[0]=c1;
+
+ lduw ap(1),a_1
+ mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(2),a_2
+ mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(2),b_2 !=
+ mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(3),b_3
+ mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_2,t_1 !=!mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(4),a_4
+ mulx a_3,b_0,t_1 !=!mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_4,b_0,t_1 !mul_add_c(a[4],b[0],c2,c3,c1);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_1,t_1 !=!mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_2,t_1 !=!mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(4),b_4 !=
+ mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(5),b_5
+ mulx a_0,b_4,t_1 !mul_add_c(a[0],b[4],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_5,t_1 !mul_add_c(a[0],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_4,t_1 !mul_add_c(a[1],b[4],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(5),a_5
+ mulx a_4,b_1,t_1 !mul_add_c(a[4],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(6),a_6
+ mulx a_5,b_0,t_1 !=!mul_add_c(a[5],b[0],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_6,b_0,t_1 !mul_add_c(a[6],b[0],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,b_1,t_1 !=!mul_add_c(a[5],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,b_2,t_1 !=!mul_add_c(a[4],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_3,t_1 !=!mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_4,t_1 !=!mul_add_c(a[2],b[4],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(6),b_6 !=
+ mulx a_1,b_5,t_1 !mul_add_c(a[1],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(7),b_7
+ mulx a_0,b_6,t_1 !mul_add_c(a[0],b[6],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_7,t_1 !mul_add_c(a[0],b[7],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_6,t_1 !mul_add_c(a[1],b[6],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_2,b_5,t_1 !mul_add_c(a[2],b[5],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_4,t_1 !mul_add_c(a[3],b[4],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_3,t_1 !mul_add_c(a[4],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_2,t_1 !mul_add_c(a[5],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(7),a_7
+ mulx a_6,b_1,t_1 !=!mul_add_c(a[6],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_7,b_0,t_1 !=!mul_add_c(a[7],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(7) !r[7]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,b_1,t_1 !=!mul_add_c(a[7],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_6,b_2,t_1 !mul_add_c(a[6],b[2],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_5,b_3,t_1 !mul_add_c(a[5],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_4,b_4,t_1 !mul_add_c(a[4],b[4],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_3,b_5,t_1 !mul_add_c(a[3],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_2,b_6,t_1 !mul_add_c(a[2],b[6],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_1,b_7,t_1 !mul_add_c(a[1],b[7],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ srlx t_1,32,c_12
+ stuw t_1,rp(8) !r[8]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_7,t_1 !=!mul_add_c(a[2],b[7],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ mulx a_3,b_6,t_1 !mul_add_c(a[3],b[6],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_5,t_1 !mul_add_c(a[4],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_4,t_1 !mul_add_c(a[5],b[4],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_3,t_1 !mul_add_c(a[6],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_2,t_1 !mul_add_c(a[7],b[2],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(9) !r[9]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_3,t_1 !mul_add_c(a[7],b[3],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_4,t_1 !mul_add_c(a[6],b[4],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_5,t_1 !mul_add_c(a[5],b[5],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_4,b_6,t_1 !mul_add_c(a[4],b[6],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_3,b_7,t_1 !mul_add_c(a[3],b[7],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(10) !r[10]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_4,b_7,t_1 !mul_add_c(a[4],b[7],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_6,t_1 !mul_add_c(a[5],b[6],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_5,t_1 !mul_add_c(a[6],b[5],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_4,t_1 !mul_add_c(a[7],b[4],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(11) !r[11]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_5,t_1 !mul_add_c(a[7],b[5],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_6,b_6,t_1 !mul_add_c(a[6],b[6],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_5,b_7,t_1 !mul_add_c(a[5],b[7],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(12) !r[12]=c1;
+ or c_12,c_3,c_12 !=
+
+ mulx a_6,b_7,t_1 !mul_add_c(a[6],b[7],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_7,b_6,t_1 !mul_add_c(a[7],b[6],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ st t_1,rp(13) !r[13]=c2;
+ or c_12,c_3,c_12 !=
+
+ mulx a_7,b_7,t_1 !mul_add_c(a[7],b[7],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(14) !r[14]=c3;
+ stuw c_12,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0 !=
+
+.type bn_mul_comba8,#function
+.size bn_mul_comba8,(.-bn_mul_comba8)
+
+.align 32
+
+.global bn_mul_comba4
+/*
+ * void bn_mul_comba4(r,a,b)
+ * BN_ULONG *r,*a,*b;
+ */
+bn_mul_comba4:
+ save %sp,FRAME_SIZE,%sp
+ lduw ap(0),a_0
+ mov 1,t_2
+ lduw bp(0),b_0
+ sllx t_2,32,t_2 !=
+ lduw bp(1),b_1
+ mulx a_0,b_0,t_1 !mul_add_c(a[0],b[0],c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !=!r[0]=c1;
+
+ lduw ap(1),a_1
+ mulx a_0,b_1,t_1 !mul_add_c(a[0],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(2),a_2
+ mulx a_1,b_0,t_1 !=!mul_add_c(a[1],b[0],c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_0,t_1 !mul_add_c(a[2],b[0],c3,c1,c2);
+ addcc c_12,t_1,c_12 !=
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw bp(2),b_2 !=
+ mulx a_1,b_1,t_1 !mul_add_c(a[1],b[1],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3 !=
+ lduw bp(3),b_3
+ mulx a_0,b_2,t_1 !mul_add_c(a[0],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12 !=
+
+ mulx a_0,b_3,t_1 !mul_add_c(a[0],b[3],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ mulx a_1,b_2,t_1 !mul_add_c(a[1],b[2],c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8 !=
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_2,b_1,t_1 !mul_add_c(a[2],b[1],c1,c2,c3);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_0,t_1 !mul_add_c(a[3],b[0],c1,c2,c3);!=
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(3) !=!r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_3,b_1,t_1 !mul_add_c(a[3],b[1],c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,b_2,t_1 !mul_add_c(a[2],b[2],c2,c3,c1);
+ addcc c_12,t_1,c_12 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,b_3,t_1 !mul_add_c(a[1],b[3],c2,c3,c1);
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !=!r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,b_3,t_1 !mul_add_c(a[2],b[3],c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,b_2,t_1 !mul_add_c(a[3],b[2],c3,c1,c2);
+ addcc c_12,t_1,t_1 !=
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !=!r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_3,b_3,t_1 !mul_add_c(a[3],b[3],c1,c2,c3);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12 !=
+ stuw t_1,rp(6) !r[6]=c1;
+ stuw c_12,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_mul_comba4,#function
+.size bn_mul_comba4,(.-bn_mul_comba4)
+
+.align 32
+
+.global bn_sqr_comba8
+bn_sqr_comba8:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw ap(1),a_1
+ mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !r[0]=c1;
+
+ lduw ap(2),a_2
+ mulx a_0,a_1,t_1 !=!sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(4),a_4
+ mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ st t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_4,a_0,t_1 !sqr_add_c2(a,4,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(5),a_5
+ mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_5,t_1 !sqr_add_c2(a,5,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_4,t_1 !sqr_add_c2(a,4,1,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(6),a_6
+ mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_6,a_0,t_1 !sqr_add_c2(a,6,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_1,t_1 !sqr_add_c2(a,5,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_2,t_1 !sqr_add_c2(a,4,2,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(7),a_7
+ mulx a_3,a_3,t_1 !=!sqr_add_c(a,3,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_7,t_1 !sqr_add_c2(a,7,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_6,t_1 !sqr_add_c2(a,6,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,a_5,t_1 !sqr_add_c2(a,5,2,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_4,t_1 !sqr_add_c2(a,4,3,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(7) !r[7]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_1,t_1 !sqr_add_c2(a,7,1,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_2,t_1 !sqr_add_c2(a,6,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_3,t_1 !sqr_add_c2(a,5,3,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_4,t_1 !sqr_add_c(a,4,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(8) !r[8]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_7,t_1 !sqr_add_c2(a,7,2,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_3,a_6,t_1 !sqr_add_c2(a,6,3,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_4,a_5,t_1 !sqr_add_c2(a,5,4,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(9) !r[9]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_3,t_1 !sqr_add_c2(a,7,3,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_4,t_1 !sqr_add_c2(a,6,4,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_5,t_1 !sqr_add_c(a,5,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(10) !r[10]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_4,a_7,t_1 !sqr_add_c2(a,7,4,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_5,a_6,t_1 !sqr_add_c2(a,6,5,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(11) !r[11]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_5,t_1 !sqr_add_c2(a,7,5,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_6,a_6,t_1 !sqr_add_c(a,6,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(12) !r[12]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_6,a_7,t_1 !sqr_add_c2(a,7,6,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(13) !r[13]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_7,a_7,t_1 !sqr_add_c(a,7,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12
+ stuw t_1,rp(14) !r[14]=c3;
+ stuw c_12,rp(15) !r[15]=c1;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba8,#function
+.size bn_sqr_comba8,(.-bn_sqr_comba8)
+
+.align 32
+
+.global bn_sqr_comba4
+/*
+ * void bn_sqr_comba4(r,a)
+ * BN_ULONG *r,*a;
+ */
+bn_sqr_comba4:
+ save %sp,FRAME_SIZE,%sp
+ mov 1,t_2
+ lduw ap(0),a_0
+ sllx t_2,32,t_2
+ lduw ap(1),a_1
+ mulx a_0,a_0,t_1 !sqr_add_c(a,0,c1,c2,c3);
+ srlx t_1,32,c_12
+ stuw t_1,rp(0) !r[0]=c1;
+
+ lduw ap(2),a_2
+ mulx a_0,a_1,t_1 !sqr_add_c2(a,1,0,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(1) !r[1]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_0,t_1 !sqr_add_c2(a,2,0,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ lduw ap(3),a_3
+ mulx a_1,a_1,t_1 !sqr_add_c(a,1,c3,c1,c2);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(2) !r[2]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_0,a_3,t_1 !sqr_add_c2(a,3,0,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_1,a_2,t_1 !sqr_add_c2(a,2,1,c1,c2,c3);
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(3) !r[3]=c1;
+ or c_12,c_3,c_12
+
+ mulx a_3,a_1,t_1 !sqr_add_c2(a,3,1,c2,c3,c1);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,c_12
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ mulx a_2,a_2,t_1 !sqr_add_c(a,2,c2,c3,c1);
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(4) !r[4]=c2;
+ or c_12,c_3,c_12
+
+ mulx a_2,a_3,t_1 !sqr_add_c2(a,3,2,c3,c1,c2);
+ addcc c_12,t_1,c_12
+ clr c_3
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ addcc c_12,t_1,t_1
+ bcs,a %xcc,.+8
+ add c_3,t_2,c_3
+ srlx t_1,32,c_12
+ stuw t_1,rp(5) !r[5]=c3;
+ or c_12,c_3,c_12
+
+ mulx a_3,a_3,t_1 !sqr_add_c(a,3,c1,c2,c3);
+ addcc c_12,t_1,t_1
+ srlx t_1,32,c_12
+ stuw t_1,rp(6) !r[6]=c1;
+ stuw c_12,rp(7) !r[7]=c2;
+
+ ret
+ restore %g0,%g0,%o0
+
+.type bn_sqr_comba4,#function
+.size bn_sqr_comba4,(.-bn_sqr_comba4)
+
+.align 32
diff --git a/openssl-1.1.0h/crypto/bn/asm/sparcv9-gf2m.pl b/openssl-1.1.0h/crypto/bn/asm/sparcv9-gf2m.pl
new file mode 100644
index 0000000..dcf11a8
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/sparcv9-gf2m.pl
@@ -0,0 +1,200 @@
+#! /usr/bin/env perl
+# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# October 2012
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. For the time being it's kind of a low-hanging mechanical
+# port from C... Except that it has two code paths: one suitable for all
+# SPARCv9 processors and one for VIS3-capable ones. The former delivers
+# ~25-45% more, more so for longer keys, with the heaviest gains in DH
+# and DSA verify operations on the venerable UltraSPARC II. On T4 the
+# VIS3 code is ~100-230% faster than gcc-generated code and ~35-90%
+# faster than the pure SPARCv9 code path.
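+#
+# As a rough C model of what the .Lsoftware path below computes -- a
+# 64x64 carry-less (GF(2)[x]) multiplication done with a 16-entry nibble
+# table, in the spirit of bn_GF2m_mul_1x1 in crypto/bn/bn_gf2m.c --
+# consider the following sketch. The names and the exact shape of the
+# top-bit correction are illustrative only:
+#
+#	#include <stdint.h>
+#
+#	static void gf2m_mul_64x64(uint64_t *r1, uint64_t *r0,
+#	                           uint64_t a, uint64_t b)
+#	{
+#		uint64_t tab[16], l = 0, h = 0;
+#		uint64_t top3b = a >> 61, a1 = a & 0x1fffffffffffffffULL;
+#		int i;
+#
+#		tab[0] = 0;			/* tab[i] = i(x) * a1(x) over GF(2) */
+#		for (i = 1; i < 16; i++)
+#			tab[i] = (tab[i >> 1] << 1) ^ ((i & 1) ? a1 : 0);
+#
+#		for (i = 0; i < 64; i += 4) {	/* scan b four bits at a time */
+#			uint64_t s = tab[(b >> i) & 0xf];
+#			l ^= s << i;
+#			if (i)
+#				h ^= s >> (64 - i);
+#		}
+#
+#		/* compensate for the three top bits of a masked off above */
+#		if (top3b & 1) { l ^= b << 61; h ^= b >> 3; }
+#		if (top3b & 2) { l ^= b << 62; h ^= b >> 2; }
+#		if (top3b & 4) { l ^= b << 63; h ^= b >> 1; }
+#
+#		*r1 = h; *r0 = l;
+#	}
+#
+# The assembly below builds the same 16-entry table on the stack and
+# handles the top bits of a with sign-broadcast masks rather than branches.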
+
+$output = pop;
+open STDOUT,">$output";
+
+$locals=16*8;
+
+$tab="%l0";
+
+@T=("%g2","%g3");
+@i=("%g4","%g5");
+
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%o$_",(0..5));
+($lo,$hi,$b)=("%g1",$a8,"%o7"); $a=$lo;
+
+$code.=<<___;
+#include <sparc_arch.h>
+
+#ifdef __arch64__
+.register %g2,#scratch
+.register %g3,#scratch
+#endif
+
+#ifdef __PIC__
+SPARC_PIC_THUNK(%g1)
+#endif
+
+.globl bn_GF2m_mul_2x2
+.align 16
+bn_GF2m_mul_2x2:
+ SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
+ ld [%g1+0],%g1 ! OPENSSL_sparcv9cap_P[0]
+
+ andcc %g1, SPARCV9_VIS3, %g0
+ bz,pn %icc,.Lsoftware
+ nop
+
+ sllx %o1, 32, %o1
+ sllx %o3, 32, %o3
+ or %o2, %o1, %o1
+ or %o4, %o3, %o3
+ .word 0x95b262ab ! xmulx %o1, %o3, %o2
+ .word 0x99b262cb ! xmulxhi %o1, %o3, %o4
+ srlx %o2, 32, %o1 ! 13 cycles later
+ st %o2, [%o0+0]
+ st %o1, [%o0+4]
+ srlx %o4, 32, %o3
+ st %o4, [%o0+8]
+ retl
+ st %o3, [%o0+12]
+
+.align 16
+.Lsoftware:
+ save %sp,-STACK_FRAME-$locals,%sp
+
+ sllx %i1,32,$a
+ mov -1,$a12
+ sllx %i3,32,$b
+ or %i2,$a,$a
+ srlx $a12,1,$a48 ! 0x7fff...
+ or %i4,$b,$b
+ srlx $a12,2,$a12 ! 0x3fff...
+ add %sp,STACK_BIAS+STACK_FRAME,$tab
+
+ sllx $a,2,$a4
+ mov $a,$a1
+ sllx $a,1,$a2
+
+ srax $a4,63,@i[1] ! broadcast 61st bit
+ and $a48,$a4,$a4 ! (a<<2)&0x7fff...
+ srlx $a48,2,$a48
+ srax $a2,63,@i[0] ! broadcast 62nd bit
+ and $a12,$a2,$a2 ! (a<<1)&0x3fff...
+ srax $a1,63,$lo ! broadcast 63rd bit
+ and $a48,$a1,$a1 ! (a<<0)&0x1fff...
+
+ sllx $a1,3,$a8
+ and $b,$lo,$lo
+ and $b,@i[0],@i[0]
+ and $b,@i[1],@i[1]
+
+ stx %g0,[$tab+0*8] ! tab[0]=0
+ xor $a1,$a2,$a12
+ stx $a1,[$tab+1*8] ! tab[1]=a1
+ stx $a2,[$tab+2*8] ! tab[2]=a2
+ xor $a4,$a8,$a48
+ stx $a12,[$tab+3*8] ! tab[3]=a1^a2
+ xor $a4,$a1,$a1
+
+ stx $a4,[$tab+4*8] ! tab[4]=a4
+ xor $a4,$a2,$a2
+ stx $a1,[$tab+5*8] ! tab[5]=a1^a4
+ xor $a4,$a12,$a12
+ stx $a2,[$tab+6*8] ! tab[6]=a2^a4
+ xor $a48,$a1,$a1
+ stx $a12,[$tab+7*8] ! tab[7]=a1^a2^a4
+ xor $a48,$a2,$a2
+
+ stx $a8,[$tab+8*8] ! tab[8]=a8
+ xor $a48,$a12,$a12
+ stx $a1,[$tab+9*8] ! tab[9]=a1^a8
+ xor $a4,$a1,$a1
+ stx $a2,[$tab+10*8] ! tab[10]=a2^a8
+ xor $a4,$a2,$a2
+ stx $a12,[$tab+11*8] ! tab[11]=a1^a2^a8
+
+ xor $a4,$a12,$a12
+ stx $a48,[$tab+12*8] ! tab[12]=a4^a8
+ srlx $lo,1,$hi
+ stx $a1,[$tab+13*8] ! tab[13]=a1^a4^a8
+ sllx $lo,63,$lo
+ stx $a2,[$tab+14*8] ! tab[14]=a2^a4^a8
+ srlx @i[0],2,@T[0]
+ stx $a12,[$tab+15*8] ! tab[15]=a1^a2^a4^a8
+
+ sllx @i[0],62,$a1
+ sllx $b,3,@i[0]
+ srlx @i[1],3,@T[1]
+ and @i[0],`0xf<<3`,@i[0]
+ sllx @i[1],61,$a2
+ ldx [$tab+@i[0]],@i[0]
+ srlx $b,4-3,@i[1]
+ xor @T[0],$hi,$hi
+ and @i[1],`0xf<<3`,@i[1]
+ xor $a1,$lo,$lo
+ ldx [$tab+@i[1]],@i[1]
+ xor @T[1],$hi,$hi
+
+ xor @i[0],$lo,$lo
+ srlx $b,8-3,@i[0]
+ xor $a2,$lo,$lo
+ and @i[0],`0xf<<3`,@i[0]
+___
+for($n=1;$n<14;$n++) {
+$code.=<<___;
+ sllx @i[1],`$n*4`,@T[0]
+ ldx [$tab+@i[0]],@i[0]
+ srlx @i[1],`64-$n*4`,@T[1]
+ xor @T[0],$lo,$lo
+ srlx $b,`($n+2)*4`-3,@i[1]
+ xor @T[1],$hi,$hi
+ and @i[1],`0xf<<3`,@i[1]
+___
+ push(@i,shift(@i)); push(@T,shift(@T));
+}
+$code.=<<___;
+ sllx @i[1],`$n*4`,@T[0]
+ ldx [$tab+@i[0]],@i[0]
+ srlx @i[1],`64-$n*4`,@T[1]
+ xor @T[0],$lo,$lo
+
+ sllx @i[0],`($n+1)*4`,@T[0]
+ xor @T[1],$hi,$hi
+ srlx @i[0],`64-($n+1)*4`,@T[1]
+ xor @T[0],$lo,$lo
+ xor @T[1],$hi,$hi
+
+ srlx $lo,32,%i1
+ st $lo,[%i0+0]
+ st %i1,[%i0+4]
+ srlx $hi,32,%i2
+ st $hi,[%i0+8]
+ st %i2,[%i0+12]
+
+ ret
+ restore
+.type bn_GF2m_mul_2x2,#function
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.asciz "GF(2^m) Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
+.align 4
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/sparcv9-mont.pl b/openssl-1.1.0h/crypto/bn/asm/sparcv9-mont.pl
new file mode 100644
index 0000000..6807c8b
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/sparcv9-mont.pl
@@ -0,0 +1,619 @@
+#! /usr/bin/env perl
+# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# December 2005
+#
+# Pure SPARCv9/8+ and IALU-only bn_mul_mont implementation. The reasons
+# for undertaking this effort are multiple. First of all, UltraSPARC is
+# not the whole SPARCv9 universe, and other VIS-free implementations
+# deserve optimized code just as much. Secondly, the newly introduced
+# UltraSPARC T1, a.k.a. Niagara, has a shared FPU, and concurrent
+# FPU-intensive paths, such as sparcv9a-mont, will simply sink it. Yes,
+# T1 is equipped with several integrated RSA/DSA accelerator circuits
+# accessible through a kernel driver [only(*)], but having a decent
+# user-land software implementation is important too. Finally, there was
+# the desire to experiment with a dedicated squaring procedure. Yes, this
+# module implements one, because it was easiest to draft it in SPARCv9
+# instructions...
+
+# (*)	An engine accessing the driver in question is on my TODO list.
+#	For reference, the accelerator is estimated to give a 6 to 10
+#	times improvement on single-threaded RSA sign. It should be noted
+#	that a 6-10x improvement coefficient does not actually mean
+#	something extraordinary in terms of absolute [single-threaded]
+#	performance, as the SPARCv9 instruction set is by all means the
+#	least suitable for high-performance crypto among 64-bit
+#	platforms. The 6-10x factor simply places T1 in the same
+#	performance domain as, say, AMD64 and IA-64. The improvement in
+#	RSA verify doesn't appear impressive at all, but it's the sign
+#	operation which is far more critical/interesting.
+
+# You might notice that the inner loops are modulo-scheduled:-) This has
+# an essentially negligible impact on UltraSPARC performance; it's
+# Fujitsu SPARC64 V users who should notice and hopefully appreciate
+# the advantage... Currently this module surpasses sparcv9a-mont.pl
+# by ~20% on UltraSPARC-III and later cores, but recall that the sparcv9a
+# module still has hidden potential [see the TODO list there], which is
+# estimated to be larger than 20%...
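+#
+# For reference, below is roughly the word-by-word Montgomery
+# multiplication that bn_mul_mont computes (BN_ULONG is 32 bits here).
+# It is an illustrative sketch, not the OpenSSL reference code: n0 is
+# taken by value (the assembly loads it through the n0 pointer), scratch
+# space is passed in explicitly, and the final selection is a plain
+# branch where the assembly uses a mask (.Lsub/.Lcopy below).
+#
+#	#include <stdint.h>
+#	#include <string.h>
+#
+#	/* rp = ap * bp * 2^(-32*num) mod np;  n0 = -np[0]^(-1) mod 2^32 */
+#	static void mont_mul(uint32_t *rp, const uint32_t *ap,
+#	                     const uint32_t *bp, const uint32_t *np,
+#	                     uint32_t n0, int num,
+#	                     uint32_t *tp)	/* scratch, num+2 limbs */
+#	{
+#		int i, j;
+#
+#		memset(tp, 0, (num + 2) * sizeof(*tp));
+#		for (i = 0; i < num; i++) {
+#			uint64_t t, c = 0;
+#			uint32_t m;
+#
+#			for (j = 0; j < num; j++) {	/* tp += ap * bp[i] */
+#				t = (uint64_t)ap[j] * bp[i] + tp[j] + c;
+#				tp[j] = (uint32_t)t;
+#				c = t >> 32;
+#			}
+#			t = (uint64_t)tp[num] + c;
+#			tp[num] = (uint32_t)t;
+#			tp[num + 1] = (uint32_t)(t >> 32);
+#
+#			m = tp[0] * n0;		/* makes tp[0]+m*np[0] == 0 mod 2^32 */
+#			c = ((uint64_t)m * np[0] + tp[0]) >> 32;
+#			for (j = 1; j < num; j++) {	/* tp = (tp + m*np) >> 32 */
+#				t = (uint64_t)m * np[j] + tp[j] + c;
+#				tp[j - 1] = (uint32_t)t;
+#				c = t >> 32;
+#			}
+#			t = (uint64_t)tp[num] + c;
+#			tp[num - 1] = (uint32_t)t;
+#			tp[num] = tp[num + 1] + (uint32_t)(t >> 32);
+#		}
+#
+#		/* conditional final subtraction: rp = tp - np if tp >= np */
+#		uint64_t brw = 0;
+#		for (j = 0; j < num; j++) {
+#			brw = (uint64_t)tp[j] - np[j] - brw;
+#			rp[j] = (uint32_t)brw;
+#			brw = (brw >> 32) & 1;
+#		}
+#		if (((uint64_t)tp[num] - brw) >> 32)	/* borrow: tp < np */
+#			memcpy(rp, tp, num * sizeof(*rp));
+#	}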
+
+$output = pop;
+open STDOUT,">$output";
+
+# int bn_mul_mont(
+$rp="%i0"; # BN_ULONG *rp,
+$ap="%i1"; # const BN_ULONG *ap,
+$bp="%i2"; # const BN_ULONG *bp,
+$np="%i3"; # const BN_ULONG *np,
+$n0="%i4"; # const BN_ULONG *n0,
+$num="%i5"; # int num);
+
+$frame="STACK_FRAME";
+$bias="STACK_BIAS";
+
+$car0="%o0";
+$car1="%o1";
+$car2="%o2"; # 1 bit
+$acc0="%o3";
+$acc1="%o4";
+$mask="%g1"; # 32 bits, what a waste...
+$tmp0="%g4";
+$tmp1="%g5";
+
+$i="%l0";
+$j="%l1";
+$mul0="%l2";
+$mul1="%l3";
+$tp="%l4";
+$apj="%l5";
+$npj="%l6";
+$tpj="%l7";
+
+$fname="bn_mul_mont_int";
+
+$code=<<___;
+#include "sparc_arch.h"
+
+.section ".text",#alloc,#execinstr
+
+.global $fname
+.align 32
+$fname:
+ cmp %o5,4 ! 128 bits minimum
+ bge,pt %icc,.Lenter
+ sethi %hi(0xffffffff),$mask
+ retl
+ clr %o0
+.align 32
+.Lenter:
+ save %sp,-$frame,%sp
+ sll $num,2,$num ! num*=4
+ or $mask,%lo(0xffffffff),$mask
+ ld [$n0],$n0
+ cmp $ap,$bp
+ and $num,$mask,$num
+ ld [$bp],$mul0 ! bp[0]
+ nop
+
+ add %sp,$bias,%o7 ! real top of stack
+ ld [$ap],$car0 ! ap[0] ! redundant in squaring context
+ sub %o7,$num,%o7
+ ld [$ap+4],$apj ! ap[1]
+ and %o7,-1024,%o7
+ ld [$np],$car1 ! np[0]
+ sub %o7,$bias,%sp ! alloca
+ ld [$np+4],$npj ! np[1]
+ be,pt SIZE_T_CC,.Lbn_sqr_mont
+ mov 12,$j
+
+ mulx $car0,$mul0,$car0 ! ap[0]*bp[0]
+ mulx $apj,$mul0,$tmp0 !prologue! ap[1]*bp[0]
+ and $car0,$mask,$acc0
+ add %sp,$bias+$frame,$tp
+ ld [$ap+8],$apj !prologue!
+
+ mulx $n0,$acc0,$mul1 ! "t[0]"*n0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
+ mulx $npj,$mul1,$acc1 !prologue! np[1]*"t[0]"*n0
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ ld [$np+8],$npj !prologue!
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.L1st:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ add $j,4,$j ! j++
+ mov $tmp0,$acc0
+ st $car1,[$tp]
+ cmp $j,$num
+ mov $tmp1,$acc1
+ srlx $car1,32,$car1
+ bl %icc,.L1st
+ add $tp,4,$tp ! tp++
+!.L1st
+
+ mulx $apj,$mul0,$tmp0 !epilogue!
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $tmp0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car1
+
+ add $car0,$car1,$car1
+ st $car1,[$tp+8]
+ srlx $car1,32,$car2
+
+ mov 4,$i ! i++
+ ld [$bp+4],$mul0 ! bp[1]
+.Louter:
+ add %sp,$bias+$frame,$tp
+ ld [$ap],$car0 ! ap[0]
+ ld [$ap+4],$apj ! ap[1]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ ld [$tp],$tmp1 ! tp[0]
+ ld [$tp+4],$tpj ! tp[1]
+ mov 12,$j
+
+ mulx $car0,$mul0,$car0
+ mulx $apj,$mul0,$tmp0 !prologue!
+ add $tmp1,$car0,$car0
+ ld [$ap+8],$apj !prologue!
+ and $car0,$mask,$acc0
+
+ mulx $n0,$acc0,$mul1
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1
+ mulx $npj,$mul1,$acc1 !prologue!
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ ld [$np+8],$npj !prologue!
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.Linner:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $tpj,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ add $acc0,$car0,$car0
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ and $car0,$mask,$acc0
+ ld [$tp+8],$tpj ! tp[j]
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ add $j,4,$j ! j++
+ mov $tmp0,$acc0
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ mov $tmp1,$acc1
+ cmp $j,$num
+ bl %icc,.Linner
+ add $tp,4,$tp ! tp++
+!.Linner
+
+ mulx $apj,$mul0,$tmp0 !epilogue!
+ mulx $npj,$mul1,$tmp1
+ add $tpj,$car0,$car0
+ add $acc0,$car0,$car0
+ ld [$tp+8],$tpj ! tp[j]
+ and $car0,$mask,$acc0
+ add $acc1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $tpj,$car0,$car0
+ add $tmp0,$car0,$car0
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4] ! tp[j-1]
+ srlx $car0,32,$car0
+ add $i,4,$i ! i++
+ srlx $car1,32,$car1
+
+ add $car0,$car1,$car1
+ cmp $i,$num
+ add $car2,$car1,$car1
+ st $car1,[$tp+8]
+
+ srlx $car1,32,$car2
+ bl,a %icc,.Louter
+ ld [$bp+$i],$mul0 ! bp[i]
+!.Louter
+
+ add $tp,12,$tp
+
+.Ltail:
+ add $np,$num,$np
+ add $rp,$num,$rp
+ mov $tp,$ap
+ sub %g0,$num,%o7 ! k=-num
+ ba .Lsub
+ subcc %g0,%g0,%g0 ! clear %icc.c
+.align 16
+.Lsub:
+ ld [$tp+%o7],%o0
+ ld [$np+%o7],%o1
+ subccc %o0,%o1,%o1 ! tp[j]-np[j]
+ add $rp,%o7,$i
+ add %o7,4,%o7
+ brnz %o7,.Lsub
+ st %o1,[$i]
+ subc $car2,0,$car2 ! handle upmost overflow bit
+ and $tp,$car2,$ap
+ andn $rp,$car2,$np
+ or $ap,$np,$ap
+ sub %g0,$num,%o7
+
+.Lcopy:
+ ld [$ap+%o7],%o0 ! copy or in-place refresh
+ st %g0,[$tp+%o7] ! zap tp
+ st %o0,[$rp+%o7]
+ add %o7,4,%o7
+ brnz %o7,.Lcopy
+ nop
+ mov 1,%i0
+ ret
+ restore
+___
+
+########
+######## .Lbn_sqr_mont gives up to 20% *overall* improvement over
+######## code lacking the dedicated squaring procedure that follows.
+########
+$sbit="%o5";
+
+$code.=<<___;
+.align 32
+.Lbn_sqr_mont:
+ mulx $mul0,$mul0,$car0 ! ap[0]*ap[0]
+ mulx $apj,$mul0,$tmp0 !prologue!
+ and $car0,$mask,$acc0
+ add %sp,$bias+$frame,$tp
+ ld [$ap+8],$apj !prologue!
+
+ mulx $n0,$acc0,$mul1 ! "t[0]"*n0
+ srlx $car0,32,$car0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1 ! np[0]*"t[0]"*n0
+ mulx $npj,$mul1,$acc1 !prologue!
+ and $car0,1,$sbit
+ ld [$np+8],$npj !prologue!
+ srlx $car0,1,$car0
+ add $acc0,$car1,$car1
+ srlx $car1,32,$car1
+ mov $tmp0,$acc0 !prologue!
+
+.Lsqr_1st:
+ mulx $apj,$mul0,$tmp0
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0 ! ap[j]*a0+c0
+ add $acc1,$car1,$car1
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ mov $tmp1,$acc1
+ srlx $acc0,32,$sbit
+ add $j,4,$j ! j++
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ mov $tmp0,$acc0
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_1st
+ add $tp,4,$tp ! tp++
+!.Lsqr_1st
+
+ mulx $apj,$mul0,$tmp0 ! epilogue
+ mulx $npj,$mul1,$tmp1
+ add $acc0,$car0,$car0 ! ap[j]*a0+c0
+ add $acc1,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $tmp0,$car0,$car0 ! ap[j]*a0+c0
+ add $tmp1,$car1,$car1
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ or $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ or $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ st $car1,[$tp+8]
+ srlx $car1,32,$car2
+
+ ld [%sp+$bias+$frame],$tmp0 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tmp1 ! tp[1]
+ ld [%sp+$bias+$frame+8],$tpj ! tp[2]
+ ld [$ap+4],$mul0 ! ap[1]
+ ld [$ap+8],$apj ! ap[2]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp0,$mul1
+
+ mulx $mul0,$mul0,$car0
+ and $mul1,$mask,$mul1
+
+ mulx $car1,$mul1,$car1
+ mulx $npj,$mul1,$acc1
+ add $tmp0,$car1,$car1
+ and $car0,$mask,$acc0
+ ld [$np+8],$npj ! np[2]
+ srlx $car1,32,$car1
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add $acc0,$car1,$car1
+ and $car0,1,$sbit
+ add $acc1,$car1,$car1
+ srlx $car0,1,$car0
+ mov 12,$j
+ st $car1,[%sp+$bias+$frame] ! tp[0]=
+ srlx $car1,32,$car1
+ add %sp,$bias+$frame+4,$tp
+
+.Lsqr_2nd:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $acc0,$car0,$car0
+ add $tpj,$sbit,$sbit
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc1,$car1,$car1
+ ld [$tp+8],$tpj ! tp[j]
+ add $acc0,$acc0,$acc0
+ add $j,4,$j ! j++
+ add $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_2nd
+ add $tp,4,$tp ! tp++
+!.Lsqr_2nd
+
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $acc0,$car0,$car0
+ add $tpj,$sbit,$sbit
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc1,$car1,$car1
+ add $acc0,$acc0,$acc0
+ add $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ add $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ ld [%sp+$bias+$frame],$tmp1 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tpj ! tp[1]
+ ld [$ap+8],$mul0 ! ap[2]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp1,$mul1
+ and $mul1,$mask,$mul1
+ mov 8,$i
+
+ mulx $mul0,$mul0,$car0
+ mulx $car1,$mul1,$car1
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add %sp,$bias+$frame,$tp
+ srlx $car1,32,$car1
+ and $car0,1,$sbit
+ srlx $car0,1,$car0
+ mov 4,$j
+
+.Lsqr_outer:
+.Lsqr_inner1:
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $j,4,$j
+ ld [$tp+8],$tpj
+ cmp $j,$i
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_inner1
+ add $tp,4,$tp
+!.Lsqr_inner1
+
+ add $j,4,$j
+ ld [$ap+$j],$apj ! ap[j]
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ ld [$np+$j],$npj ! np[j]
+ add $acc0,$car1,$car1
+ ld [$tp+8],$tpj ! tp[j]
+ add $acc1,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $j,4,$j
+ cmp $j,$num
+ be,pn %icc,.Lsqr_no_inner2
+ add $tp,4,$tp
+
+.Lsqr_inner2:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $tpj,$sbit,$sbit
+ add $acc0,$car0,$car0
+ ld [$ap+$j],$apj ! ap[j]
+ and $car0,$mask,$acc0
+ ld [$np+$j],$npj ! np[j]
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ ld [$tp+8],$tpj ! tp[j]
+ add $sbit,$acc0,$acc0
+ add $j,4,$j ! j++
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ cmp $j,$num
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_inner2
+ add $tp,4,$tp ! tp++
+
+.Lsqr_no_inner2:
+ mulx $apj,$mul0,$acc0
+ mulx $npj,$mul1,$acc1
+ add $tpj,$sbit,$sbit
+ add $acc0,$car0,$car0
+ and $car0,$mask,$acc0
+ srlx $car0,32,$car0
+ add $acc0,$acc0,$acc0
+ add $sbit,$acc0,$acc0
+ srlx $acc0,32,$sbit
+ and $acc0,$mask,$acc0
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp] ! tp[j-1]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0
+ add $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ add $i,4,$i ! i++
+ ld [%sp+$bias+$frame],$tmp1 ! tp[0]
+ ld [%sp+$bias+$frame+4],$tpj ! tp[1]
+ ld [$ap+$i],$mul0 ! ap[j]
+ ld [$np],$car1 ! np[0]
+ ld [$np+4],$npj ! np[1]
+ mulx $n0,$tmp1,$mul1
+ and $mul1,$mask,$mul1
+ add $i,4,$tmp0
+
+ mulx $mul0,$mul0,$car0
+ mulx $car1,$mul1,$car1
+ and $car0,$mask,$acc0
+ add $tmp1,$car1,$car1
+ srlx $car0,32,$car0
+ add %sp,$bias+$frame,$tp
+ srlx $car1,32,$car1
+ and $car0,1,$sbit
+ srlx $car0,1,$car0
+
+ cmp $tmp0,$num ! i<num-1
+ bl %icc,.Lsqr_outer
+ mov 4,$j
+
+.Lsqr_last:
+ mulx $npj,$mul1,$acc1
+ add $tpj,$car1,$car1
+ add $j,4,$j
+ ld [$tp+8],$tpj
+ cmp $j,$i
+ add $acc1,$car1,$car1
+ ld [$np+$j],$npj
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+ bl %icc,.Lsqr_last
+ add $tp,4,$tp
+!.Lsqr_last
+
+ mulx $npj,$mul1,$acc1
+ add $tpj,$acc0,$acc0
+ srlx $acc0,32,$tmp0
+ and $acc0,$mask,$acc0
+ add $tmp0,$sbit,$sbit
+ add $acc0,$car1,$car1
+ add $acc1,$car1,$car1
+ st $car1,[$tp]
+ srlx $car1,32,$car1
+
+ add $car0,$car0,$car0 ! recover $car0
+ add $sbit,$car0,$car0
+ add $car0,$car1,$car1
+ add $car2,$car1,$car1
+ st $car1,[$tp+4]
+ srlx $car1,32,$car2
+
+ ba .Ltail
+ add $tp,8,$tp
+.type $fname,#function
+.size $fname,(.-$fname)
+.asciz	"Montgomery Multiplication for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
+.align 32
+___
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/sparcv9a-mont.pl b/openssl-1.1.0h/crypto/bn/asm/sparcv9a-mont.pl
new file mode 100755
index 0000000..50b6906
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/sparcv9a-mont.pl
@@ -0,0 +1,887 @@
+#! /usr/bin/env perl
+# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2005
+#
+# "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
+# Because unlike the integer multiplier, which simply stalls the whole
+# CPU, the FPU is fully pipelined and can effectively emit a 48-bit
+# partial product every cycle. Why not blended SPARC v9? One can argue
+# that making this module dependent on the UltraSPARC VIS extension
+# limits its binary compatibility. Well yes, it does exclude pre-V(!)
+# SPARC64 implementations from the compatibility matrix. But the rest,
+# the whole Sun UltraSPARC family and Fujitsu's brand new SPARC64 V, all
+# support the VIS extension instructions used in this module. This is
+# considered good enough not to care about HAL SPARC64 users [if any],
+# who have the integer-only pure SPARCv9 module to "fall down" to.
+
+# USI&II cores currently exhibit uniform 2x improvement [over pre-
+# bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
+# performance improves by a few percent for shorter keys and worsens by a
+# few percent for longer keys. This is because the USIII integer multiplier
+# is >3x faster than the USI&II one, which is harder to match [but see the
+# TODO list below]. It should also be noted that SPARC64 V features
+# out-of-order execution, which *might* mean that its integer multiplier
+# is pipelined, which in turn *might* be impossible to match... As an
+# additional note, SPARC64 V implements an FP Multiply-Add instruction,
+# which is perfectly usable in this context... In other words, as far
+# as Fujitsu SPARC64 V goes, talk to the author:-)
+
+# The implementation imposes the following "non-natural" limitations on
+# input arguments:
+# - num may not be less than 4;
+# - num has to be even;
+# Failure to meet either condition is not fatal; it simply doesn't
+# give any performance gain.
+
+# TODO:
+# - modulo-schedule inner loop for better performance (on in-order
+# execution core such as UltraSPARC this shall result in further
+# noticeable(!) improvement);
+# - dedicated squaring procedure[?];
+
+######################################################################
+# November 2006
+#
+# Modulo-scheduled inner loops allow interleaving of floating point and
+# integer instructions and minimize Read-After-Write penalties. This
+# results in *further* 20-50% performance improvement [depending on
+# key length, more for longer keys] on USI&II cores and 30-80% - on
+# USIII&IV.
+
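+# The following is a minimal pure-Perl model of the digit recombination
+# that .L1st/.Linner perform after fdtox: the four integer accumulators
+# (playing the role of %o0..%o3) hold partial sums at weights 2^0, 2^16,
+# 2^32 and 2^48, and are folded into one 64-bit tp[] word plus a carry.
+# This is an illustration only, assuming a perl built with 64-bit
+# integers; the subroutine is hypothetical and never called by the
+# generator.
+sub _recombine_digits_ref {
+	my ($d0,$d1,$d2,$d3) = @_;
+	$d1 += $d0>>16;			# propagate inter-digit carries
+	$d2 += $d1>>16;
+	$d3 += $d2>>16;
+	my $limb = ($d0&0xffff)		# 64-bit tp[] word
+		 | (($d1&0xffff)<<16)
+		 | (($d2&0xffff)<<32)
+		 | (($d3&0xffff)<<48);
+	my $carry = $d3>>16;		# up to 34 bits, added into next limb
+	return ($limb,$carry);
+}
+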
+$output = pop;
+open STDOUT,">$output";
+
+$fname="bn_mul_mont_fpu";
+
+$frame="STACK_FRAME";
+$bias="STACK_BIAS";
+$locals=64;
+
+# In order to provide for 32-/64-bit ABI duality, I keep integers wider
+# than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
+# exclusively for pointers, indexes and other small values...
+# int bn_mul_mont(
+$rp="%i0"; # BN_ULONG *rp,
+$ap="%i1"; # const BN_ULONG *ap,
+$bp="%i2"; # const BN_ULONG *bp,
+$np="%i3"; # const BN_ULONG *np,
+$n0="%i4"; # const BN_ULONG *n0,
+$num="%i5"; # int num);
+
+$tp="%l0"; # t[num]
+$ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
+$ap_h="%l2"; # to these four vectors as double-precision FP values.
+$np_l="%l3"; # This way a bunch of fxtods are eliminated in second
+$np_h="%l4"; # loop and L1-cache aliasing is minimized...
+$i="%l5";
+$j="%l6";
+$mask="%l7"; # 16-bit mask, 0xffff
+
+$n0="%g4"; # reassigned(!) to "64-bit" register
+$carry="%i4"; # %i4 reused(!) for a carry bit
+
+# FP register naming chart
+#
+# ..HILO
+# dcba
+# --------
+# LOa
+# LOb
+# LOc
+# LOd
+# HIa
+# HIb
+# HIc
+# HId
+# ..a
+# ..b
+$ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6";
+$na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14";
+$alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19";
+$nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23";
+
+$dota="%f24"; $dotb="%f26";
+
+$aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
+$ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
+$nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
+$nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
+
+$ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
+
+$code=<<___;
+#include "sparc_arch.h"
+
+.section ".text",#alloc,#execinstr
+
+.global $fname
+.align 32
+$fname:
+ save %sp,-$frame-$locals,%sp
+
+ cmp $num,4
+ bl,a,pn %icc,.Lret
+ clr %i0
+ andcc $num,1,%g0 ! $num has to be even...
+ bnz,a,pn %icc,.Lret
+ clr %i0 ! signal "unsupported input value"
+
+ srl $num,1,$num
+ sethi %hi(0xffff),$mask
+ ld [%i4+0],$n0 ! $n0 reassigned, remember?
+ or $mask,%lo(0xffff),$mask
+ ld [%i4+4],%o0
+ sllx %o0,32,%o0
+ or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
+
+ sll $num,3,$num ! num*=8
+
+ add %sp,$bias,%o0 ! real top of stack
+ sll $num,2,%o1
+ add %o1,$num,%o1 ! %o1=num*5
+ sub %o0,%o1,%o0
+ and %o0,-2048,%o0 ! optimize TLB utilization
+ sub %o0,$bias,%sp ! alloca(5*num*8)
+
+ rd %asi,%o7 ! save %asi
+ add %sp,$bias+$frame+$locals,$tp
+ add $tp,$num,$ap_l
+ add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
+ add $ap_l,$num,$ap_h
+ add $ap_h,$num,$np_l
+ add $np_l,$num,$np_h
+
+ wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
+
+ add $rp,$num,$rp ! readjust input pointers to point
+ add $ap,$num,$ap ! at the ends too...
+ add $bp,$num,$bp
+ add $np,$num,$np
+
+ stx %o7,[%sp+$bias+$frame+48] ! save %asi
+
+ sub %g0,$num,$i ! i=-num
+ sub %g0,$num,$j ! j=-num
+
+ add $ap,$j,%o3
+ add $bp,$i,%o4
+
+ ld [%o3+4],%g1 ! bp[0]
+ ld [%o3+0],%o0
+ ld [%o4+4],%g5 ! ap[0]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
+ add $np,$j,%o5
+
+ mulx %o1,%o0,%o0 ! ap[0]*bp[0]
+ mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
+ stx %o0,[%sp+$bias+$frame+0]
+
+ ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o3+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ ! transfer b[i] to FPU as 4x16-bit values
+ ldda [%o4+2]%asi,$ba
+ fxtod $alo,$alo
+ ldda [%o4+0]%asi,$bb
+ fxtod $ahi,$ahi
+ ldda [%o4+6]%asi,$bc
+ fxtod $nlo,$nlo
+ ldda [%o4+4]%asi,$bd
+ fxtod $nhi,$nhi
+
+ ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
+ ldda [%sp+$bias+$frame+6]%asi,$na
+ fxtod $ba,$ba
+ ldda [%sp+$bias+$frame+4]%asi,$nb
+ fxtod $bb,$bb
+ ldda [%sp+$bias+$frame+2]%asi,$nc
+ fxtod $bc,$bc
+ ldda [%sp+$bias+$frame+0]%asi,$nd
+ fxtod $bd,$bd
+
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fxtod $na,$na
+ std $ahi,[$ap_h+$j]
+ fxtod $nb,$nb
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fxtod $nc,$nc
+ std $nhi,[$np_h+$j]
+ fxtod $nd,$nd
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ add $j,8,$j
+ std $nlob,[%sp+$bias+$frame+8]
+ add $ap,$j,%o4
+ std $nloc,[%sp+$bias+$frame+16]
+ add $np,$j,%o5
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
+ ldx [%sp+$bias+$frame+0],%o0
+ fmuld $alo,$ba,$aloa
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $nlo,$na,$nloa
+ ldx [%sp+$bias+$frame+16],%o2
+ fmuld $alo,$bb,$alob
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $nlo,$nb,$nlob
+
+ srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$bc,$aloc
+ add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bd,$alod
+ add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ srlx %o2,16,%o7
+ fmuld $ahi,$ba,$ahia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ !and %o0,$mask,%o0
+ !and %o1,$mask,%o1
+ !and %o2,$mask,%o2
+ !sllx %o1,16,%o1
+ !sllx %o2,32,%o2
+ !sllx %o3,48,%o7
+ !or %o1,%o0,%o0
+ !or %o2,%o0,%o0
+ !or %o7,%o0,%o0 ! 64-bit result
+ srlx %o3,16,%g1 ! 34-bit carry
+ fmuld $ahi,$bb,$ahib
+
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $dota,$nloa,$nloa
+ faddd $dotb,$nlob,$nlob
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.L1stskip
+ std $nlod,[%sp+$bias+$frame+24]
+
+.align 32 ! incidentally already aligned !
+.L1st:
+ add $ap,$j,%o4
+ add $np,$j,%o5
+ ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
+ fzeros $alo
+ ld [%o4+4],$ahi_
+ fzeros $ahi
+ ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
+ fzeros $nlo
+ ld [%o5+4],$nhi_
+ fzeros $nhi
+
+ fxtod $alo,$alo
+ fxtod $ahi,$ahi
+ fxtod $nlo,$nlo
+ fxtod $nhi,$nhi
+
+ ldx [%sp+$bias+$frame+0],%o0
+ fmuld $alo,$ba,$aloa
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $nlo,$na,$nloa
+ ldx [%sp+$bias+$frame+16],%o2
+ fmuld $alo,$bb,$alob
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $nlo,$nb,$nlob
+
+ srlx %o0,16,%o7
+ std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
+ fmuld $alo,$bc,$aloc
+ add %o7,%o1,%o1
+ std $ahi,[$ap_h+$j]
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ srlx %o1,16,%o7
+ std $nlo,[$np_l+$j] ! save smashed np[j] in double format
+ fmuld $alo,$bd,$alod
+ add %o7,%o2,%o2
+ std $nhi,[$np_h+$j]
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ srlx %o2,16,%o7
+ fmuld $ahi,$ba,$ahia
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ fmuld $ahi,$bb,$ahib
+ sllx %o1,16,%o1
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ sllx %o2,32,%o2
+ fmuld $ahi,$bc,$ahic
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ or %o2,%o0,%o0
+ fmuld $ahi,$bd,$ahid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ addcc %g1,%o0,%o0
+ faddd $dota,$nloa,$nloa
+ srlx %o3,16,%g1 ! 34-bit carry
+ faddd $dotb,$nlob,$nlob
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ std $nlod,[%sp+$bias+$frame+24]
+
+ addcc $j,8,$j
+ bnz,pt %icc,.L1st
+ add $tp,8,$tp
+
+.L1stskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
+ add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+32],%o4
+ addcc %g1,%o0,%o0
+ ldx [%sp+$bias+$frame+40],%o5
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]=
+ add $tp,8,$tp
+
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ mov %g1,$carry
+ stx %o4,[$tp] ! tp[num-1]=
+
+ ba .Louter
+ add $i,8,$i
+.align 32
+.Louter:
+ sub %g0,$num,$j ! j=-num
+ add %sp,$bias+$frame+$locals,$tp
+
+ add $ap,$j,%o3
+ add $bp,$i,%o4
+
+ ld [%o3+4],%g1 ! bp[i]
+ ld [%o3+0],%o0
+ ld [%o4+4],%g5 ! ap[0]
+ sllx %g1,32,%g1
+ ld [%o4+0],%o1
+ sllx %g5,32,%g5
+ or %g1,%o0,%o0
+ or %g5,%o1,%o1
+
+ ldx [$tp],%o2 ! tp[0]
+ mulx %o1,%o0,%o0
+ addcc %o2,%o0,%o0
+ mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
+ stx %o0,[%sp+$bias+$frame+0]
+
+ ! transfer b[i] to FPU as 4x16-bit values
+ ldda [%o4+2]%asi,$ba
+ ldda [%o4+0]%asi,$bb
+ ldda [%o4+6]%asi,$bc
+ ldda [%o4+4]%asi,$bd
+
+ ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
+ ldda [%sp+$bias+$frame+6]%asi,$na
+ fxtod $ba,$ba
+ ldda [%sp+$bias+$frame+4]%asi,$nb
+ fxtod $bb,$bb
+ ldda [%sp+$bias+$frame+2]%asi,$nc
+ fxtod $bc,$bc
+ ldda [%sp+$bias+$frame+0]%asi,$nd
+ fxtod $bd,$bd
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ fxtod $na,$na
+ ldd [$ap_h+$j],$ahi
+ fxtod $nb,$nb
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ fxtod $nc,$nc
+ ldd [$np_h+$j],$nhi
+ fxtod $nd,$nd
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ fmuld $alo,$bd,$alod
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ fmuld $ahi,$ba,$ahia
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ fmuld $ahi,$bb,$ahib
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ fmuld $ahi,$bc,$ahic
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ fmuld $ahi,$bd,$ahid
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+
+ faddd $ahic,$nhic,$dota ! $nhic
+ faddd $ahid,$nhid,$dotb ! $nhid
+
+ faddd $nloc,$nhia,$nloc
+ faddd $nlod,$nhib,$nlod
+
+ fdtox $nloa,$nloa
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ add $j,8,$j
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $alo,$bd,$alod
+ ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $ahi,$ba,$ahia
+
+ srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ add %o7,%o1,%o1
+ fmuld $ahi,$bb,$ahib
+ srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ add %o7,%o2,%o2
+ fmuld $ahi,$bc,$ahic
+ srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ ! why?
+ and %o0,$mask,%o0
+ fmuld $ahi,$bd,$ahid
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
+ sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
+ or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [$tp],%o7
+ faddd $nloc,$nhia,$nloc
+ addcc %o7,%o0,%o0
+ ! end-of-why?
+ faddd $nlod,$nhib,$nlod
+ srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ fdtox $nlob,$nlob
+ fdtox $nloc,$nloc
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ addcc $j,8,$j
+ std $nloc,[%sp+$bias+$frame+16]
+ bz,pn %icc,.Linnerskip
+ std $nlod,[%sp+$bias+$frame+24]
+
+ ba .Linner
+ nop
+.align 32
+.Linner:
+ ldd [$ap_l+$j],$alo ! load a[j] in double format
+ ldd [$ap_h+$j],$ahi
+ ldd [$np_l+$j],$nlo ! load n[j] in double format
+ ldd [$np_h+$j],$nhi
+
+ fmuld $alo,$ba,$aloa
+ fmuld $nlo,$na,$nloa
+ fmuld $alo,$bb,$alob
+ fmuld $nlo,$nb,$nlob
+ fmuld $alo,$bc,$aloc
+ ldx [%sp+$bias+$frame+0],%o0
+ faddd $aloa,$nloa,$nloa
+ fmuld $nlo,$nc,$nloc
+ ldx [%sp+$bias+$frame+8],%o1
+ fmuld $alo,$bd,$alod
+ ldx [%sp+$bias+$frame+16],%o2
+ faddd $alob,$nlob,$nlob
+ fmuld $nlo,$nd,$nlod
+ ldx [%sp+$bias+$frame+24],%o3
+ fmuld $ahi,$ba,$ahia
+
+ srlx %o0,16,%o7
+ faddd $aloc,$nloc,$nloc
+ fmuld $nhi,$na,$nhia
+ add %o7,%o1,%o1
+ fmuld $ahi,$bb,$ahib
+ srlx %o1,16,%o7
+ faddd $alod,$nlod,$nlod
+ fmuld $nhi,$nb,$nhib
+ add %o7,%o2,%o2
+ fmuld $ahi,$bc,$ahic
+ srlx %o2,16,%o7
+ faddd $ahia,$nhia,$nhia
+ fmuld $nhi,$nc,$nhic
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ fmuld $ahi,$bd,$ahid
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ faddd $ahib,$nhib,$nhib
+ fmuld $nhi,$nd,$nhid
+ sllx %o1,16,%o1
+ faddd $dota,$nloa,$nloa
+ sllx %o2,32,%o2
+ faddd $dotb,$nlob,$nlob
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ faddd $ahic,$nhic,$dota ! $nhic
+ or %o2,%o0,%o0
+ faddd $ahid,$nhid,$dotb ! $nhid
+ or %o7,%o0,%o0 ! 64-bit result
+ faddd $nloc,$nhia,$nloc
+ addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
+ faddd $nlod,$nhib,$nlod
+ srlx %o3,16,%g1 ! 34-bit carry
+ fdtox $nloa,$nloa
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+ fdtox $nlob,$nlob
+ addcc %o7,%o0,%o0
+ fdtox $nloc,$nloc
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]
+ fdtox $nlod,$nlod
+
+ std $nloa,[%sp+$bias+$frame+0]
+ std $nlob,[%sp+$bias+$frame+8]
+ std $nloc,[%sp+$bias+$frame+16]
+ addcc $j,8,$j
+ std $nlod,[%sp+$bias+$frame+24]
+ bnz,pt %icc,.Linner
+ add $tp,8,$tp
+
+.Linnerskip:
+ fdtox $dota,$dota
+ fdtox $dotb,$dotb
+
+ ldx [%sp+$bias+$frame+0],%o0
+ ldx [%sp+$bias+$frame+8],%o1
+ ldx [%sp+$bias+$frame+16],%o2
+ ldx [%sp+$bias+$frame+24],%o3
+
+ srlx %o0,16,%o7
+ std $dota,[%sp+$bias+$frame+32]
+ add %o7,%o1,%o1
+ std $dotb,[%sp+$bias+$frame+40]
+ srlx %o1,16,%o7
+ add %o7,%o2,%o2
+ srlx %o2,16,%o7
+ add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
+ and %o0,$mask,%o0
+ and %o1,$mask,%o1
+ and %o2,$mask,%o2
+ sllx %o1,16,%o1
+ sllx %o2,32,%o2
+ sllx %o3,48,%o7
+ or %o1,%o0,%o0
+ or %o2,%o0,%o0
+ ldx [%sp+$bias+$frame+32],%o4
+ or %o7,%o0,%o0 ! 64-bit result
+ ldx [%sp+$bias+$frame+40],%o5
+ addcc %g1,%o0,%o0
+ ldx [$tp+8],%o7 ! tp[j]
+ srlx %o3,16,%g1 ! 34-bit carry
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ addcc %o7,%o0,%o0
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ stx %o0,[$tp] ! tp[j-1]
+ add $tp,8,$tp
+
+ srlx %o4,16,%o7
+ add %o7,%o5,%o5
+ and %o4,$mask,%o4
+ sllx %o5,16,%o7
+ or %o7,%o4,%o4
+ addcc %g1,%o4,%o4
+ srlx %o5,48,%g1
+ bcs,a %xcc,.+8
+ add %g1,1,%g1
+
+ addcc $carry,%o4,%o4
+ stx %o4,[$tp] ! tp[num-1]
+ mov %g1,$carry
+ bcs,a %xcc,.+8
+ add $carry,1,$carry
+
+ addcc $i,8,$i
+ bnz %icc,.Louter
+ nop
+
+ add $tp,8,$tp ! adjust tp to point at the end
+ orn %g0,%g0,%g4
+ sub %g0,$num,%o7 ! n=-num
+ ba .Lsub
+ subcc %g0,%g0,%g0 ! clear %icc.c
+
+.align 32
+.Lsub:
+ ldx [$tp+%o7],%o0
+ add $np,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ srlx %o0,32,%o1
+ subccc %o0,%o2,%o2
+ add $rp,%o7,%g1
+ subccc %o1,%o3,%o3
+ st %o2,[%g1+0]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lsub
+ st %o3,[%g1+4]
+ subc $carry,0,%g4
+ sub %g0,$num,%o7 ! n=-num
+ ba .Lcopy
+ nop
+
+.align 32
+.Lcopy:
+ ldx [$tp+%o7],%o0
+ add $rp,%o7,%g1
+ ld [%g1+0],%o2
+ ld [%g1+4],%o3
+ stx %g0,[$tp+%o7]
+ and %o0,%g4,%o0
+ srlx %o0,32,%o1
+ andn %o2,%g4,%o2
+ andn %o3,%g4,%o3
+ or %o2,%o0,%o0
+ or %o3,%o1,%o1
+ st %o0,[%g1+0]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lcopy
+ st %o1,[%g1+4]
+ sub %g0,$num,%o7 ! n=-num
+
+.Lzap:
+ stx %g0,[$ap_l+%o7]
+ stx %g0,[$ap_h+%o7]
+ stx %g0,[$np_l+%o7]
+ stx %g0,[$np_h+%o7]
+ add %o7,8,%o7
+ brnz,pt %o7,.Lzap
+ nop
+
+ ldx [%sp+$bias+$frame+48],%o7
+ wr %g0,%o7,%asi ! restore %asi
+
+ mov 1,%i0
+.Lret:
+ ret
+ restore
+.type $fname,#function
+.size $fname,(.-$fname)
+.asciz	"Montgomery Multiplication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
+.align 32
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+# The substitution below makes it possible to compile without demanding
+# VIS extensions on the command line, e.g. -xarch=v9 vs. -xarch=v9a. I
+# dare to do this because VIS capability is detected at run-time now
+# and this routine is not called on a CPU not capable of executing it. Do
+# note that fzeros is not the only VIS dependency! Another dependency
+# is implicit: it is just _a_ numerical value loaded into the %asi register,
+# which the assembler can't recognize as VIS-specific...
+$code =~ s/fzeros\s+%f([0-9]+)/
+ sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)
+ /gem;
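+# For instance, "fzeros %f16" is emitted as ".word 0xa1b00c20", since
+# 0x81b00c20|(16<<25) == 0xa1b00c20; the register number goes straight
+# into the opcode's rd field.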
+
+print $code;
+# flush
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/via-mont.pl b/openssl-1.1.0h/crypto/bn/asm/via-mont.pl
new file mode 100644
index 0000000..9f81bc8
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/via-mont.pl
@@ -0,0 +1,254 @@
+#! /usr/bin/env perl
+# Copyright 2006-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# Wrapper around 'rep montmul', the VIA-specific instruction accessing
+# the PadLock Montgomery Multiplier. The wrapper is designed as a drop-in
+# replacement for OpenSSL bn_mul_mont [first implemented in 0.9.9].
+#
+# Below are interleaved outputs from 'openssl speed rsa dsa' for 4
+# different software configurations on 1.5GHz VIA Esther processor.
+# Lines marked with "software integer" denote performance of hand-
+# coded integer-only assembler found in OpenSSL 0.9.7. "Software SSE2"
+# refers to the hand-coded SSE2 Montgomery multiplication procedure found
+# in OpenSSL 0.9.9. "Hardware VIA SDK" refers to the padlock_pmm routine
+# from Padlock SDK 2.0.1, available for download from VIA, which naturally
+# utilizes the magic 'repz montmul' instruction. And finally "hardware
+# this" refers to *this* implementation, which also uses 'repz montmul'.
+#
+# sign verify sign/s verify/s
+# rsa 512 bits 0.001720s 0.000140s 581.4 7149.7 software integer
+# rsa 512 bits 0.000690s 0.000086s 1450.3 11606.0 software SSE2
+# rsa 512 bits 0.006136s 0.000201s 163.0 4974.5 hardware VIA SDK
+# rsa 512 bits 0.000712s 0.000050s 1404.9 19858.5 hardware this
+#
+# rsa 1024 bits 0.008518s 0.000413s 117.4 2420.8 software integer
+# rsa 1024 bits 0.004275s 0.000277s 233.9 3609.7 software SSE2
+# rsa 1024 bits 0.012136s 0.000260s 82.4 3844.5 hardware VIA SDK
+# rsa 1024 bits 0.002522s 0.000116s 396.5 8650.9 hardware this
+#
+# rsa 2048 bits 0.050101s 0.001371s 20.0 729.6 software integer
+# rsa 2048 bits 0.030273s 0.001008s 33.0 991.9 software SSE2
+# rsa 2048 bits 0.030833s 0.000976s 32.4 1025.1 hardware VIA SDK
+# rsa 2048 bits 0.011879s 0.000342s 84.2 2921.7 hardware this
+#
+# rsa 4096 bits 0.327097s 0.004859s 3.1 205.8 software integer
+# rsa 4096 bits 0.229318s 0.003859s 4.4 259.2 software SSE2
+# rsa 4096 bits 0.233953s 0.003274s 4.3 305.4 hardware VIA SDK
+# rsa 4096 bits 0.070493s 0.001166s 14.2 857.6 hardware this
+#
+# dsa 512 bits 0.001342s 0.001651s 745.2 605.7 software integer
+# dsa 512 bits 0.000844s 0.000987s 1185.3 1013.1 software SSE2
+# dsa 512 bits 0.001902s 0.002247s 525.6 444.9 hardware VIA SDK
+# dsa 512 bits 0.000458s 0.000524s 2182.2 1909.1 hardware this
+#
+# dsa 1024 bits 0.003964s 0.004926s 252.3 203.0 software integer
+# dsa 1024 bits 0.002686s 0.003166s 372.3 315.8 software SSE2
+# dsa 1024 bits 0.002397s 0.002823s 417.1 354.3 hardware VIA SDK
+# dsa 1024 bits 0.000978s 0.001170s 1022.2 855.0 hardware this
+#
+# dsa 2048 bits 0.013280s 0.016518s 75.3 60.5 software integer
+# dsa 2048 bits 0.009911s 0.011522s 100.9 86.8 software SSE2
+# dsa 2048 bits 0.009542s 0.011763s 104.8 85.0 hardware VIA SDK
+# dsa 2048 bits 0.002884s 0.003352s 346.8 298.3 hardware this
+#
+# To give you some other reference point here is output for 2.4GHz P4
+# running hand-coded SSE2 bn_mul_mont found in 0.9.9, i.e. "software
+# SSE2" in above terms.
+#
+# rsa 512 bits 0.000407s 0.000047s 2454.2 21137.0
+# rsa 1024 bits 0.002426s 0.000141s 412.1 7100.0
+# rsa 2048 bits 0.015046s 0.000491s 66.5 2034.9
+# rsa 4096 bits 0.109770s 0.002379s 9.1 420.3
+# dsa 512 bits 0.000438s 0.000525s 2281.1 1904.1
+# dsa 1024 bits 0.001346s 0.001595s 742.7 627.0
+# dsa 2048 bits 0.004745s 0.005582s 210.7 179.1
+#
+# Conclusions:
+# - VIA SDK leaves a *lot* of room for improvement (which this
+# implementation successfully fills:-);
+# - 'rep montmul' gives up to >3x performance improvement depending on
+# key length;
+# - in terms of absolute performance it delivers approximately as much
+# as modern out-of-order 32-bit cores [again, for longer keys].
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],"via-mont.pl");
+
+# int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num);
+$func="bn_mul_mont_padlock";
+
+$pad=16*1; # amount of reserved bytes on top of every vector
+
+# stack layout
+$mZeroPrime=&DWP(0,"esp"); # these are specified by VIA
+$A=&DWP(4,"esp");
+$B=&DWP(8,"esp");
+$T=&DWP(12,"esp");
+$M=&DWP(16,"esp");
+$scratch=&DWP(20,"esp");
+$rp=&DWP(24,"esp"); # these are mine
+$sp=&DWP(28,"esp");
+# &DWP(32,"esp") # 32 byte scratch area
+# &DWP(64+(4*$num+$pad)*0,"esp") # padded tp[num]
+# &DWP(64+(4*$num+$pad)*1,"esp") # padded copy of ap[num]
+# &DWP(64+(4*$num+$pad)*2,"esp") # padded copy of bp[num]
+# &DWP(64+(4*$num+$pad)*3,"esp") # padded copy of np[num]
+# Note that the SDK suggests unconditionally allocating 2K per vector. This
+# has quite an impact on performance. It naturally depends on key length,
+# but to give an example, 1024-bit private RSA key operations suffer a >30%
+# penalty. I allocate only as much as is actually required...
+
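+# A small pure-Perl reference for the amount of stack reserved below
+# (mirroring the &lea sequence in the function): every vector is padded
+# by $pad bytes and a 64-byte header/scratch area sits on top. This is a
+# hypothetical helper for illustration only and is never called; run-time
+# alignment to a 64-byte cache line may add up to another 63 bytes.
+sub _padlock_frame_bytes {
+	my ($num) = @_;			# number of 32-bit words per vector
+	my $vec = 4*$num + $pad;	# one padded vector, in bytes
+	return 64 + 4*$vec;		# header/scratch + tp, ap, bp, np copies
+}
+# e.g. _padlock_frame_bytes(32) == 640 bytes for a 1024-bit modulus
+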
+&function_begin($func);
+ &xor ("eax","eax");
+ &mov ("ecx",&wparam(5)); # num
+	# meet VIA's limitations for num [note that the specification
+	# expresses them in bits, while we work with the number of 32-bit words]
+ &test ("ecx",3);
+ &jnz (&label("leave")); # num % 4 != 0
+ &cmp ("ecx",8);
+ &jb (&label("leave")); # num < 8
+ &cmp ("ecx",1024);
+ &ja (&label("leave")); # num > 1024
+
+ &pushf ();
+ &cld ();
+
+ &mov ("edi",&wparam(0)); # rp
+ &mov ("eax",&wparam(1)); # ap
+ &mov ("ebx",&wparam(2)); # bp
+ &mov ("edx",&wparam(3)); # np
+ &mov ("esi",&wparam(4)); # n0
+ &mov ("esi",&DWP(0,"esi")); # *n0
+
+ &lea ("ecx",&DWP($pad,"","ecx",4)); # ecx becomes vector size in bytes
+ &lea ("ebp",&DWP(64,"","ecx",4)); # allocate 4 vectors + 64 bytes
+ &neg ("ebp");
+ &add ("ebp","esp");
+ &and ("ebp",-64); # align to cache-line
+ &xchg ("ebp","esp"); # alloca
+
+ &mov ($rp,"edi"); # save rp
+ &mov ($sp,"ebp"); # save esp
+
+ &mov ($mZeroPrime,"esi");
+ &lea ("esi",&DWP(64,"esp")); # tp
+ &mov ($T,"esi");
+ &lea ("edi",&DWP(32,"esp")); # scratch area
+ &mov ($scratch,"edi");
+ &mov ("esi","eax");
+
+ &lea ("ebp",&DWP(-$pad,"ecx"));
+ &shr ("ebp",2); # restore original num value in ebp
+
+ &xor ("eax","eax");
+
+ &mov ("ecx","ebp");
+ &lea ("ecx",&DWP((32+$pad)/4,"ecx"));# padded tp + scratch
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ &mov ("ecx","ebp");
+ &lea ("edi",&DWP(64+$pad,"esp","ecx",4));# pointer to ap copy
+ &mov ($A,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded ap copy...
+
+ &mov ("ecx","ebp");
+ &mov ("esi","ebx");
+ &mov ($B,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded bp copy...
+
+ &mov ("ecx","ebp");
+ &mov ("esi","edx");
+ &mov ($M,"edi");
+ &data_byte(0xf3,0xa5); # rep movsl, memcpy
+ &mov ("ecx",$pad/4);
+ &data_byte(0xf3,0xab); # rep stosl, bzero pad
+ # edi points at the end of padded np copy...
+
+ # let magic happen...
+ &mov ("ecx","ebp");
+ &mov ("esi","esp");
+ &shl ("ecx",5); # convert word counter to bit counter
+ &align (4);
+ &data_byte(0xf3,0x0f,0xa6,0xc0);# rep montmul
+
+ &mov ("ecx","ebp");
+ &lea ("esi",&DWP(64,"esp")); # tp
+ # edi still points at the end of padded np copy...
+ &neg ("ebp");
+ &lea ("ebp",&DWP(-$pad,"edi","ebp",4)); # so just "rewind"
+ &mov ("edi",$rp); # restore rp
+ &xor ("edx","edx"); # i=0 and clear CF
+
+&set_label("sub",8);
+ &mov ("eax",&DWP(0,"esi","edx",4));
+ &sbb ("eax",&DWP(0,"ebp","edx",4));
+ &mov (&DWP(0,"edi","edx",4),"eax"); # rp[i]=tp[i]-np[i]
+ &lea ("edx",&DWP(1,"edx")); # i++
+ &loop (&label("sub")); # doesn't affect CF!
+
+ &mov ("eax",&DWP(0,"esi","edx",4)); # upmost overflow bit
+ &sbb ("eax",0);
+ &and ("esi","eax");
+ &not ("eax");
+ &mov ("ebp","edi");
+ &and ("ebp","eax");
+ &or ("esi","ebp"); # tp=carry?tp:rp
+
+ &mov ("ecx","edx"); # num
+ &xor ("edx","edx"); # i=0
+
+&set_label("copy",8);
+ &mov ("eax",&DWP(0,"esi","edx",4));
+ &mov (&DWP(64,"esp","edx",4),"ecx"); # zap tp
+ &mov (&DWP(0,"edi","edx",4),"eax");
+ &lea ("edx",&DWP(1,"edx")); # i++
+ &loop (&label("copy"));
+
+ &mov ("ebp",$sp);
+ &xor ("eax","eax");
+
+ &mov ("ecx",64/4);
+ &mov ("edi","esp"); # zap frame including scratch area
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ # zap copies of ap, bp and np
+ &lea ("edi",&DWP(64+$pad,"esp","edx",4));# pointer to ap
+ &lea ("ecx",&DWP(3*$pad/4,"edx","edx",2));
+ &data_byte(0xf3,0xab); # rep stosl, bzero
+
+ &mov ("esp","ebp");
+ &inc ("eax"); # signal "done"
+ &popf ();
+&set_label("leave");
+&function_end($func);
+
+&asciz("Padlock Montgomery Multiplication, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/vis3-mont.pl b/openssl-1.1.0h/crypto/bn/asm/vis3-mont.pl
new file mode 100644
index 0000000..64dba44
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/vis3-mont.pl
@@ -0,0 +1,384 @@
+#! /usr/bin/env perl
+# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2012.
+#
+# SPARCv9 VIS3 Montgomery multiplication procedure suitable for T3 and
+# onward. There are three new instructions used here: umulxhi,
+# addxc[cc] and initializing store. On T3 RSA private key operations
+# are 1.54/1.87/2.11/2.26 times faster for 512/1024/2048/4096-bit key
+# lengths. This is without a dedicated squaring procedure. On T4 the
+# corresponding coefficients are 1.47/2.10/2.80/2.90x, which is mostly
+# for reference purposes, because T4 has dedicated Montgomery
+# multiplication and squaring *instructions* that deliver even more.
+
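+# A minimal Perl reference for the 64x64-bit primitive the loops below
+# build on: mulx yields the low 64 bits of a product, umulxhi the high
+# 64 bits, and addxc/addxccc then add limbs with the carry kept in
+# CCR.xcc. This hypothetical helper is shown for illustration only and
+# is never called by the generator.
+sub _mulx_umulxhi_ref {
+	require Math::BigInt;			# core module
+	my ($a,$b) = @_;			# 64-bit operands
+	my $p  = Math::BigInt->new("$a")->bmul("$b");		 # 128-bit product
+	my $lo = $p->copy->bmod(Math::BigInt->new(2)->bpow(64)); # mulx
+	my $hi = $p->copy->brsft(64);				 # umulxhi
+	return ($lo,$hi);
+}
+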
+$output = pop;
+open STDOUT,">$output";
+
+$frame = "STACK_FRAME";
+$bias = "STACK_BIAS";
+
+$code.=<<___;
+#include "sparc_arch.h"
+
+#ifdef __arch64__
+.register %g2,#scratch
+.register %g3,#scratch
+#endif
+
+.section ".text",#alloc,#execinstr
+___
+
+($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
+ (map("%g$_",(1..5)),map("%o$_",(0..5,7)));
+
+# int bn_mul_mont(
+$rp="%o0"; # BN_ULONG *rp,
+$ap="%o1"; # const BN_ULONG *ap,
+$bp="%o2"; # const BN_ULONG *bp,
+$np="%o3"; # const BN_ULONG *np,
+$n0p="%o4"; # const BN_ULONG *n0,
+$num="%o5"; # int num); # caller ensures that num is even
+ # and >=6
+$code.=<<___;
+.globl bn_mul_mont_vis3
+.align 32
+bn_mul_mont_vis3:
+ add %sp, $bias, %g4 ! real top of stack
+ sll $num, 2, $num ! size in bytes
+ add $num, 63, %g5
+ andn %g5, 63, %g5 ! buffer size rounded up to 64 bytes
+ add %g5, %g5, %g1
+ add %g5, %g1, %g1 ! 3*buffer size
+ sub %g4, %g1, %g1
+ andn %g1, 63, %g1 ! align at 64 byte
+ sub %g1, $frame, %g1 ! new top of stack
+ sub %g1, %g4, %g1
+
+ save %sp, %g1, %sp
+___
+
+# +-------------------------------+<----- %sp
+# . .
+# +-------------------------------+<----- aligned at 64 bytes
+# | __int64 tmp[0] |
+# +-------------------------------+
+# . .
+# . .
+# +-------------------------------+<----- aligned at 64 bytes
+# | __int64 ap[1..0] | converted ap[]
+# +-------------------------------+
+# | __int64 np[1..0] | converted np[]
+# +-------------------------------+
+# | __int64 ap[3..2] |
+# . .
+# . .
+# +-------------------------------+
+($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
+($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz,$anp)=map("%l$_",(0..7));
+($ovf,$i)=($t0,$t1);
+$code.=<<___;
+ ld [$n0p+0], $t0 ! pull n0[0..1] value
+ add %sp, $bias+$frame, $tp
+ ld [$n0p+4], $t1
+ add $tp, %g5, $anp
+ ld [$bp+0], $t2 ! m0=bp[0]
+ sllx $t1, 32, $n0
+ ld [$bp+4], $t3
+ or $t0, $n0, $n0
+ add $bp, 8, $bp
+
+ ld [$ap+0], $t0 ! ap[0]
+ sllx $t3, 32, $m0
+ ld [$ap+4], $t1
+ or $t2, $m0, $m0
+
+ ld [$ap+8], $t2 ! ap[1]
+ sllx $t1, 32, $aj
+ ld [$ap+12], $t3
+ or $t0, $aj, $aj
+ add $ap, 16, $ap
+ stx $aj, [$anp] ! converted ap[0]
+
+ mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
+ umulxhi $aj, $m0, $hi0
+
+ ld [$np+0], $t0 ! np[0]
+ sllx $t3, 32, $aj
+ ld [$np+4], $t1
+ or $t2, $aj, $aj
+
+ ld [$np+8], $t2 ! np[1]
+ sllx $t1, 32, $nj
+ ld [$np+12], $t3
+ or $t0, $nj, $nj
+ add $np, 16, $np
+ stx $nj, [$anp+8] ! converted np[0]
+
+ mulx $lo0, $n0, $m1 ! "tp[0]"*n0
+ stx $aj, [$anp+16] ! converted ap[1]
+
+ mulx $aj, $m0, $alo ! ap[1]*bp[0]
+ umulxhi $aj, $m0, $aj ! ahi=aj
+
+ mulx $nj, $m1, $lo1 ! np[0]*m1
+ umulxhi $nj, $m1, $hi1
+
+ sllx $t3, 32, $nj
+ or $t2, $nj, $nj
+ stx $nj, [$anp+24] ! converted np[1]
+ add $anp, 32, $anp
+
+ addcc $lo0, $lo1, $lo1
+ addxc %g0, $hi1, $hi1
+
+ mulx $nj, $m1, $nlo ! np[1]*m1
+ umulxhi $nj, $m1, $nj ! nhi=nj
+
+ ba .L1st
+ sub $num, 24, $cnt ! cnt=num-3
+
+.align 16
+.L1st:
+ ld [$ap+0], $t0 ! ap[j]
+ addcc $alo, $hi0, $lo0
+ ld [$ap+4], $t1
+ addxc $aj, %g0, $hi0
+
+ sllx $t1, 32, $aj
+ add $ap, 8, $ap
+ or $t0, $aj, $aj
+ stx $aj, [$anp] ! converted ap[j]
+
+ ld [$np+0], $t2 ! np[j]
+ addcc $nlo, $hi1, $lo1
+ ld [$np+4], $t3
+ addxc $nj, %g0, $hi1 ! nhi=nj
+
+ sllx $t3, 32, $nj
+ add $np, 8, $np
+ mulx $aj, $m0, $alo ! ap[j]*bp[0]
+ or $t2, $nj, $nj
+ umulxhi $aj, $m0, $aj ! ahi=aj
+ stx $nj, [$anp+8] ! converted np[j]
+ add $anp, 16, $anp ! anp++
+
+ mulx $nj, $m1, $nlo ! np[j]*m1
+ addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
+ umulxhi $nj, $m1, $nj ! nhi=nj
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+ add $tp, 8, $tp ! tp++
+
+ brnz,pt $cnt, .L1st
+ sub $cnt, 8, $cnt ! j--
+!.L1st
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0 ! ahi=aj
+
+ addcc $nlo, $hi1, $lo1
+ addxc $nj, %g0, $hi1
+ addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+ add $tp, 8, $tp
+
+ addcc $hi0, $hi1, $hi1
+ addxc %g0, %g0, $ovf ! upmost overflow bit
+ stx $hi1, [$tp]
+ add $tp, 8, $tp
+
+ ba .Louter
+ sub $num, 16, $i ! i=num-2
+
+.align 16
+.Louter:
+ ld [$bp+0], $t2 ! m0=bp[i]
+ ld [$bp+4], $t3
+
+ sub $anp, $num, $anp ! rewind
+ sub $tp, $num, $tp
+ sub $anp, $num, $anp
+
+ add $bp, 8, $bp
+ sllx $t3, 32, $m0
+ ldx [$anp+0], $aj ! ap[0]
+ or $t2, $m0, $m0
+ ldx [$anp+8], $nj ! np[0]
+
+ mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
+ ldx [$tp], $tj ! tp[0]
+ umulxhi $aj, $m0, $hi0
+ ldx [$anp+16], $aj ! ap[1]
+ addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
+ mulx $aj, $m0, $alo ! ap[1]*bp[i]
+ addxc %g0, $hi0, $hi0
+ mulx $lo0, $n0, $m1 ! tp[0]*n0
+ umulxhi $aj, $m0, $aj ! ahi=aj
+ mulx $nj, $m1, $lo1 ! np[0]*m1
+ umulxhi $nj, $m1, $hi1
+ ldx [$anp+24], $nj ! np[1]
+ add $anp, 32, $anp
+ addcc $lo1, $lo0, $lo1
+ mulx $nj, $m1, $nlo ! np[1]*m1
+ addxc %g0, $hi1, $hi1
+ umulxhi $nj, $m1, $nj ! nhi=nj
+
+ ba .Linner
+ sub $num, 24, $cnt ! cnt=num-3
+.align 16
+.Linner:
+ addcc $alo, $hi0, $lo0
+ ldx [$tp+8], $tj ! tp[j]
+ addxc $aj, %g0, $hi0 ! ahi=aj
+ ldx [$anp+0], $aj ! ap[j]
+ addcc $nlo, $hi1, $lo1
+ mulx $aj, $m0, $alo ! ap[j]*bp[i]
+ addxc $nj, %g0, $hi1 ! nhi=nj
+ ldx [$anp+8], $nj ! np[j]
+ add $anp, 16, $anp
+ umulxhi $aj, $m0, $aj ! ahi=aj
+ addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
+ mulx $nj, $m1, $nlo ! np[j]*m1
+ addxc %g0, $hi0, $hi0
+ umulxhi $nj, $m1, $nj ! nhi=nj
+ addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+ add $tp, 8, $tp
+ brnz,pt $cnt, .Linner
+ sub $cnt, 8, $cnt
+!.Linner
+ ldx [$tp+8], $tj ! tp[j]
+ addcc $alo, $hi0, $lo0
+ addxc $aj, %g0, $hi0 ! ahi=aj
+ addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi0, $hi0
+
+ addcc $nlo, $hi1, $lo1
+ addxc $nj, %g0, $hi1 ! nhi=nj
+ addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
+ addxc %g0, $hi1, $hi1
+ stx $lo1, [$tp] ! tp[j-1]
+
+ subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
+ addxccc $hi1, $hi0, $hi1
+ addxc %g0, %g0, $ovf
+ stx $hi1, [$tp+8]
+ add $tp, 16, $tp
+
+ brnz,pt $i, .Louter
+ sub $i, 8, $i
+
+ sub $anp, $num, $anp ! rewind
+ sub $tp, $num, $tp
+ sub $anp, $num, $anp
+ ba .Lsub
+ subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
+
+.align 16
+.Lsub:
+ ldx [$tp], $tj
+ add $tp, 8, $tp
+ ldx [$anp+8], $nj
+ add $anp, 16, $anp
+ subccc $tj, $nj, $t2 ! tp[j]-np[j]
+ srlx $tj, 32, $tj
+ srlx $nj, 32, $nj
+ subccc $tj, $nj, $t3
+ add $rp, 8, $rp
+ st $t2, [$rp-4] ! reverse order
+ st $t3, [$rp-8]
+ brnz,pt $cnt, .Lsub
+ sub $cnt, 8, $cnt
+
+ sub $anp, $num, $anp ! rewind
+ sub $tp, $num, $tp
+ sub $anp, $num, $anp
+ sub $rp, $num, $rp
+
+ subc $ovf, %g0, $ovf ! handle upmost overflow bit
+ and $tp, $ovf, $ap
+ andn $rp, $ovf, $np
+ or $np, $ap, $ap ! ap=borrow?tp:rp
+ ba .Lcopy
+ sub $num, 8, $cnt
+
+.align 16
+.Lcopy: ! copy or in-place refresh
+ ld [$ap+0], $t2
+ ld [$ap+4], $t3
+ add $ap, 8, $ap
+ stx %g0, [$tp] ! zap
+ add $tp, 8, $tp
+ stx %g0, [$anp] ! zap
+ stx %g0, [$anp+8]
+ add $anp, 16, $anp
+ st $t3, [$rp+0] ! flip order
+ st $t2, [$rp+4]
+ add $rp, 8, $rp
+ brnz $cnt, .Lcopy
+ sub $cnt, 8, $cnt
+
+ mov 1, %o0
+ ret
+ restore
+.type bn_mul_mont_vis3, #function
+.size bn_mul_mont_vis3, .-bn_mul_mont_vis3
+.asciz "Montgomery Multiplication for SPARCv9 VIS3, CRYPTOGAMS by <appro\@openssl.org>"
+.align 4
+___
+
+# The purpose of these subroutines is to explicitly encode VIS instructions,
+# so that one can compile the module without having to specify VIS
+# extensions on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a.
+# The idea is to reserve the option of producing a "universal" binary and
+# to let the programmer detect at run-time whether the current CPU is
+# VIS-capable.
+sub unvis3 {
+my ($mnemonic,$rs1,$rs2,$rd)=@_;
+my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
+my ($ref,$opf);
+my %visopf = ( "addxc" => 0x011,
+ "addxccc" => 0x013,
+ "umulxhi" => 0x016 );
+
+ $ref = "$mnemonic\t$rs1,$rs2,$rd";
+
+ if ($opf=$visopf{$mnemonic}) {
+ foreach ($rs1,$rs2,$rd) {
+ return $ref if (!/%([goli])([0-9])/);
+ $_=$bias{$1}+$2;
+ }
+
+ return sprintf ".word\t0x%08x !%s",
+ 0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
+ $ref;
+ } else {
+ return $ref;
+ }
+}
+
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
+ &unvis3($1,$2,$3,$4)
+ /ge;
+
+ print $_,"\n";
+}
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/x86-gf2m.pl b/openssl-1.1.0h/crypto/bn/asm/x86-gf2m.pl
new file mode 100644
index 0000000..f464368
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/x86-gf2m.pl
@@ -0,0 +1,325 @@
+#! /usr/bin/env perl
+# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's kind of a low-hanging mechanical port from C for
+# the time being... Except that it has three code paths: pure integer
+# code suitable for any x86 CPU, MMX code suitable for PIII and later
+# and PCLMULQDQ suitable for Westmere and later. Improvement varies
+# from one benchmark and µ-arch to another. Below are interval values
+# for 163- and 571-bit ECDH benchmarks relative to compiler-generated
+# code:
+#
+# PIII 16%-30%
+# P4 12%-12%
+# Opteron 18%-40%
+# Core2 19%-44%
+# Atom 38%-64%
+# Westmere 53%-121%(PCLMULQDQ)/20%-32%(MMX)
+# Sandy Bridge 72%-127%(PCLMULQDQ)/27%-23%(MMX)
+#
+# Note that the improvement coefficients above are not coefficients for
+# bn_GF2m_mul_2x2 itself. For example, the 120% ECDH improvement is the
+# result of bn_GF2m_mul_2x2 being >4x faster. As it gets faster, the
+# benchmark is more and more dominated by other subroutines, most notably by
+# BN_GF2m_mod[_mul]_arr...
+
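+# A plain-Perl reference for the 1x1 GF(2)[x] ("carry-less") product that
+# _mul_1x1_ialu and _mul_1x1_mmx compute below, using the same idea of an
+# 8-entry table indexed by 3 bits of b at a time. It assumes a perl with
+# 64-bit integers and skips the fixup for the top two bits of a that the
+# 32-bit register paths need; bn_GF2m_mul_2x2 then combines three such
+# products Karatsuba-style: a1*b1, a0*b0 and (a0^a1)*(b0^b1). Hypothetical
+# helper, shown for illustration only and never called.
+sub _gf2m_mul_1x1_ref {
+	my ($a,$b) = @_;		# 32-bit inputs, up to 63-bit result
+	my @tab = (0) x 8;
+	for my $x (1..7) {		# tab[x] = a(t)*x(t) over GF(2)
+		$tab[$x] = (($x&1) ? $a    : 0)
+			 ^ (($x&2) ? $a<<1 : 0)
+			 ^ (($x&4) ? $a<<2 : 0);
+	}
+	my $r = 0;
+	for (my $n = 0; $n < 32; $n += 3) {
+		$r ^= $tab[($b>>$n)&7] << $n;	# consume 3 bits of b
+	}
+	return $r;
+}
+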
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],$0,$x86only = $ARGV[$#ARGV] eq "386");
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+$a="eax";
+$b="ebx";
+($a1,$a2,$a4)=("ecx","edx","ebp");
+
+$R="mm0";
+@T=("mm1","mm2");
+($A,$B,$B30,$B31)=("mm2","mm3","mm4","mm5");
+@i=("esi","edi");
+
+ if (!$x86only) {
+&function_begin_B("_mul_1x1_mmx");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &and ($a1,0x3fffffff);
+ &lea ($a4,&DWP(0,$a2,$a2));
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &movd ($A,$a);
+ &movd ($B,$b);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &pxor ($B31,$B31);
+ &pxor ($B30,$B30);
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &pcmpgtd($B31,$A); # broadcast 31st bit
+ &paddd ($A,$A); # $A<<=1
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &pand ($B31,$B);
+ &pcmpgtd($B30,$A); # broadcast 30th bit
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+ &psllq ($B31,31);
+ &pand ($B30,$B);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &mov (@i[0],0x7);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($a4,@i[0]);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ &mov (@i[1],$a4);
+ &psllq ($B30,30);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &movd ($R,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],$a4);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],$a4);
+ &psllq (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &pxor ($R,@T[1]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &movd (@T[1],&DWP(0,"esp",@i[1],4));
+ &pxor ($R,$B30);
+ &psllq (@T[1],3*$n++);
+ &pxor ($R,@T[1]);
+
+ &movd (@T[0],&DWP(0,"esp",@i[0],4));
+ &pxor ($R,$B31);
+ &psllq (@T[0],3*$n);
+ &add ("esp",32+4);
+ &pxor ($R,@T[0]);
+ &ret ();
+&function_end_B("_mul_1x1_mmx");
+ }
+
+($lo,$hi)=("eax","edx");
+@T=("ecx","ebp");
+
+&function_begin_B("_mul_1x1_ialu");
+ &sub ("esp",32+4);
+ &mov ($a1,$a);
+ &lea ($a2,&DWP(0,$a,$a));
+ &lea ($a4,&DWP(0,"",$a,4));
+ &and ($a1,0x3fffffff);
+ &lea (@i[1],&DWP(0,$lo,$lo));
+ &sar ($lo,31); # broadcast 31st bit
+ &mov (&DWP(0*4,"esp"),0);
+ &and ($a2,0x7fffffff);
+ &mov (&DWP(1*4,"esp"),$a1); # a1
+ &xor ($a1,$a2); # a1^a2
+ &mov (&DWP(2*4,"esp"),$a2); # a2
+ &xor ($a2,$a4); # a2^a4
+ &mov (&DWP(3*4,"esp"),$a1); # a1^a2
+ &xor ($a1,$a2); # a1^a4=a1^a2^a2^a4
+ &mov (&DWP(4*4,"esp"),$a4); # a4
+ &xor ($a4,$a2); # a2=a4^a2^a4
+ &mov (&DWP(5*4,"esp"),$a1); # a1^a4
+ &xor ($a4,$a1); # a1^a2^a4
+	&sar	(@i[1],31);	# broadcast 30th bit
+ &and ($lo,$b);
+ &mov (&DWP(6*4,"esp"),$a2); # a2^a4
+ &and (@i[1],$b);
+ &mov (&DWP(7*4,"esp"),$a4); # a1^a2^a4
+ &mov ($hi,$lo);
+ &shl ($lo,31);
+ &mov (@T[0],@i[1]);
+ &shr ($hi,1);
+
+ &mov (@i[0],0x7);
+ &shl (@i[1],30);
+ &and (@i[0],$b);
+ &shr (@T[0],2);
+ &xor ($lo,@i[1]);
+
+ &shr ($b,3);
+ &mov (@i[1],0x7); # 5-byte instruction!?
+ &and (@i[1],$b);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+ &xor ($lo,&DWP(0,"esp",@i[0],4));
+ &mov (@i[0],0x7);
+ &and (@i[0],$b);
+ &shr ($b,3);
+ for($n=1;$n<9;$n++) {
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@i[1],0x7);
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &and (@i[1],$b);
+ &shr (@T[0],32-3*$n);
+ &xor ($lo,@T[1]);
+ &shr ($b,3);
+ &xor ($hi,@T[0]);
+
+ push(@i,shift(@i)); push(@T,shift(@T));
+ }
+ &mov (@T[1],&DWP(0,"esp",@i[1],4));
+ &mov (@T[0],@T[1]);
+ &shl (@T[1],3*$n);
+ &mov (@i[1],&DWP(0,"esp",@i[0],4));
+ &shr (@T[0],32-3*$n); $n++;
+ &mov (@i[0],@i[1]);
+ &xor ($lo,@T[1]);
+ &shl (@i[1],3*$n);
+ &xor ($hi,@T[0]);
+ &shr (@i[0],32-3*$n);
+ &xor ($lo,@i[1]);
+ &xor ($hi,@i[0]);
+
+ &add ("esp",32+4);
+ &ret ();
+&function_end_B("_mul_1x1_ialu");
+
+# void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
+&function_begin_B("bn_GF2m_mul_2x2");
+if (!$x86only) {
+ &picmeup("edx","OPENSSL_ia32cap_P");
+ &mov ("eax",&DWP(0,"edx"));
+ &mov ("edx",&DWP(4,"edx"));
+ &test ("eax",1<<23); # check MMX bit
+ &jz (&label("ialu"));
+if ($sse2) {
+ &test ("eax",1<<24); # check FXSR bit
+ &jz (&label("mmx"));
+ &test ("edx",1<<1); # check PCLMULQDQ bit
+ &jz (&label("mmx"));
+
+ &movups ("xmm0",&QWP(8,"esp"));
+ &shufps ("xmm0","xmm0",0b10110001);
+ &pclmulqdq ("xmm0","xmm0",1);
+ &mov ("eax",&DWP(4,"esp"));
+ &movups (&QWP(0,"eax"),"xmm0");
+ &ret ();
+
+&set_label("mmx",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_mmx"); # a1·b1
+ &movq ("mm7",$R);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # a0·b0
+ &movq ("mm6",$R);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_mmx"); # (a0+a1)·(b0+b1)
+ &pxor ($R,"mm7");
+ &mov ($a,&wparam(0));
+ &pxor ($R,"mm6"); # (a0+a1)·(b0+b1)-a1·b1-a0·b0
+
+ &movq ($A,$R);
+ &psllq ($R,32);
+ &pop ("edi");
+ &psrlq ($A,32);
+ &pop ("esi");
+ &pxor ($R,"mm6");
+ &pop ("ebx");
+ &pxor ($A,"mm7");
+ &movq (&QWP(0,$a),$R);
+ &pop ("ebp");
+ &movq (&QWP(8,$a),$A);
+ &emms ();
+ &ret ();
+&set_label("ialu",16);
+}
+ &push ("ebp");
+ &push ("ebx");
+ &push ("esi");
+ &push ("edi");
+ &stack_push(4+1);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &call ("_mul_1x1_ialu"); # a1·b1
+ &mov (&DWP(8,"esp"),$lo);
+ &mov (&DWP(12,"esp"),$hi);
+
+ &mov ($a,&wparam(2));
+ &mov ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # a0·b0
+ &mov (&DWP(0,"esp"),$lo);
+ &mov (&DWP(4,"esp"),$hi);
+
+ &mov ($a,&wparam(1));
+ &mov ($b,&wparam(3));
+ &xor ($a,&wparam(2));
+ &xor ($b,&wparam(4));
+ &call ("_mul_1x1_ialu"); # (a0+a1)·(b0+b1)
+
+ &mov ("ebp",&wparam(0));
+ @r=("ebx","ecx","edi","esi");
+ &mov (@r[0],&DWP(0,"esp"));
+ &mov (@r[1],&DWP(4,"esp"));
+ &mov (@r[2],&DWP(8,"esp"));
+ &mov (@r[3],&DWP(12,"esp"));
+
+ &xor ($lo,$hi);
+ &xor ($hi,@r[1]);
+ &xor ($lo,@r[0]);
+ &mov (&DWP(0,"ebp"),@r[0]);
+ &xor ($hi,@r[2]);
+ &mov (&DWP(12,"ebp"),@r[3]);
+ &xor ($lo,@r[3]);
+ &stack_pop(4+1);
+ &xor ($hi,@r[3]);
+ &pop ("edi");
+ &xor ($lo,$hi);
+ &pop ("esi");
+ &mov (&DWP(8,"ebp"),$hi);
+ &pop ("ebx");
+ &mov (&DWP(4,"ebp"),$lo);
+ &pop ("ebp");
+ &ret ();
+&function_end_B("bn_GF2m_mul_2x2");
+
+&asciz ("GF(2^m) Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/x86-mont.pl b/openssl-1.1.0h/crypto/bn/asm/x86-mont.pl
new file mode 100755
index 0000000..6787503
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/x86-mont.pl
@@ -0,0 +1,629 @@
+#! /usr/bin/env perl
+# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2005
+#
+# This is "teaser" code, as it can be improved in several ways...
+# First of all, a non-SSE2 path should be implemented (yes, for now it
+# performs Montgomery multiplication/convolution only on SSE2-capable
+# CPUs such as P4; others fall back to the original code). Then the inner
+# loop can be unrolled and modulo-scheduled to improve ILP and possibly
+# moved to the 128-bit XMM register bank (though that would require input
+# rearrangement and/or increase bus bandwidth utilization). A dedicated
+# squaring procedure should give further performance improvement...
+# Yet, even as a draft, the code improves the rsa512 *sign* benchmark by
+# 110%(!), rsa1024 by 70% and rsa4096 by 20%:-)
+
+# December 2006
+#
+# Modulo-scheduling SSE2 loops results in further 15-20% improvement.
+# Integer-only code [being equipped with dedicated squaring procedure]
+# gives ~40% on rsa512 sign benchmark...
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$output = pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],$0);
+
+$sse2=0;
+for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
+
+&external_label("OPENSSL_ia32cap_P") if ($sse2);
+
+&function_begin("bn_mul_mont");
+
+$i="edx";
+$j="ecx";
+$ap="esi"; $tp="esi"; # overlapping variables!!!
+$rp="edi"; $bp="edi"; # overlapping variables!!!
+$np="ebp";
+$num="ebx";
+
+$_num=&DWP(4*0,"esp"); # stack top layout
+$_rp=&DWP(4*1,"esp");
+$_ap=&DWP(4*2,"esp");
+$_bp=&DWP(4*3,"esp");
+$_np=&DWP(4*4,"esp");
+$_n0=&DWP(4*5,"esp"); $_n0q=&QWP(4*5,"esp");
+$_sp=&DWP(4*6,"esp");
+$_bpend=&DWP(4*7,"esp");
+$frame=32; # size of above frame rounded up to 16n
+
+ &xor ("eax","eax");
+ &mov ("edi",&wparam(5)); # int num
+ &cmp ("edi",4);
+ &jl (&label("just_leave"));
+
+ &lea ("esi",&wparam(0)); # put aside pointer to argument block
+ &lea ("edx",&wparam(1)); # load ap
+ &add ("edi",2); # extra two words on top of tp
+ &neg ("edi");
+ &lea ("ebp",&DWP(-$frame,"esp","edi",4)); # future alloca($frame+4*(num+2))
+ &neg ("edi");
+
+	# minimize cache contention by arranging a 2K window between the
+	# stack pointer and the ap argument [np is also a position-sensitive
+	# vector, but it's assumed to be near ap, as it's allocated at about
+	# the same time].
+ &mov ("eax","ebp");
+ &sub ("eax","edx");
+ &and ("eax",2047);
+ &sub ("ebp","eax"); # this aligns sp and ap modulo 2048
+
+ &xor ("edx","ebp");
+ &and ("edx",2048);
+ &xor ("edx",2048);
+ &sub ("ebp","edx"); # this splits them apart modulo 4096
+
+ &and ("ebp",-64); # align to cache line
+
+ # An OS-agnostic version of __chkstk.
+ #
+	# Some OSes (Windows) insist on the stack being "wired" to
+	# physical memory in a strictly sequential manner, i.e. if a stack
+	# allocation spans two pages, then a reference to the farthest one
+	# can be punished with SEGV. But page walking can do good even on
+	# other OSes, because it guarantees that a villain thread hits
+	# the guard page before it can do damage to an innocent one...
+ &mov ("eax","esp");
+ &sub ("eax","ebp");
+ &and ("eax",-4096);
+ &mov ("edx","esp"); # saved stack pointer!
+ &lea ("esp",&DWP(0,"ebp","eax"));
+ &mov ("eax",&DWP(0,"esp"));
+ &cmp ("esp","ebp");
+ &ja (&label("page_walk"));
+ &jmp (&label("page_walk_done"));
+
+&set_label("page_walk",16);
+ &lea ("esp",&DWP(-4096,"esp"));
+ &mov ("eax",&DWP(0,"esp"));
+ &cmp ("esp","ebp");
+ &ja (&label("page_walk"));
+&set_label("page_walk_done");
+
+ ################################# load argument block...
+ &mov ("eax",&DWP(0*4,"esi"));# BN_ULONG *rp
+ &mov ("ebx",&DWP(1*4,"esi"));# const BN_ULONG *ap
+ &mov ("ecx",&DWP(2*4,"esi"));# const BN_ULONG *bp
+ &mov ("ebp",&DWP(3*4,"esi"));# const BN_ULONG *np
+ &mov ("esi",&DWP(4*4,"esi"));# const BN_ULONG *n0
+ #&mov ("edi",&DWP(5*4,"esi"));# int num
+
+ &mov ("esi",&DWP(0,"esi")); # pull n0[0]
+ &mov ($_rp,"eax"); # ... save a copy of argument block
+ &mov ($_ap,"ebx");
+ &mov ($_bp,"ecx");
+ &mov ($_np,"ebp");
+ &mov ($_n0,"esi");
+ &lea ($num,&DWP(-3,"edi")); # num=num-1 to assist modulo-scheduling
+ #&mov ($_num,$num); # redundant as $num is not reused
+ &mov ($_sp,"edx"); # saved stack pointer!
+
+if($sse2) {
+$acc0="mm0"; # mmx register bank layout
+$acc1="mm1";
+$car0="mm2";
+$car1="mm3";
+$mul0="mm4";
+$mul1="mm5";
+$temp="mm6";
+$mask="mm7";
+
+ &picmeup("eax","OPENSSL_ia32cap_P");
+ &bt (&DWP(0,"eax"),26);
+ &jnc (&label("non_sse2"));
+
+ &mov ("eax",-1);
+ &movd ($mask,"eax"); # mask 32 lower bits
+
+ &mov ($ap,$_ap); # load input pointers
+ &mov ($bp,$_bp);
+ &mov ($np,$_np);
+
+ &xor ($i,$i); # i=0
+ &xor ($j,$j); # j=0
+
+ &movd ($mul0,&DWP(0,$bp)); # bp[0]
+ &movd ($mul1,&DWP(0,$ap)); # ap[0]
+ &movd ($car1,&DWP(0,$np)); # np[0]
+
+ &pmuludq($mul1,$mul0); # ap[0]*bp[0]
+ &movq ($car0,$mul1);
+ &movq ($acc0,$mul1); # I wish movd worked for
+ &pand ($acc0,$mask); # inter-register transfers
+
+ &pmuludq($mul1,$_n0q); # *=n0
+
+ &pmuludq($car1,$mul1); # "t[0]"*np[0]*n0
+ &paddq ($car1,$acc0);
+
+ &movd ($acc1,&DWP(4,$np)); # np[1]
+ &movd ($acc0,&DWP(4,$ap)); # ap[1]
+
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+
+ &inc ($j); # j++
+&set_label("1st",16);
+ &pmuludq($acc0,$mul0); # ap[j]*bp[0]
+ &pmuludq($acc1,$mul1); # np[j]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &pand ($acc0,$mask);
+ &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
+ &paddq ($car1,$acc0); # +=ap[j]*bp[0];
+ &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1]
+ &psrlq ($car0,32);
+ &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[j-1]=
+ &psrlq ($car1,32);
+
+ &lea ($j,&DWP(1,$j));
+ &cmp ($j,$num);
+ &jl (&label("1st"));
+
+ &pmuludq($acc0,$mul0); # ap[num-1]*bp[0]
+ &pmuludq($acc1,$mul1); # np[num-1]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &pand ($acc0,$mask);
+ &paddq ($car1,$acc0); # +=ap[num-1]*bp[0];
+ &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]=
+
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+
+ &paddq ($car1,$car0);
+ &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1]
+
+ &inc ($i); # i++
+&set_label("outer");
+ &xor ($j,$j); # j=0
+
+ &movd ($mul0,&DWP(0,$bp,$i,4)); # bp[i]
+ &movd ($mul1,&DWP(0,$ap)); # ap[0]
+ &movd ($temp,&DWP($frame,"esp")); # tp[0]
+ &movd ($car1,&DWP(0,$np)); # np[0]
+ &pmuludq($mul1,$mul0); # ap[0]*bp[i]
+
+ &paddq ($mul1,$temp); # +=tp[0]
+ &movq ($acc0,$mul1);
+ &movq ($car0,$mul1);
+ &pand ($acc0,$mask);
+
+ &pmuludq($mul1,$_n0q); # *=n0
+
+ &pmuludq($car1,$mul1);
+ &paddq ($car1,$acc0);
+
+ &movd ($temp,&DWP($frame+4,"esp")); # tp[1]
+ &movd ($acc1,&DWP(4,$np)); # np[1]
+ &movd ($acc0,&DWP(4,$ap)); # ap[1]
+
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+ &paddq ($car0,$temp); # +=tp[1]
+
+ &inc ($j); # j++
+ &dec ($num);
+&set_label("inner");
+ &pmuludq($acc0,$mul0); # ap[j]*bp[i]
+ &pmuludq($acc1,$mul1); # np[j]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &movd ($temp,&DWP($frame+4,"esp",$j,4));# tp[j+1]
+ &pand ($acc0,$mask);
+ &movd ($acc1,&DWP(4,$np,$j,4)); # np[j+1]
+ &paddq ($car1,$acc0); # +=ap[j]*bp[i]+tp[j]
+ &movd ($acc0,&DWP(4,$ap,$j,4)); # ap[j+1]
+ &psrlq ($car0,32);
+ &movd (&DWP($frame-4,"esp",$j,4),$car1);# tp[j-1]=
+ &psrlq ($car1,32);
+ &paddq ($car0,$temp); # +=tp[j+1]
+
+ &dec ($num);
+ &lea ($j,&DWP(1,$j)); # j++
+ &jnz (&label("inner"));
+
+ &mov ($num,$j);
+ &pmuludq($acc0,$mul0); # ap[num-1]*bp[i]
+ &pmuludq($acc1,$mul1); # np[num-1]*m1
+ &paddq ($car0,$acc0); # +=c0
+ &paddq ($car1,$acc1); # +=c1
+
+ &movq ($acc0,$car0);
+ &pand ($acc0,$mask);
+ &paddq ($car1,$acc0); # +=ap[num-1]*bp[i]+tp[num-1]
+ &movd (&DWP($frame-4,"esp",$j,4),$car1); # tp[num-2]=
+ &psrlq ($car0,32);
+ &psrlq ($car1,32);
+
+ &movd ($temp,&DWP($frame+4,"esp",$num,4)); # += tp[num]
+ &paddq ($car1,$car0);
+ &paddq ($car1,$temp);
+ &movq (&QWP($frame,"esp",$num,4),$car1); # tp[num].tp[num-1]
+
+ &lea ($i,&DWP(1,$i)); # i++
+ &cmp ($i,$num);
+ &jle (&label("outer"));
+
+ &emms (); # done with mmx bank
+ &jmp (&label("common_tail"));
+
+&set_label("non_sse2",16);
+}
+
+if (0) {
+ &mov ("esp",$_sp);
+ &xor ("eax","eax"); # signal "not fast enough [yet]"
+ &jmp (&label("just_leave"));
+ # While the below code provides competitive performance for
+ # all key lengths on modern Intel cores, it's still more
+	# than 10% slower for 4096-bit keys elsewhere:-( "Competitive"
+ # means compared to the original integer-only assembler.
+ # 512-bit RSA sign is better by ~40%, but that's about all
+ # one can say about all CPUs...
+} else {
+$inp="esi"; # integer path uses these registers differently
+$word="edi";
+$carry="ebp";
+
+ &mov ($inp,$_ap);
+ &lea ($carry,&DWP(1,$num));
+ &mov ($word,$_bp);
+ &xor ($j,$j); # j=0
+ &mov ("edx",$inp);
+ &and ($carry,1); # see if num is even
+ &sub ("edx",$word); # see if ap==bp
+ &lea ("eax",&DWP(4,$word,$num,4)); # &bp[num]
+ &or ($carry,"edx");
+ &mov ($word,&DWP(0,$word)); # bp[0]
+ &jz (&label("bn_sqr_mont"));
+ &mov ($_bpend,"eax");
+ &mov ("eax",&DWP(0,$inp));
+ &xor ("edx","edx");
+
+&set_label("mull",16);
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*bp[0]
+ &add ($carry,"eax");
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1]
+ &cmp ($j,$num);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("mull"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # ap[num-1]*bp[0]
+ &mov ($word,$_n0);
+ &add ("eax",$carry);
+ &mov ($inp,$_np);
+ &adc ("edx",0);
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+
+ &mov (&DWP($frame,"esp",$num,4),"eax"); # tp[num-1]=
+ &xor ($j,$j);
+ &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]=
+ &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]=
+
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+ &adc ("edx",0);
+ &inc ($j);
+
+ &jmp (&label("2ndmadd"));
+
+&set_label("1stmadd",16);
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*bp[i]
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j+1]
+ &adc ("edx",0);
+ &cmp ($j,$num);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("1stmadd"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # ap[num-1]*bp[i]
+ &add ("eax",&DWP($frame,"esp",$num,4)); # +=tp[num-1]
+ &mov ($word,$_n0);
+ &adc ("edx",0);
+ &mov ($inp,$_np);
+ &add ($carry,"eax");
+ &adc ("edx",0);
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+
+ &xor ($j,$j);
+ &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
+ &mov (&DWP($frame,"esp",$num,4),$carry); # tp[num-1]=
+ &adc ($j,0);
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &mov (&DWP($frame+4,"esp",$num,4),"edx"); # tp[num]=
+ &mov (&DWP($frame+8,"esp",$num,4),$j); # tp[num+1]=
+
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+ &adc ("edx",0);
+ &mov ($j,1);
+
+&set_label("2ndmadd",16);
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+1]
+ &adc ("edx",0);
+ &cmp ($j,$num);
+ &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j-1]=
+ &jl (&label("2ndmadd"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1]
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &adc ("edx",0);
+ &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]=
+
+ &xor ("eax","eax");
+ &mov ($j,$_bp); # &bp[i]
+ &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
+ &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1]
+ &lea ($j,&DWP(4,$j));
+ &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]=
+ &cmp ($j,$_bpend);
+ &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]=
+ &je (&label("common_tail"));
+
+ &mov ($word,&DWP(0,$j)); # bp[i+1]
+ &mov ($inp,$_ap);
+ &mov ($_bp,$j); # &bp[++i]
+ &xor ($j,$j);
+ &xor ("edx","edx");
+ &mov ("eax",&DWP(0,$inp));
+ &jmp (&label("1stmadd"));
+
+&set_label("bn_sqr_mont",16);
+$sbit=$num;
+ &mov ($_num,$num);
+ &mov ($_bp,$j); # i=0
+
+ &mov ("eax",$word); # ap[0]
+ &mul ($word); # ap[0]*ap[0]
+ &mov (&DWP($frame,"esp"),"eax"); # tp[0]=
+ &mov ($sbit,"edx");
+ &shr ("edx",1);
+ &and ($sbit,1);
+ &inc ($j);
+&set_label("sqr",16);
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j]
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*ap[0]
+ &add ("eax",$carry);
+ &lea ($j,&DWP(1,$j));
+ &adc ("edx",0);
+ &lea ($carry,&DWP(0,$sbit,"eax",2));
+ &shr ("eax",31);
+ &cmp ($j,$_num);
+ &mov ($sbit,"eax");
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("sqr"));
+
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[num-1]
+ &mov ($carry,"edx");
+ &mul ($word); # ap[num-1]*ap[0]
+ &add ("eax",$carry);
+ &mov ($word,$_n0);
+ &adc ("edx",0);
+ &mov ($inp,$_np);
+ &lea ($carry,&DWP(0,$sbit,"eax",2));
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+ &shr ("eax",31);
+ &mov (&DWP($frame,"esp",$j,4),$carry); # tp[num-1]=
+
+ &lea ($carry,&DWP(0,"eax","edx",2));
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &shr ("edx",31);
+ &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num]=
+ &mov (&DWP($frame+8,"esp",$j,4),"edx"); # tp[num+1]=
+
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &mov ($num,$j);
+ &adc ("edx",0);
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+ &mov ($j,1);
+
+&set_label("3rdmadd",16);
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(4,$inp,$j,4)); # np[j+1]
+ &adc ("edx",0);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j-1]=
+
+ &mov ($carry,"edx");
+ &mul ($word); # np[j+1]*m
+ &add ($carry,&DWP($frame+4,"esp",$j,4)); # +=tp[j+1]
+ &lea ($j,&DWP(2,$j));
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &mov ("eax",&DWP(0,$inp,$j,4)); # np[j+2]
+ &adc ("edx",0);
+ &cmp ($j,$num);
+ &mov (&DWP($frame-8,"esp",$j,4),$carry); # tp[j]=
+ &jl (&label("3rdmadd"));
+
+ &mov ($carry,"edx");
+ &mul ($word); # np[j]*m
+ &add ($carry,&DWP($frame,"esp",$num,4)); # +=tp[num-1]
+ &adc ("edx",0);
+ &add ($carry,"eax");
+ &adc ("edx",0);
+ &mov (&DWP($frame-4,"esp",$num,4),$carry); # tp[num-2]=
+
+ &mov ($j,$_bp); # i
+ &xor ("eax","eax");
+ &mov ($inp,$_ap);
+ &add ("edx",&DWP($frame+4,"esp",$num,4)); # carry+=tp[num]
+ &adc ("eax",&DWP($frame+8,"esp",$num,4)); # +=tp[num+1]
+ &mov (&DWP($frame,"esp",$num,4),"edx"); # tp[num-1]=
+ &cmp ($j,$num);
+ &mov (&DWP($frame+4,"esp",$num,4),"eax"); # tp[num]=
+ &je (&label("common_tail"));
+
+ &mov ($word,&DWP(4,$inp,$j,4)); # ap[i]
+ &lea ($j,&DWP(1,$j));
+ &mov ("eax",$word);
+ &mov ($_bp,$j); # ++i
+ &mul ($word); # ap[i]*ap[i]
+ &add ("eax",&DWP($frame,"esp",$j,4)); # +=tp[i]
+ &adc ("edx",0);
+ &mov (&DWP($frame,"esp",$j,4),"eax"); # tp[i]=
+ &xor ($carry,$carry);
+ &cmp ($j,$num);
+ &lea ($j,&DWP(1,$j));
+ &je (&label("sqrlast"));
+
+ &mov ($sbit,"edx"); # zaps $num
+ &shr ("edx",1);
+ &and ($sbit,1);
+&set_label("sqradd",16);
+ &mov ("eax",&DWP(0,$inp,$j,4)); # ap[j]
+ &mov ($carry,"edx");
+ &mul ($word); # ap[j]*ap[i]
+ &add ("eax",$carry);
+ &lea ($carry,&DWP(0,"eax","eax"));
+ &adc ("edx",0);
+ &shr ("eax",31);
+ &add ($carry,&DWP($frame,"esp",$j,4)); # +=tp[j]
+ &lea ($j,&DWP(1,$j));
+ &adc ("eax",0);
+ &add ($carry,$sbit);
+ &adc ("eax",0);
+ &cmp ($j,$_num);
+ &mov (&DWP($frame-4,"esp",$j,4),$carry); # tp[j]=
+ &mov ($sbit,"eax");
+ &jle (&label("sqradd"));
+
+ &mov ($carry,"edx");
+ &add ("edx","edx");
+ &shr ($carry,31);
+ &add ("edx",$sbit);
+ &adc ($carry,0);
+&set_label("sqrlast");
+ &mov ($word,$_n0);
+ &mov ($inp,$_np);
+ &imul ($word,&DWP($frame,"esp")); # n0*tp[0]
+
+ &add ("edx",&DWP($frame,"esp",$j,4)); # +=tp[num]
+ &mov ("eax",&DWP(0,$inp)); # np[0]
+ &adc ($carry,0);
+ &mov (&DWP($frame,"esp",$j,4),"edx"); # tp[num]=
+ &mov (&DWP($frame+4,"esp",$j,4),$carry); # tp[num+1]=
+
+ &mul ($word); # np[0]*m
+ &add ("eax",&DWP($frame,"esp")); # +=tp[0]
+ &lea ($num,&DWP(-1,$j));
+ &adc ("edx",0);
+ &mov ($j,1);
+ &mov ("eax",&DWP(4,$inp)); # np[1]
+
+ &jmp (&label("3rdmadd"));
+}
+
+&set_label("common_tail",16);
+ &mov ($np,$_np); # load modulus pointer
+ &mov ($rp,$_rp); # load result pointer
+ &lea ($tp,&DWP($frame,"esp")); # [$ap and $bp are zapped]
+
+ &mov ("eax",&DWP(0,$tp)); # tp[0]
+ &mov ($j,$num); # j=num-1
+ &xor ($i,$i); # i=0 and clear CF!
+
+&set_label("sub",16);
+ &sbb ("eax",&DWP(0,$np,$i,4));
+ &mov (&DWP(0,$rp,$i,4),"eax"); # rp[i]=tp[i]-np[i]
+ &dec ($j); # doesn't affect CF!
+ &mov ("eax",&DWP(4,$tp,$i,4)); # tp[i+1]
+ &lea ($i,&DWP(1,$i)); # i++
+ &jge (&label("sub"));
+
+ &sbb ("eax",0); # handle upmost overflow bit
+ &and ($tp,"eax");
+ &not ("eax");
+ &mov ($np,$rp);
+ &and ($np,"eax");
+ &or ($tp,$np); # tp=carry?tp:rp
+
+&set_label("copy",16); # copy or in-place refresh
+ &mov ("eax",&DWP(0,$tp,$num,4));
+ &mov (&DWP(0,$rp,$num,4),"eax"); # rp[i]=tp[i]
+ &mov (&DWP($frame,"esp",$num,4),$j); # zap temporary vector
+ &dec ($num);
+ &jge (&label("copy"));
+
+ &mov ("esp",$_sp); # pull saved stack pointer
+ &mov ("eax",1);
+&set_label("just_leave");
+&function_end("bn_mul_mont");
+
+&asciz("Montgomery Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
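Both the SSE2 and the integer paths of bn_mul_mont above follow the same word-serial Montgomery multiplication with a final conditional subtraction (the "common_tail"). A hedged C sketch of that algorithm, using 32-bit words and an illustrative fixed-size temporary (all names are hypothetical):

#include <stdint.h>

#define MAX_WORDS 64                            /* illustrative bound only */

/* Word-serial Montgomery multiplication: rp = ap*bp*R^-1 mod np, where
 * R = 2^(32*num) and n0 = -np[0]^-1 mod 2^32. For each word bp[i] the
 * running value tp is increased by ap*bp[i], then by m*np with m chosen
 * so that tp's low word becomes zero, and tp is shifted down one word.
 * The tail performs the conditional subtraction seen in "common_tail". */
static void mont_mul_sketch(uint32_t *rp, const uint32_t *ap,
                            const uint32_t *bp, const uint32_t *np,
                            uint32_t n0, int num)
{
    uint32_t tp[MAX_WORDS + 2] = { 0 }, sub[MAX_WORDS];
    uint64_t c, borrow = 0;
    uint32_t m;
    int i, j;

    for (i = 0; i < num; i++) {
        for (c = 0, j = 0; j < num; j++) {      /* tp += ap * bp[i] */
            c += (uint64_t)ap[j] * bp[i] + tp[j];
            tp[j] = (uint32_t)c;
            c >>= 32;
        }
        c += tp[num];
        tp[num] = (uint32_t)c;
        tp[num + 1] = (uint32_t)(c >> 32);

        m = tp[0] * n0;                         /* makes tp[0] vanish below */
        for (c = 0, j = 0; j < num; j++) {      /* tp += m * np */
            c += (uint64_t)m * np[j] + tp[j];
            tp[j] = (uint32_t)c;
            c >>= 32;
        }
        c += tp[num];
        tp[num] = (uint32_t)c;
        tp[num + 1] += (uint32_t)(c >> 32);

        for (j = 0; j <= num; j++)              /* tp /= 2^32 (tp[0] == 0) */
            tp[j] = tp[j + 1];
        tp[num + 1] = 0;
    }

    for (j = 0; j < num; j++) {                 /* sub = tp - np */
        uint64_t t = (uint64_t)tp[j] - np[j] - borrow;
        sub[j] = (uint32_t)t;
        borrow = (t >> 32) & 1;
    }
    borrow = (tp[num] < borrow);                /* final borrow out? */
    for (j = 0; j < num; j++)                   /* rp = borrow ? tp : sub */
        rp[j] = borrow ? tp[j] : sub[j];
}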
diff --git a/openssl-1.1.0h/crypto/bn/asm/x86_64-gcc.c b/openssl-1.1.0h/crypto/bn/asm/x86_64-gcc.c
new file mode 100644
index 0000000..0ff3805
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/x86_64-gcc.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the OpenSSL license (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "../bn_lcl.h"
+#if !(defined(__GNUC__) && __GNUC__>=2)
+# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
+#else
+/*-
+ * x86_64 BIGNUM accelerator version 0.1, December 2002.
+ *
+ * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+ * project.
+ *
+ * Rights for redistribution and usage in source and binary forms are
+ * granted according to the OpenSSL license. Warranty of any kind is
+ * disclaimed.
+ *
+ * Q. Version 0.1? It doesn't sound like Andy, he used to assign real
+ * versions, like 1.0...
+ * A. Well, that's because this code is basically a quick-n-dirty
+ * proof-of-concept hack. As you can see it's implemented with
+ * inline assembler, which means that you're bound to GCC and that
+ * there might be enough room for further improvement.
+ *
+ * Q. Why inline assembler?
+ * A. x86_64 features its own ABI, which I'm not familiar with. This is
+ *    why I decided to let the compiler take care of subroutine
+ *    prologue/epilogue as well as register allocation. For reference,
+ *    Win64 implements a different ABI for AMD64 than Linux does.
+ *
+ * Q. How much faster does it get?
+ * A. 'apps/openssl speed rsa dsa' output with no-asm:
+ *
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
+ * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
+ * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
+ * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
+ * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
+ * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
+ *
+ * 'apps/openssl speed rsa dsa' output with this module:
+ *
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
+ * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
+ * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
+ * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
+ * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
+ * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
+ *
+ * For reference: the IA-32 assembler implementation performs
+ * very much like 64-bit code compiled with no-asm on the same
+ * machine.
+ */
+
+# if defined(_WIN64) || !defined(__LP64__)
+# define BN_ULONG unsigned long long
+# else
+# define BN_ULONG unsigned long
+# endif
+
+# undef mul
+# undef mul_add
+
+/*-
+ * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
+ * "g"(0) let the compiler to decide where does it
+ * want to keep the value of zero;
+ */
+# define mul_add(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"m"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+m"(r),"+d"(high) \
+ : "r"(carry),"g"(0) \
+ : "cc"); \
+ carry=high; \
+ } while (0)
+
+# define mul(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"g"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ (r)=carry, carry=high; \
+ } while (0)
+# undef sqr
+# define sqr(r0,r1,a) \
+ asm ("mulq %2" \
+ : "=a"(r0),"=d"(r1) \
+ : "a"(a) \
+ : "cc");
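For readers less fluent in inline asm, here is a plain-C rendering of what mul_add() above computes; it uses unsigned __int128, which is an assumption about the compiler (GCC/Clang on a 64-bit target), and is meant only as a reference, not a replacement.

/* Reference semantics of mul_add(r, a, word, carry): the 128-bit product
 * a*word is added to r and the incoming carry; the low 64 bits go back
 * into r and the high 64 bits become the new carry. mul() is the same
 * minus the "+ *r" term. */
static inline void mul_add_ref(BN_ULONG *r, BN_ULONG a, BN_ULONG word,
                               BN_ULONG *carry)
{
    unsigned __int128 t = (unsigned __int128)a * word + *r + *carry;

    *r = (BN_ULONG)t;
    *carry = (BN_ULONG)(t >> 64);
}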
+
+BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
+ BN_ULONG w)
+{
+ BN_ULONG c1 = 0;
+
+ if (num <= 0)
+ return (c1);
+
+ while (num & ~3) {
+ mul_add(rp[0], ap[0], w, c1);
+ mul_add(rp[1], ap[1], w, c1);
+ mul_add(rp[2], ap[2], w, c1);
+ mul_add(rp[3], ap[3], w, c1);
+ ap += 4;
+ rp += 4;
+ num -= 4;
+ }
+ if (num) {
+ mul_add(rp[0], ap[0], w, c1);
+ if (--num == 0)
+ return c1;
+ mul_add(rp[1], ap[1], w, c1);
+ if (--num == 0)
+ return c1;
+ mul_add(rp[2], ap[2], w, c1);
+ return c1;
+ }
+
+ return (c1);
+}
+
+BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
+{
+ BN_ULONG c1 = 0;
+
+ if (num <= 0)
+ return (c1);
+
+ while (num & ~3) {
+ mul(rp[0], ap[0], w, c1);
+ mul(rp[1], ap[1], w, c1);
+ mul(rp[2], ap[2], w, c1);
+ mul(rp[3], ap[3], w, c1);
+ ap += 4;
+ rp += 4;
+ num -= 4;
+ }
+ if (num) {
+ mul(rp[0], ap[0], w, c1);
+ if (--num == 0)
+ return c1;
+ mul(rp[1], ap[1], w, c1);
+ if (--num == 0)
+ return c1;
+ mul(rp[2], ap[2], w, c1);
+ }
+ return (c1);
+}
+
+void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
+{
+ if (n <= 0)
+ return;
+
+ while (n & ~3) {
+ sqr(r[0], r[1], a[0]);
+ sqr(r[2], r[3], a[1]);
+ sqr(r[4], r[5], a[2]);
+ sqr(r[6], r[7], a[3]);
+ a += 4;
+ r += 8;
+ n -= 4;
+ }
+ if (n) {
+ sqr(r[0], r[1], a[0]);
+ if (--n == 0)
+ return;
+ sqr(r[2], r[3], a[1]);
+ if (--n == 0)
+ return;
+ sqr(r[4], r[5], a[2]);
+ }
+}
+
+BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
+{
+ BN_ULONG ret, waste;
+
+ asm("divq %4":"=a"(ret), "=d"(waste)
+ : "a"(l), "d"(h), "r"(d)
+ : "cc");
+
+ return ret;
+}
+
+BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+ int n)
+{
+ BN_ULONG ret;
+ size_t i = 0;
+
+ if (n <= 0)
+ return 0;
+
+ asm volatile (" subq %0,%0 \n" /* clear carry */
+ " jmp 1f \n"
+ ".p2align 4 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " adcq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " lea 1(%2),%2 \n"
+ " dec %1 \n"
+ " jnz 1b \n"
+ " sbbq %0,%0 \n"
+ :"=&r" (ret), "+c"(n), "+r"(i)
+ :"r"(rp), "r"(ap), "r"(bp)
+ :"cc", "memory");
+
+ return ret & 1;
+}
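The asm loop above is a straightforward add-with-carry chain; a hedged C reference of its semantics (illustrative name, not part of the file):

/* Reference semantics of bn_add_words(): word-wise addition of ap and bp
 * into rp with carry propagation; the return value is the final carry. */
static BN_ULONG add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                              const BN_ULONG *bp, int n)
{
    BN_ULONG carry = 0;
    int i;

    for (i = 0; i < n; i++) {
        BN_ULONG t = ap[i] + carry;
        carry = (t < carry);            /* overflow from adding the carry */
        rp[i] = t + bp[i];
        carry |= (rp[i] < t);           /* overflow from adding bp[i] */
    }
    return carry;
}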
+
+# ifndef SIMICS
+BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+ int n)
+{
+ BN_ULONG ret;
+ size_t i = 0;
+
+ if (n <= 0)
+ return 0;
+
+ asm volatile (" subq %0,%0 \n" /* clear borrow */
+ " jmp 1f \n"
+ ".p2align 4 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " sbbq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " lea 1(%2),%2 \n"
+ " dec %1 \n"
+ " jnz 1b \n"
+ " sbbq %0,%0 \n"
+ :"=&r" (ret), "+c"(n), "+r"(i)
+ :"r"(rp), "r"(ap), "r"(bp)
+ :"cc", "memory");
+
+ return ret & 1;
+}
+# else
+/* Simics 1.4<7 has buggy sbbq:-( */
+# define BN_MASK2 0xffffffffffffffffL
+BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
+{
+ BN_ULONG t1, t2;
+ int c = 0;
+
+ if (n <= 0)
+ return ((BN_ULONG)0);
+
+ for (;;) {
+ t1 = a[0];
+ t2 = b[0];
+ r[0] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ t1 = a[1];
+ t2 = b[1];
+ r[1] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ t1 = a[2];
+ t2 = b[2];
+ r[2] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ t1 = a[3];
+ t2 = b[3];
+ r[3] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ a += 4;
+ b += 4;
+ r += 4;
+ }
+ return (c);
+}
+# endif
+
+/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
+/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
+/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
+/*
+ * sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number
+ * c=(c2,c1,c0)
+ */
+
+/*
+ * Keep in mind that carrying into the high part of a multiplication
+ * result cannot overflow, because the high part can never be all-ones:
+ * for 64-bit a and b, a*b <= (2^64-1)^2 = 2^128 - 2^65 + 1, so the high
+ * word is at most 2^64-2 and adding a carry of 1 cannot wrap.
+ */
+# if 0
+/* original macros are kept for reference purposes */
+# define mul_add_c(a,b,c0,c1,c2) do { \
+ BN_ULONG ta = (a), tb = (b); \
+ BN_ULONG lo, hi; \
+ BN_UMULT_LOHI(lo,hi,ta,tb); \
+ c0 += lo; hi += (c0<lo)?1:0; \
+ c1 += hi; c2 += (c1<hi)?1:0; \
+ } while(0)
+
+# define mul_add_c2(a,b,c0,c1,c2) do { \
+ BN_ULONG ta = (a), tb = (b); \
+ BN_ULONG lo, hi, tt; \
+ BN_UMULT_LOHI(lo,hi,ta,tb); \
+ c0 += lo; tt = hi+((c0<lo)?1:0); \
+ c1 += tt; c2 += (c1<tt)?1:0; \
+ c0 += lo; hi += (c0<lo)?1:0; \
+ c1 += hi; c2 += (c1<hi)?1:0; \
+ } while(0)
+
+# define sqr_add_c(a,i,c0,c1,c2) do { \
+ BN_ULONG ta = (a)[i]; \
+ BN_ULONG lo, hi; \
+ BN_UMULT_LOHI(lo,hi,ta,ta); \
+ c0 += lo; hi += (c0<lo)?1:0; \
+ c1 += hi; c2 += (c1<hi)?1:0; \
+ } while(0)
+# else
+# define mul_add_c(a,b,c0,c1,c2) do { \
+ BN_ULONG t1,t2; \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0),"+r"(c1),"+r"(c2) \
+ : "r"(t1),"r"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+# define sqr_add_c(a,i,c0,c1,c2) do { \
+ BN_ULONG t1,t2; \
+ asm ("mulq %2" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a[i]) \
+ : "cc"); \
+ asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0),"+r"(c1),"+r"(c2) \
+ : "r"(t1),"r"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+# define mul_add_c2(a,b,c0,c1,c2) do { \
+ BN_ULONG t1,t2; \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0),"+r"(c1),"+r"(c2) \
+ : "r"(t1),"r"(t2),"g"(0) \
+ : "cc"); \
+ asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0),"+r"(c1),"+r"(c2) \
+ : "r"(t1),"r"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+# endif
+
+# define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+
+void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+{
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ mul_add_c(a[0], b[0], c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ mul_add_c(a[0], b[1], c2, c3, c1);
+ mul_add_c(a[1], b[0], c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ mul_add_c(a[2], b[0], c3, c1, c2);
+ mul_add_c(a[1], b[1], c3, c1, c2);
+ mul_add_c(a[0], b[2], c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ mul_add_c(a[0], b[3], c1, c2, c3);
+ mul_add_c(a[1], b[2], c1, c2, c3);
+ mul_add_c(a[2], b[1], c1, c2, c3);
+ mul_add_c(a[3], b[0], c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ mul_add_c(a[4], b[0], c2, c3, c1);
+ mul_add_c(a[3], b[1], c2, c3, c1);
+ mul_add_c(a[2], b[2], c2, c3, c1);
+ mul_add_c(a[1], b[3], c2, c3, c1);
+ mul_add_c(a[0], b[4], c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ mul_add_c(a[0], b[5], c3, c1, c2);
+ mul_add_c(a[1], b[4], c3, c1, c2);
+ mul_add_c(a[2], b[3], c3, c1, c2);
+ mul_add_c(a[3], b[2], c3, c1, c2);
+ mul_add_c(a[4], b[1], c3, c1, c2);
+ mul_add_c(a[5], b[0], c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ mul_add_c(a[6], b[0], c1, c2, c3);
+ mul_add_c(a[5], b[1], c1, c2, c3);
+ mul_add_c(a[4], b[2], c1, c2, c3);
+ mul_add_c(a[3], b[3], c1, c2, c3);
+ mul_add_c(a[2], b[4], c1, c2, c3);
+ mul_add_c(a[1], b[5], c1, c2, c3);
+ mul_add_c(a[0], b[6], c1, c2, c3);
+ r[6] = c1;
+ c1 = 0;
+ mul_add_c(a[0], b[7], c2, c3, c1);
+ mul_add_c(a[1], b[6], c2, c3, c1);
+ mul_add_c(a[2], b[5], c2, c3, c1);
+ mul_add_c(a[3], b[4], c2, c3, c1);
+ mul_add_c(a[4], b[3], c2, c3, c1);
+ mul_add_c(a[5], b[2], c2, c3, c1);
+ mul_add_c(a[6], b[1], c2, c3, c1);
+ mul_add_c(a[7], b[0], c2, c3, c1);
+ r[7] = c2;
+ c2 = 0;
+ mul_add_c(a[7], b[1], c3, c1, c2);
+ mul_add_c(a[6], b[2], c3, c1, c2);
+ mul_add_c(a[5], b[3], c3, c1, c2);
+ mul_add_c(a[4], b[4], c3, c1, c2);
+ mul_add_c(a[3], b[5], c3, c1, c2);
+ mul_add_c(a[2], b[6], c3, c1, c2);
+ mul_add_c(a[1], b[7], c3, c1, c2);
+ r[8] = c3;
+ c3 = 0;
+ mul_add_c(a[2], b[7], c1, c2, c3);
+ mul_add_c(a[3], b[6], c1, c2, c3);
+ mul_add_c(a[4], b[5], c1, c2, c3);
+ mul_add_c(a[5], b[4], c1, c2, c3);
+ mul_add_c(a[6], b[3], c1, c2, c3);
+ mul_add_c(a[7], b[2], c1, c2, c3);
+ r[9] = c1;
+ c1 = 0;
+ mul_add_c(a[7], b[3], c2, c3, c1);
+ mul_add_c(a[6], b[4], c2, c3, c1);
+ mul_add_c(a[5], b[5], c2, c3, c1);
+ mul_add_c(a[4], b[6], c2, c3, c1);
+ mul_add_c(a[3], b[7], c2, c3, c1);
+ r[10] = c2;
+ c2 = 0;
+ mul_add_c(a[4], b[7], c3, c1, c2);
+ mul_add_c(a[5], b[6], c3, c1, c2);
+ mul_add_c(a[6], b[5], c3, c1, c2);
+ mul_add_c(a[7], b[4], c3, c1, c2);
+ r[11] = c3;
+ c3 = 0;
+ mul_add_c(a[7], b[5], c1, c2, c3);
+ mul_add_c(a[6], b[6], c1, c2, c3);
+ mul_add_c(a[5], b[7], c1, c2, c3);
+ r[12] = c1;
+ c1 = 0;
+ mul_add_c(a[6], b[7], c2, c3, c1);
+ mul_add_c(a[7], b[6], c2, c3, c1);
+ r[13] = c2;
+ c2 = 0;
+ mul_add_c(a[7], b[7], c3, c1, c2);
+ r[14] = c3;
+ r[15] = c1;
+}
+
+void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
+{
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ mul_add_c(a[0], b[0], c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ mul_add_c(a[0], b[1], c2, c3, c1);
+ mul_add_c(a[1], b[0], c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ mul_add_c(a[2], b[0], c3, c1, c2);
+ mul_add_c(a[1], b[1], c3, c1, c2);
+ mul_add_c(a[0], b[2], c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ mul_add_c(a[0], b[3], c1, c2, c3);
+ mul_add_c(a[1], b[2], c1, c2, c3);
+ mul_add_c(a[2], b[1], c1, c2, c3);
+ mul_add_c(a[3], b[0], c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ mul_add_c(a[3], b[1], c2, c3, c1);
+ mul_add_c(a[2], b[2], c2, c3, c1);
+ mul_add_c(a[1], b[3], c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ mul_add_c(a[2], b[3], c3, c1, c2);
+ mul_add_c(a[3], b[2], c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ mul_add_c(a[3], b[3], c1, c2, c3);
+ r[6] = c1;
+ r[7] = c2;
+}
+
+void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
+{
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ sqr_add_c(a, 0, c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 1, 0, c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ sqr_add_c(a, 1, c3, c1, c2);
+ sqr_add_c2(a, 2, 0, c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ sqr_add_c2(a, 3, 0, c1, c2, c3);
+ sqr_add_c2(a, 2, 1, c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ sqr_add_c(a, 2, c2, c3, c1);
+ sqr_add_c2(a, 3, 1, c2, c3, c1);
+ sqr_add_c2(a, 4, 0, c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ sqr_add_c2(a, 5, 0, c3, c1, c2);
+ sqr_add_c2(a, 4, 1, c3, c1, c2);
+ sqr_add_c2(a, 3, 2, c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ sqr_add_c(a, 3, c1, c2, c3);
+ sqr_add_c2(a, 4, 2, c1, c2, c3);
+ sqr_add_c2(a, 5, 1, c1, c2, c3);
+ sqr_add_c2(a, 6, 0, c1, c2, c3);
+ r[6] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 7, 0, c2, c3, c1);
+ sqr_add_c2(a, 6, 1, c2, c3, c1);
+ sqr_add_c2(a, 5, 2, c2, c3, c1);
+ sqr_add_c2(a, 4, 3, c2, c3, c1);
+ r[7] = c2;
+ c2 = 0;
+ sqr_add_c(a, 4, c3, c1, c2);
+ sqr_add_c2(a, 5, 3, c3, c1, c2);
+ sqr_add_c2(a, 6, 2, c3, c1, c2);
+ sqr_add_c2(a, 7, 1, c3, c1, c2);
+ r[8] = c3;
+ c3 = 0;
+ sqr_add_c2(a, 7, 2, c1, c2, c3);
+ sqr_add_c2(a, 6, 3, c1, c2, c3);
+ sqr_add_c2(a, 5, 4, c1, c2, c3);
+ r[9] = c1;
+ c1 = 0;
+ sqr_add_c(a, 5, c2, c3, c1);
+ sqr_add_c2(a, 6, 4, c2, c3, c1);
+ sqr_add_c2(a, 7, 3, c2, c3, c1);
+ r[10] = c2;
+ c2 = 0;
+ sqr_add_c2(a, 7, 4, c3, c1, c2);
+ sqr_add_c2(a, 6, 5, c3, c1, c2);
+ r[11] = c3;
+ c3 = 0;
+ sqr_add_c(a, 6, c1, c2, c3);
+ sqr_add_c2(a, 7, 5, c1, c2, c3);
+ r[12] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 7, 6, c2, c3, c1);
+ r[13] = c2;
+ c2 = 0;
+ sqr_add_c(a, 7, c3, c1, c2);
+ r[14] = c3;
+ r[15] = c1;
+}
+
+void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
+{
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ sqr_add_c(a, 0, c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 1, 0, c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ sqr_add_c(a, 1, c3, c1, c2);
+ sqr_add_c2(a, 2, 0, c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ sqr_add_c2(a, 3, 0, c1, c2, c3);
+ sqr_add_c2(a, 2, 1, c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ sqr_add_c(a, 2, c2, c3, c1);
+ sqr_add_c2(a, 3, 1, c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ sqr_add_c2(a, 3, 2, c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ sqr_add_c(a, 3, c1, c2, c3);
+ r[6] = c1;
+ r[7] = c2;
+}
+#endif
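As a sanity reference for the comba routines above, here is a hedged sketch of a check against plain schoolbook multiplication (hypothetical test code; it assumes the prototypes and BN_ULONG definition from this file and a compiler with unsigned __int128):

#include <assert.h>
#include <string.h>

/* Hypothetical check: bn_mul_comba4() must agree with an operand-scanning
 * 4x4-word schoolbook multiply carried out with 128-bit intermediates. */
static void mul_comba4_check(BN_ULONG a[4], BN_ULONG b[4])
{
    BN_ULONG r[8], ref[8] = { 0 };
    int i, j;

    for (i = 0; i < 4; i++) {
        BN_ULONG c = 0;
        for (j = 0; j < 4; j++) {
            unsigned __int128 t = (unsigned __int128)a[i] * b[j]
                                + ref[i + j] + c;
            ref[i + j] = (BN_ULONG)t;
            c = (BN_ULONG)(t >> 64);
        }
        ref[i + 4] = c;                 /* row's final carry */
    }

    bn_mul_comba4(r, a, b);
    assert(memcmp(r, ref, sizeof(r)) == 0);
}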
diff --git a/openssl-1.1.0h/crypto/bn/asm/x86_64-gf2m.pl b/openssl-1.1.0h/crypto/bn/asm/x86_64-gf2m.pl
new file mode 100644
index 0000000..d962f62
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/x86_64-gf2m.pl
@@ -0,0 +1,397 @@
+#! /usr/bin/env perl
+# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# May 2011
+#
+# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
+# in bn_gf2m.c. It's a rather low-hanging mechanical port from C for
+# the time being... Except that it has two code paths: code suitable
+# for any x86_64 CPU and a PCLMULQDQ path suitable for Westmere and
+# later. Improvement varies from one benchmark and µ-arch to another.
+# The vanilla code path is at most 20% faster than compiler-generated
+# code [not very impressive], while PCLMULQDQ is a whole 85%-160% better
+# on 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that
+# these coefficients are not the ones for bn_GF2m_mul_2x2 itself, as not
+# all CPU time is burnt in it...
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+($lo,$hi)=("%rax","%rdx"); $a=$lo;
+($i0,$i1)=("%rsi","%rdi");
+($t0,$t1)=("%rbx","%rcx");
+($b,$mask)=("%rbp","%r8");
+($a1,$a2,$a4,$a8,$a12,$a48)=map("%r$_",(9..15));
+($R,$Tx)=("%xmm0","%xmm1");
+
+$code.=<<___;
+.text
+
+.type _mul_1x1,\@abi-omnipotent
+.align 16
+_mul_1x1:
+ sub \$128+8,%rsp
+ mov \$-1,$a1
+ lea ($a,$a),$i0
+ shr \$3,$a1
+ lea (,$a,4),$i1
+ and $a,$a1 # a1=a&0x1fffffffffffffff
+ lea (,$a,8),$a8
+ sar \$63,$a # broadcast 63rd bit
+ lea ($a1,$a1),$a2
+ sar \$63,$i0 # broadcast 62nd bit
+ lea (,$a1,4),$a4
+ and $b,$a
+	sar	\$63,$i1		# broadcast 61st bit
+ mov $a,$hi # $a is $lo
+ shl \$63,$lo
+ and $b,$i0
+ shr \$1,$hi
+ mov $i0,$t1
+ shl \$62,$i0
+ and $b,$i1
+ shr \$2,$t1
+ xor $i0,$lo
+ mov $i1,$t0
+ shl \$61,$i1
+ xor $t1,$hi
+ shr \$3,$t0
+ xor $i1,$lo
+ xor $t0,$hi
+
+ mov $a1,$a12
+ movq \$0,0(%rsp) # tab[0]=0
+ xor $a2,$a12 # a1^a2
+ mov $a1,8(%rsp) # tab[1]=a1
+ mov $a4,$a48
+ mov $a2,16(%rsp) # tab[2]=a2
+ xor $a8,$a48 # a4^a8
+ mov $a12,24(%rsp) # tab[3]=a1^a2
+
+ xor $a4,$a1
+ mov $a4,32(%rsp) # tab[4]=a4
+ xor $a4,$a2
+ mov $a1,40(%rsp) # tab[5]=a1^a4
+ xor $a4,$a12
+ mov $a2,48(%rsp) # tab[6]=a2^a4
+ xor $a48,$a1 # a1^a4^a4^a8=a1^a8
+ mov $a12,56(%rsp) # tab[7]=a1^a2^a4
+	xor	$a48,$a2		# a2^a4^a4^a8=a2^a8
+
+ mov $a8,64(%rsp) # tab[8]=a8
+ xor $a48,$a12 # a1^a2^a4^a4^a8=a1^a2^a8
+ mov $a1,72(%rsp) # tab[9]=a1^a8
+ xor $a4,$a1 # a1^a8^a4
+ mov $a2,80(%rsp) # tab[10]=a2^a8
+ xor $a4,$a2 # a2^a8^a4
+ mov $a12,88(%rsp) # tab[11]=a1^a2^a8
+
+ xor $a4,$a12 # a1^a2^a8^a4
+ mov $a48,96(%rsp) # tab[12]=a4^a8
+ mov $mask,$i0
+ mov $a1,104(%rsp) # tab[13]=a1^a4^a8
+ and $b,$i0
+ mov $a2,112(%rsp) # tab[14]=a2^a4^a8
+ shr \$4,$b
+ mov $a12,120(%rsp) # tab[15]=a1^a2^a4^a8
+ mov $mask,$i1
+ and $b,$i1
+ shr \$4,$b
+
+ movq (%rsp,$i0,8),$R # half of calculations is done in SSE2
+ mov $mask,$i0
+ and $b,$i0
+ shr \$4,$b
+___
+ for ($n=1;$n<8;$n++) {
+ $code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $mask,$i1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ and $b,$i1
+ movq (%rsp,$i0,8),$Tx
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ pslldq \$$n,$Tx
+ mov $mask,$i0
+ shr \$4,$b
+ xor $t0,$hi
+ and $b,$i0
+ shr \$4,$b
+ pxor $Tx,$R
+___
+ }
+$code.=<<___;
+ mov (%rsp,$i1,8),$t1
+ mov $t1,$t0
+ shl \$`8*$n-4`,$t1
+ movq $R,$i0
+ shr \$`64-(8*$n-4)`,$t0
+ xor $t1,$lo
+ psrldq \$8,$R
+ xor $t0,$hi
+ movq $R,$i1
+ xor $i0,$lo
+ xor $i1,$hi
+
+ add \$128+8,%rsp
+ ret
+.Lend_mul_1x1:
+.size _mul_1x1,.-_mul_1x1
+___
+
+($rp,$a1,$a0,$b1,$b0) = $win64? ("%rcx","%rdx","%r8", "%r9","%r10") : # Win64 order
+ ("%rdi","%rsi","%rdx","%rcx","%r8"); # Unix order
+
+$code.=<<___;
+.extern OPENSSL_ia32cap_P
+.globl bn_GF2m_mul_2x2
+.type bn_GF2m_mul_2x2,\@abi-omnipotent
+.align 16
+bn_GF2m_mul_2x2:
+ mov OPENSSL_ia32cap_P(%rip),%rax
+ bt \$33,%rax
+ jnc .Lvanilla_mul_2x2
+
+ movq $a1,%xmm0
+ movq $b1,%xmm1
+ movq $a0,%xmm2
+___
+$code.=<<___ if ($win64);
+ movq 40(%rsp),%xmm3
+___
+$code.=<<___ if (!$win64);
+ movq $b0,%xmm3
+___
+$code.=<<___;
+ movdqa %xmm0,%xmm4
+ movdqa %xmm1,%xmm5
+ pclmulqdq \$0,%xmm1,%xmm0 # a1·b1
+ pxor %xmm2,%xmm4
+ pxor %xmm3,%xmm5
+ pclmulqdq \$0,%xmm3,%xmm2 # a0·b0
+ pclmulqdq \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1)
+ xorps %xmm0,%xmm4
+ xorps %xmm2,%xmm4 # (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ movdqa %xmm4,%xmm5
+ pslldq \$8,%xmm4
+ psrldq \$8,%xmm5
+ pxor %xmm4,%xmm2
+ pxor %xmm5,%xmm0
+ movdqu %xmm2,0($rp)
+ movdqu %xmm0,16($rp)
+ ret
+
+.align 16
+.Lvanilla_mul_2x2:
+ lea -8*17(%rsp),%rsp
+___
+$code.=<<___ if ($win64);
+ mov `8*17+40`(%rsp),$b0
+ mov %rdi,8*15(%rsp)
+ mov %rsi,8*16(%rsp)
+___
+$code.=<<___;
+ mov %r14,8*10(%rsp)
+ mov %r13,8*11(%rsp)
+ mov %r12,8*12(%rsp)
+ mov %rbp,8*13(%rsp)
+ mov %rbx,8*14(%rsp)
+.Lbody_mul_2x2:
+ mov $rp,32(%rsp) # save the arguments
+ mov $a1,40(%rsp)
+ mov $a0,48(%rsp)
+ mov $b1,56(%rsp)
+ mov $b0,64(%rsp)
+
+ mov \$0xf,$mask
+ mov $a1,$a
+ mov $b1,$b
+ call _mul_1x1 # a1·b1
+ mov $lo,16(%rsp)
+ mov $hi,24(%rsp)
+
+ mov 48(%rsp),$a
+ mov 64(%rsp),$b
+ call _mul_1x1 # a0·b0
+ mov $lo,0(%rsp)
+ mov $hi,8(%rsp)
+
+ mov 40(%rsp),$a
+ mov 56(%rsp),$b
+ xor 48(%rsp),$a
+ xor 64(%rsp),$b
+ call _mul_1x1 # (a0+a1)·(b0+b1)
+___
+ @r=("%rbx","%rcx","%rdi","%rsi");
+$code.=<<___;
+ mov 0(%rsp),@r[0]
+ mov 8(%rsp),@r[1]
+ mov 16(%rsp),@r[2]
+ mov 24(%rsp),@r[3]
+ mov 32(%rsp),%rbp
+
+ xor $hi,$lo
+ xor @r[1],$hi
+ xor @r[0],$lo
+ mov @r[0],0(%rbp)
+ xor @r[2],$hi
+ mov @r[3],24(%rbp)
+ xor @r[3],$lo
+ xor @r[3],$hi
+ xor $hi,$lo
+ mov $hi,16(%rbp)
+ mov $lo,8(%rbp)
+
+ mov 8*10(%rsp),%r14
+ mov 8*11(%rsp),%r13
+ mov 8*12(%rsp),%r12
+ mov 8*13(%rsp),%rbp
+ mov 8*14(%rsp),%rbx
+___
+$code.=<<___ if ($win64);
+ mov 8*15(%rsp),%rdi
+ mov 8*16(%rsp),%rsi
+___
+$code.=<<___;
+ lea 8*17(%rsp),%rsp
+ ret
+.Lend_mul_2x2:
+.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+.asciz "GF(2^m) Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+
+.type se_handler,\@abi-omnipotent
+.align 16
+se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 152($context),%rax # pull context->Rsp
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lbody_mul_2x2(%rip),%r10
+ cmp %r10,%rbx # context->Rip<"prologue" label
+ jb .Lin_prologue
+
+ mov 8*10(%rax),%r14 # mimic epilogue
+ mov 8*11(%rax),%r13
+ mov 8*12(%rax),%r12
+ mov 8*13(%rax),%rbp
+ mov 8*14(%rax),%rbx
+ mov 8*15(%rax),%rdi
+ mov 8*16(%rax),%rsi
+
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+
+.Lin_prologue:
+ lea 8*17(%rax),%rax
+ mov %rax,152($context) # restore context->Rsp
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size se_handler,.-se_handler
+
+.section .pdata
+.align 4
+ .rva _mul_1x1
+ .rva .Lend_mul_1x1
+ .rva .LSEH_info_1x1
+
+ .rva .Lvanilla_mul_2x2
+ .rva .Lend_mul_2x2
+ .rva .LSEH_info_2x2
+.section .xdata
+.align 8
+.LSEH_info_1x1:
+ .byte 0x01,0x07,0x02,0x00
+ .byte 0x07,0x01,0x11,0x00 # sub rsp,128+8
+.LSEH_info_2x2:
+ .byte 9,0,0,0
+ .rva se_handler
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/x86_64-mont.pl b/openssl-1.1.0h/crypto/bn/asm/x86_64-mont.pl
new file mode 100755
index 0000000..df4cca5
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/x86_64-mont.pl
@@ -0,0 +1,1521 @@
+#! /usr/bin/env perl
+# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# October 2005.
+#
+# Montgomery multiplication routine for x86_64. While it gives a modest
+# 9% improvement for rsa4096 sign on Opteron, rsa512 sign runs more
+# than twice as fast. The most common case, rsa1024 sign, is improved by
+# a respectable 50%. It remains to be seen if loop unrolling and a
+# dedicated squaring routine can provide further improvement...
+
+# July 2011.
+#
+# Add a dedicated squaring procedure. Performance improvement varies
+# from platform to platform, but on average it's ~5%/15%/25%/33%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
+
+# August 2011.
+#
+# Unroll and modulo-schedule inner loops in such a manner that they
+# are "fallen through" for input lengths of 8, which is critical for
+# 1024-bit RSA *sign*. Average performance improvement in comparison
+# to *initial* version of this module from 2005 is ~0%/30%/40%/45%
+# for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
+
+# June 2013.
+#
+# Optimize reduction in squaring procedure and improve 1024+-bit RSA
+# sign performance by 10-16% on Intel Sandy Bridge and later
+# (virtually same on non-Intel processors).
+
+# August 2013.
+#
+# Add MULX/ADOX/ADCX code path.
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.23);
+}
+
+if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.10);
+}
+
+if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $addx = ($1>=12);
+}
+
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
+ my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
+ $addx = ($ver>=3.03);
+}
+
+# int bn_mul_mont(
+$rp="%rdi"; # BN_ULONG *rp,
+$ap="%rsi"; # const BN_ULONG *ap,
+$bp="%rdx"; # const BN_ULONG *bp,
+$np="%rcx"; # const BN_ULONG *np,
+$n0="%r8"; # const BN_ULONG *n0,
+$num="%r9"; # int num);
+$lo0="%r10";
+$hi0="%r11";
+$hi1="%r13";
+$i="%r14";
+$j="%r15";
+$m0="%rbx";
+$m1="%rbp";
+
+$code=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.globl bn_mul_mont
+.type bn_mul_mont,\@function,6
+.align 16
+bn_mul_mont:
+ mov ${num}d,${num}d
+ mov %rsp,%rax
+ test \$3,${num}d
+ jnz .Lmul_enter
+ cmp \$8,${num}d
+ jb .Lmul_enter
+___
+$code.=<<___ if ($addx);
+ mov OPENSSL_ia32cap_P+8(%rip),%r11d
+___
+$code.=<<___;
+ cmp $ap,$bp
+ jne .Lmul4x_enter
+ test \$7,${num}d
+ jz .Lsqr8x_enter
+ jmp .Lmul4x_enter
+
+.align 16
+.Lmul_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ neg $num
+ mov %rsp,%r11
+ lea -16(%rsp,$num,8),%r10 # future alloca(8*(num+2))
+ neg $num # restore $num
+ and \$-1024,%r10 # minimize TLB usage
+
+ # An OS-agnostic version of __chkstk.
+ #
+	# Some OSes (Windows) insist on the stack being "wired" to
+	# physical memory in a strictly sequential manner, i.e. if a stack
+	# allocation spans two pages, then a reference to the farthest one
+	# can be punished with SEGV. But page walking can do good even on
+	# other OSes, because it guarantees that a villain thread hits
+	# the guard page before it can do damage to an innocent one...
+ sub %r10,%r11
+ and \$-4096,%r11
+ lea (%r10,%r11),%rsp
+ mov (%rsp),%r11
+ cmp %r10,%rsp
+ ja .Lmul_page_walk
+ jmp .Lmul_page_walk_done
+
+.align 16
+.Lmul_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r11
+ cmp %r10,%rsp
+ ja .Lmul_page_walk
+.Lmul_page_walk_done:
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul_body:
+ mov $bp,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$lo0
+ mov ($np),%rax
+
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .L1st
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+ mov $lo0,$hi0
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ jmp .Louter
+.align 16
+.Louter:
+ mov ($bp,$i,8),$m0 # m0=bp[i]
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
+ mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .Linner
+
+ add %rax,$hi1
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ add $lo0,$hi1 # pull upmost overflow bit
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ cmp $num,$i
+ jb .Louter
+
+ xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
+ jmp .Lsub
+.align 16
+.Lsub: sbb ($np,$i,8),%rax
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 8($ap,$i,8),%rax # tp[i+1]
+ lea 1($i),$i # i++
+	dec	$j		# doesn't affect CF!
+ jnz .Lsub
+
+ sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
+ and %rax,$ap
+ not %rax
+ mov $rp,$np
+ and %rax,$np
+ mov $num,$j # j=num
+ or $np,$ap # ap=borrow?tp:rp
+.align 16
+.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont,.-bn_mul_mont
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont,\@function,6
+.align 16
+bn_mul4x_mont:
+ mov ${num}d,${num}d
+ mov %rsp,%rax
+.Lmul4x_enter:
+___
+$code.=<<___ if ($addx);
+ and \$0x80100,%r11d
+ cmp \$0x80100,%r11d
+ je .Lmulx4x_enter
+___
+$code.=<<___;
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ neg $num
+ mov %rsp,%r11
+ lea -32(%rsp,$num,8),%r10 # future alloca(8*(num+4))
+ neg $num # restore
+ and \$-1024,%r10 # minimize TLB usage
+
+ sub %r10,%r11
+ and \$-4096,%r11
+ lea (%r10,%r11),%rsp
+ mov (%rsp),%r11
+ cmp %r10,%rsp
+ ja .Lmul4x_page_walk
+ jmp .Lmul4x_page_walk_done
+
+.Lmul4x_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r11
+ cmp %r10,%rsp
+ ja .Lmul4x_page_walk
+.Lmul4x_page_walk_done:
+
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul4x_body:
+ mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
+ mov %rdx,%r12 # reassign $bp
+___
+ $bp="%r12";
+$code.=<<___;
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($bp),$m0 # m0=bp[0]
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4($j),$j # j++
+ adc \$0,%rdx
+ mov $N[1],(%rsp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+.align 16
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jb .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+.align 4
+.Louter4x:
+ mov ($bp,$i,8),$m0 # m0=bp[i]
+ xor $j,$j # j=0
+ mov (%rsp),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np),%rax
+ adc \$0,%rdx
+ add 8(%rsp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4($j),$j # j+=2
+ adc \$0,%rdx
+ mov $N[1],(%rsp) # tp[j-1]
+ mov %rdx,$N[0]
+ jmp .Linner4x
+.align 16
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-8(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8($np,$j,8),%rax
+ adc \$0,%rdx
+ add 8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 4($j),$j # j++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov -16($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-32(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+ cmp $num,$j
+ jb .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -16($np,$j,8),%rax
+ adc \$0,%rdx
+ add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[0],-24(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8($np,$j,8),%rax
+ adc \$0,%rdx
+ add -8(%rsp,$j,8),$A[1]
+ adc \$0,%rdx
+ lea 1($i),$i # i++
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[1],-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$N[0]
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add (%rsp,$num,8),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1]
+ mov $N[0],-8(%rsp,$j,8)
+ mov $N[1],(%rsp,$j,8) # store upmost overflow bit
+
+ cmp $num,$i
+ jb .Louter4x
+___
+{
+my @ri=("%rax","%rdx",$m0,$m1);
+$code.=<<___;
+ mov 16(%rsp,$num,8),$rp # restore $rp
+ mov 0(%rsp),@ri[0] # tp[0]
+ pxor %xmm0,%xmm0
+ mov 8(%rsp),@ri[1] # tp[1]
+ shr \$2,$num # num/=4
+ lea (%rsp),$ap # borrow ap for tp
+ xor $i,$i # i=0 and clear CF!
+
+ sub 0($np),@ri[0]
+ mov 16($ap),@ri[2] # tp[2]
+ mov 24($ap),@ri[3] # tp[3]
+ sbb 8($np),@ri[1]
+ lea -1($num),$j # j=num/4-1
+ jmp .Lsub4x
+.align 16
+.Lsub4x:
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 16($np,$i,8),@ri[2]
+ mov 32($ap,$i,8),@ri[0] # tp[i+1]
+ mov 40($ap,$i,8),@ri[1]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 32($np,$i,8),@ri[0]
+ mov 48($ap,$i,8),@ri[2]
+ mov 56($ap,$i,8),@ri[3]
+ sbb 40($np,$i,8),@ri[1]
+ lea 4($i),$i # i++
+	dec	$j			# doesn't affect CF!
+ jnz .Lsub4x
+
+ mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 32($ap,$i,8),@ri[0] # load overflow bit
+ sbb 16($np,$i,8),@ri[2]
+ mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
+ sbb 24($np,$i,8),@ri[3]
+ mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
+
+ sbb \$0,@ri[0] # handle upmost overflow bit
+ mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
+ xor $i,$i # i=0
+ and @ri[0],$ap
+ not @ri[0]
+ mov $rp,$np
+ and @ri[0],$np
+ lea -1($num),$j
+ or $np,$ap # ap=borrow?tp:rp
+
+ movdqu ($ap),%xmm1
+ movdqa %xmm0,(%rsp)
+ movdqu %xmm1,($rp)
+ jmp .Lcopy4x
+.align 16
+.Lcopy4x: # copy or in-place refresh
+ movdqu 16($ap,$i),%xmm2
+ movdqu 32($ap,$i),%xmm1
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+ movdqa %xmm0,32(%rsp,$i)
+ movdqu %xmm1,32($rp,$i)
+ lea 32($i),$i
+ dec $j
+ jnz .Lcopy4x
+
+ shl \$2,$num
+ movdqu 16($ap,$i),%xmm2
+ movdqa %xmm0,16(%rsp,$i)
+ movdqu %xmm2,16($rp,$i)
+___
+}
+$code.=<<___;
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lmul4x_epilogue:
+ ret
+.size bn_mul4x_mont,.-bn_mul4x_mont
+___
+}}}
+ {{{
+######################################################################
+# void bn_sqr8x_mont(
+my $rptr="%rdi"; # const BN_ULONG *rptr,
+my $aptr="%rsi"; # const BN_ULONG *aptr,
+my $bptr="%rdx"; # not used
+my $nptr="%rcx"; # const BN_ULONG *nptr,
+my $n0 ="%r8"; # const BN_ULONG *n0);
+my $num ="%r9"; # int num, has to be divisible by 8
+
+my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
+my @A0=("%r10","%r11");
+my @A1=("%r12","%r13");
+my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
+
+$code.=<<___ if ($addx);
+.extern bn_sqrx8x_internal # see x86_64-mont5 module
+___
+$code.=<<___;
+.extern bn_sqr8x_internal # see x86_64-mont5 module
+
+.type bn_sqr8x_mont,\@function,6
+.align 32
+bn_sqr8x_mont:
+ mov %rsp,%rax
+.Lsqr8x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lsqr8x_prologue:
+
+ mov ${num}d,%r10d
+ shl \$3,${num}d # convert $num to bytes
+ shl \$3+2,%r10 # 4*$num
+ neg $num
+
+ ##############################################################
+	# Ensure that the stack frame doesn't alias with $aptr modulo
+	# 4096. This is done to allow the memory disambiguation logic
+	# to do its job.
+ #
+ lea -64(%rsp,$num,2),%r11
+ mov %rsp,%rbp
+ mov ($n0),$n0 # *n0
+ sub $aptr,%r11
+ and \$4095,%r11
+ cmp %r11,%r10
+ jb .Lsqr8x_sp_alt
+ sub %r11,%rbp # align with $aptr
+ lea -64(%rbp,$num,2),%rbp # future alloca(frame+2*$num)
+ jmp .Lsqr8x_sp_done
+
+.align 32
+.Lsqr8x_sp_alt:
+ lea 4096-64(,$num,2),%r10 # 4096-frame-2*$num
+ lea -64(%rbp,$num,2),%rbp # future alloca(frame+2*$num)
+ sub %r10,%r11
+ mov \$0,%r10
+ cmovc %r10,%r11
+ sub %r11,%rbp
+.Lsqr8x_sp_done:
+ and \$-64,%rbp
+ mov %rsp,%r11
+ sub %rbp,%r11
+ and \$-4096,%r11
+ lea (%rbp,%r11),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lsqr8x_page_walk
+ jmp .Lsqr8x_page_walk_done
+
+.align 16
+.Lsqr8x_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lsqr8x_page_walk
+.Lsqr8x_page_walk_done:
+
+ mov $num,%r10
+ neg $num
+
+ mov $n0, 32(%rsp)
+ mov %rax, 40(%rsp) # save original %rsp
+.Lsqr8x_body:
+
+ movq $nptr, %xmm2 # save pointer to modulus
+ pxor %xmm0,%xmm0
+ movq $rptr,%xmm1 # save $rptr
+ movq %r10, %xmm3 # -$num
+___
+$code.=<<___ if ($addx);
+ mov OPENSSL_ia32cap_P+8(%rip),%eax
+ and \$0x80100,%eax
+ cmp \$0x80100,%eax
+ jne .Lsqr8x_nox
+
+ call bn_sqrx8x_internal # see x86_64-mont5 module
+ # %rax top-most carry
+ # %rbp nptr
+ # %rcx -8*num
+ # %r8 end of tp[2*num]
+ lea (%r8,%rcx),%rbx
+ mov %rcx,$num
+ mov %rcx,%rdx
+ movq %xmm1,$rptr
+ sar \$3+2,%rcx # %cf=0
+ jmp .Lsqr8x_sub
+
+.align 32
+.Lsqr8x_nox:
+___
+$code.=<<___;
+ call bn_sqr8x_internal # see x86_64-mont5 module
+ # %rax top-most carry
+ # %rbp nptr
+ # %r8 -8*num
+ # %rdi end of tp[2*num]
+ lea (%rdi,$num),%rbx
+ mov $num,%rcx
+ mov $num,%rdx
+ movq %xmm1,$rptr
+ sar \$3+2,%rcx # %cf=0
+ jmp .Lsqr8x_sub
+
+.align 32
+.Lsqr8x_sub:
+ mov 8*0(%rbx),%r12
+ mov 8*1(%rbx),%r13
+ mov 8*2(%rbx),%r14
+ mov 8*3(%rbx),%r15
+ lea 8*4(%rbx),%rbx
+ sbb 8*0(%rbp),%r12
+ sbb 8*1(%rbp),%r13
+ sbb 8*2(%rbp),%r14
+ sbb 8*3(%rbp),%r15
+ lea 8*4(%rbp),%rbp
+ mov %r12,8*0($rptr)
+ mov %r13,8*1($rptr)
+ mov %r14,8*2($rptr)
+ mov %r15,8*3($rptr)
+ lea 8*4($rptr),$rptr
+ inc %rcx # preserves %cf
+ jnz .Lsqr8x_sub
+
+ sbb \$0,%rax # top-most carry
+ lea (%rbx,$num),%rbx # rewind
+ lea ($rptr,$num),$rptr # rewind
+
+ movq %rax,%xmm1
+ pxor %xmm0,%xmm0
+ pshufd \$0,%xmm1,%xmm1
+ mov 40(%rsp),%rsi # restore %rsp
+ jmp .Lsqr8x_cond_copy
+
+.align 32
+.Lsqr8x_cond_copy:
+ movdqa 16*0(%rbx),%xmm2
+ movdqa 16*1(%rbx),%xmm3
+ lea 16*2(%rbx),%rbx
+ movdqu 16*0($rptr),%xmm4
+ movdqu 16*1($rptr),%xmm5
+ lea 16*2($rptr),$rptr
+ movdqa %xmm0,-16*2(%rbx) # zero tp
+ movdqa %xmm0,-16*1(%rbx)
+ movdqa %xmm0,-16*2(%rbx,%rdx)
+ movdqa %xmm0,-16*1(%rbx,%rdx)
+ pcmpeqd %xmm1,%xmm0
+ pand %xmm1,%xmm2
+ pand %xmm1,%xmm3
+ pand %xmm0,%xmm4
+ pand %xmm0,%xmm5
+ pxor %xmm0,%xmm0
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+ movdqu %xmm4,-16*2($rptr)
+ movdqu %xmm5,-16*1($rptr)
+ add \$32,$num
+ jnz .Lsqr8x_cond_copy
+
+ mov \$1,%rax
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lsqr8x_epilogue:
+ ret
+.size bn_sqr8x_mont,.-bn_sqr8x_mont
+___
+}}}
+
+if ($addx) {{{
+my $bp="%rdx"; # original value
+
+$code.=<<___;
+.type bn_mulx4x_mont,\@function,6
+.align 32
+bn_mulx4x_mont:
+ mov %rsp,%rax
+.Lmulx4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lmulx4x_prologue:
+
+ shl \$3,${num}d # convert $num to bytes
+ xor %r10,%r10
+ sub $num,%r10 # -$num
+ mov ($n0),$n0 # *n0
+ lea -72(%rsp,%r10),%rbp # future alloca(frame+$num+8)
+ and \$-128,%rbp
+ mov %rsp,%r11
+ sub %rbp,%r11
+ and \$-4096,%r11
+ lea (%rbp,%r11),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lmulx4x_page_walk
+ jmp .Lmulx4x_page_walk_done
+
+.align 16
+.Lmulx4x_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lmulx4x_page_walk
+.Lmulx4x_page_walk_done:
+
+ lea ($bp,$num),%r10
+ ##############################################################
+ # Stack layout
+ # +0 num
+ # +8 off-loaded &b[i]
+ # +16 end of b[num]
+ # +24 saved n0
+ # +32 saved rp
+ # +40 saved %rsp
+ # +48 inner counter
+ # +56
+ # +64 tmp[num+1]
+ #
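+	# For orientation, the same layout as a hypothetical C struct
+	# (offsets from %rsp after the allocation; purely illustrative,
+	# nothing in this file emits or relies on such a declaration):
+	#
+	#	struct mulx4x_frame {
+	#	    uint64_t num;        /* +0  operand length in bytes */
+	#	    uint64_t bp_cursor;  /* +8  off-loaded &b[i]        */
+	#	    uint64_t bp_end;     /* +16 end of b[num]           */
+	#	    uint64_t n0;         /* +24 saved *n0               */
+	#	    uint64_t rp;         /* +32 saved rp                */
+	#	    uint64_t saved_rsp;  /* +40 original %rsp           */
+	#	    uint64_t counter;    /* +48 inner counter           */
+	#	    uint64_t reserved;   /* +56                         */
+	#	    uint64_t tmp[];      /* +64 tmp[num+1]              */
+	#	};
+	#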
+ mov $num,0(%rsp) # save $num
+ shr \$5,$num
+ mov %r10,16(%rsp) # end of b[num]
+ sub \$1,$num
+ mov $n0, 24(%rsp) # save *n0
+ mov $rp, 32(%rsp) # save $rp
+ mov %rax,40(%rsp) # save original %rsp
+ mov $num,48(%rsp) # inner counter
+ jmp .Lmulx4x_body
+
+.align 32
+.Lmulx4x_body:
+___
+my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
+ ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
+my $rptr=$bptr;
+$code.=<<___;
+ lea 8($bp),$bptr
+ mov ($bp),%rdx # b[0], $bp==%rdx actually
+ lea 64+32(%rsp),$tptr
+ mov %rdx,$bi
+
+ mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
+ mulx 1*8($aptr),%r11,%r14 # a[1]*b[0]
+ add %rax,%r11
+ mov $bptr,8(%rsp) # off-load &b[i]
+ mulx 2*8($aptr),%r12,%r13 # ...
+ adc %r14,%r12
+ adc \$0,%r13
+
+ mov $mi,$bptr # borrow $bptr
+ imulq 24(%rsp),$mi # "t[0]"*n0
+ xor $zero,$zero # cf=0, of=0
+
+ mulx 3*8($aptr),%rax,%r14
+ mov $mi,%rdx
+ lea 4*8($aptr),$aptr
+ adcx %rax,%r13
+ adcx $zero,%r14 # cf=0
+
+ mulx 0*8($nptr),%rax,%r10
+ adcx %rax,$bptr # discarded
+ adox %r11,%r10
+ mulx 1*8($nptr),%rax,%r11
+ adcx %rax,%r10
+ adox %r12,%r11
+ .byte 0xc4,0x62,0xfb,0xf6,0xa1,0x10,0x00,0x00,0x00 # mulx 2*8($nptr),%rax,%r12
+ mov 48(%rsp),$bptr # counter value
+ mov %r10,-4*8($tptr)
+ adcx %rax,%r11
+ adox %r13,%r12
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ mov %r11,-3*8($tptr)
+ adcx %rax,%r12
+ adox $zero,%r15 # of=0
+ lea 4*8($nptr),$nptr
+ mov %r12,-2*8($tptr)
+
+ jmp .Lmulx4x_1st
+
+.align 32
+.Lmulx4x_1st:
+ adcx $zero,%r15 # cf=0, modulo-scheduled
+ mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
+ adcx %r14,%r10
+ mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
+ adcx %rax,%r11
+ mulx 2*8($aptr),%r12,%rax # ...
+ adcx %r14,%r12
+ mulx 3*8($aptr),%r13,%r14
+ .byte 0x67,0x67
+ mov $mi,%rdx
+ adcx %rax,%r13
+ adcx $zero,%r14 # cf=0
+ lea 4*8($aptr),$aptr
+ lea 4*8($tptr),$tptr
+
+ adox %r15,%r10
+ mulx 0*8($nptr),%rax,%r15
+ adcx %rax,%r10
+ adox %r15,%r11
+ mulx 1*8($nptr),%rax,%r15
+ adcx %rax,%r11
+ adox %r15,%r12
+ mulx 2*8($nptr),%rax,%r15
+ mov %r10,-5*8($tptr)
+ adcx %rax,%r12
+ mov %r11,-4*8($tptr)
+ adox %r15,%r13
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ mov %r12,-3*8($tptr)
+ adcx %rax,%r13
+ adox $zero,%r15
+ lea 4*8($nptr),$nptr
+ mov %r13,-2*8($tptr)
+
+ dec $bptr # of=0, pass cf
+ jnz .Lmulx4x_1st
+
+ mov 0(%rsp),$num # load num
+ mov 8(%rsp),$bptr # re-load &b[i]
+ adc $zero,%r15 # modulo-scheduled
+ add %r15,%r14
+ sbb %r15,%r15 # top-most carry
+ mov %r14,-1*8($tptr)
+ jmp .Lmulx4x_outer
+
+.align 32
+.Lmulx4x_outer:
+ mov ($bptr),%rdx # b[i]
+ lea 8($bptr),$bptr # b++
+ sub $num,$aptr # rewind $aptr
+ mov %r15,($tptr) # save top-most carry
+ lea 64+4*8(%rsp),$tptr
+ sub $num,$nptr # rewind $nptr
+
+ mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
+ xor %ebp,%ebp # xor $zero,$zero # cf=0, of=0
+ mov %rdx,$bi
+ mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
+ adox -4*8($tptr),$mi
+ adcx %r14,%r11
+ mulx 2*8($aptr),%r15,%r13 # ...
+ adox -3*8($tptr),%r11
+ adcx %r15,%r12
+ adox -2*8($tptr),%r12
+ adcx $zero,%r13
+ adox $zero,%r13
+
+ mov $bptr,8(%rsp) # off-load &b[i]
+ mov $mi,%r15
+ imulq 24(%rsp),$mi # "t[0]"*n0
+ xor %ebp,%ebp # xor $zero,$zero # cf=0, of=0
+
+ mulx 3*8($aptr),%rax,%r14
+ mov $mi,%rdx
+ adcx %rax,%r13
+ adox -1*8($tptr),%r13
+ adcx $zero,%r14
+ lea 4*8($aptr),$aptr
+ adox $zero,%r14
+
+ mulx 0*8($nptr),%rax,%r10
+ adcx %rax,%r15 # discarded
+ adox %r11,%r10
+ mulx 1*8($nptr),%rax,%r11
+ adcx %rax,%r10
+ adox %r12,%r11
+ mulx 2*8($nptr),%rax,%r12
+ mov %r10,-4*8($tptr)
+ adcx %rax,%r11
+ adox %r13,%r12
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ mov %r11,-3*8($tptr)
+ lea 4*8($nptr),$nptr
+ adcx %rax,%r12
+ adox $zero,%r15 # of=0
+ mov 48(%rsp),$bptr # counter value
+ mov %r12,-2*8($tptr)
+
+ jmp .Lmulx4x_inner
+
+.align 32
+.Lmulx4x_inner:
+ mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
+ adcx $zero,%r15 # cf=0, modulo-scheduled
+ adox %r14,%r10
+ mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
+ adcx 0*8($tptr),%r10
+ adox %rax,%r11
+ mulx 2*8($aptr),%r12,%rax # ...
+ adcx 1*8($tptr),%r11
+ adox %r14,%r12
+ mulx 3*8($aptr),%r13,%r14
+ mov $mi,%rdx
+ adcx 2*8($tptr),%r12
+ adox %rax,%r13
+ adcx 3*8($tptr),%r13
+ adox $zero,%r14 # of=0
+ lea 4*8($aptr),$aptr
+ lea 4*8($tptr),$tptr
+ adcx $zero,%r14 # cf=0
+
+ adox %r15,%r10
+ mulx 0*8($nptr),%rax,%r15
+ adcx %rax,%r10
+ adox %r15,%r11
+ mulx 1*8($nptr),%rax,%r15
+ adcx %rax,%r11
+ adox %r15,%r12
+ mulx 2*8($nptr),%rax,%r15
+ mov %r10,-5*8($tptr)
+ adcx %rax,%r12
+ adox %r15,%r13
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ mov %r11,-4*8($tptr)
+ mov %r12,-3*8($tptr)
+ adcx %rax,%r13
+ adox $zero,%r15
+ lea 4*8($nptr),$nptr
+ mov %r13,-2*8($tptr)
+
+ dec $bptr # of=0, pass cf
+ jnz .Lmulx4x_inner
+
+ mov 0(%rsp),$num # load num
+ mov 8(%rsp),$bptr # re-load &b[i]
+ adc $zero,%r15 # modulo-scheduled
+ sub 0*8($tptr),$zero # pull top-most carry
+ adc %r15,%r14
+ sbb %r15,%r15 # top-most carry
+ mov %r14,-1*8($tptr)
+
+ cmp 16(%rsp),$bptr
+ jne .Lmulx4x_outer
+
+ lea 64(%rsp),$tptr
+ sub $num,$nptr # rewind $nptr
+ neg %r15
+ mov $num,%rdx
+ shr \$3+2,$num # %cf=0
+ mov 32(%rsp),$rptr # restore rp
+ jmp .Lmulx4x_sub
+
+.align 32
+.Lmulx4x_sub:
+ mov 8*0($tptr),%r11
+ mov 8*1($tptr),%r12
+ mov 8*2($tptr),%r13
+ mov 8*3($tptr),%r14
+ lea 8*4($tptr),$tptr
+ sbb 8*0($nptr),%r11
+ sbb 8*1($nptr),%r12
+ sbb 8*2($nptr),%r13
+ sbb 8*3($nptr),%r14
+ lea 8*4($nptr),$nptr
+ mov %r11,8*0($rptr)
+ mov %r12,8*1($rptr)
+ mov %r13,8*2($rptr)
+ mov %r14,8*3($rptr)
+ lea 8*4($rptr),$rptr
+ dec $num # preserves %cf
+ jnz .Lmulx4x_sub
+
+ sbb \$0,%r15 # top-most carry
+ lea 64(%rsp),$tptr
+ sub %rdx,$rptr # rewind
+
+ movq %r15,%xmm1
+ pxor %xmm0,%xmm0
+ pshufd \$0,%xmm1,%xmm1
+ mov 40(%rsp),%rsi # restore %rsp
+ jmp .Lmulx4x_cond_copy
+
+.align 32
+.Lmulx4x_cond_copy:
+ movdqa 16*0($tptr),%xmm2
+ movdqa 16*1($tptr),%xmm3
+ lea 16*2($tptr),$tptr
+ movdqu 16*0($rptr),%xmm4
+ movdqu 16*1($rptr),%xmm5
+ lea 16*2($rptr),$rptr
+ movdqa %xmm0,-16*2($tptr) # zero tp
+ movdqa %xmm0,-16*1($tptr)
+ pcmpeqd %xmm1,%xmm0
+ pand %xmm1,%xmm2
+ pand %xmm1,%xmm3
+ pand %xmm0,%xmm4
+ pand %xmm0,%xmm5
+ pxor %xmm0,%xmm0
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+ movdqu %xmm4,-16*2($rptr)
+ movdqu %xmm5,-16*1($rptr)
+ sub \$32,%rdx
+ jnz .Lmulx4x_cond_copy
+
+ mov %rdx,($tptr)
+
+ mov \$1,%rax
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lmulx4x_epilogue:
+ ret
+.size bn_mulx4x_mont,.-bn_mulx4x_mont
+___
+}}}
+$code.=<<___;
+.asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+.align 16
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mul_handler,\@abi-omnipotent
+.align 16
+mul_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 192($context),%r10 # pull $num
+ mov 8(%rax,%r10,8),%rax # pull saved stack pointer
+
+ jmp .Lcommon_pop_regs
+.size mul_handler,.-mul_handler
+
+.type sqr_handler,\@abi-omnipotent
+.align 16
+sqr_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<.Lsqr_body
+ jb .Lcommon_seh_tail
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # body label
+ cmp %r10,%rbx # context->Rip>=.Lsqr_epilogue
+ jb .Lcommon_pop_regs
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 8(%r11),%r10d # HandlerData[2]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=.Lsqr_epilogue
+ jae .Lcommon_seh_tail
+
+ mov 40(%rax),%rax # pull saved stack pointer
+
+.Lcommon_pop_regs:
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size sqr_handler,.-sqr_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_bn_mul_mont
+ .rva .LSEH_end_bn_mul_mont
+ .rva .LSEH_info_bn_mul_mont
+
+ .rva .LSEH_begin_bn_mul4x_mont
+ .rva .LSEH_end_bn_mul4x_mont
+ .rva .LSEH_info_bn_mul4x_mont
+
+ .rva .LSEH_begin_bn_sqr8x_mont
+ .rva .LSEH_end_bn_sqr8x_mont
+ .rva .LSEH_info_bn_sqr8x_mont
+___
+$code.=<<___ if ($addx);
+ .rva .LSEH_begin_bn_mulx4x_mont
+ .rva .LSEH_end_bn_mulx4x_mont
+ .rva .LSEH_info_bn_mulx4x_mont
+___
+$code.=<<___;
+.section .xdata
+.align 8
+.LSEH_info_bn_mul_mont:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
+.LSEH_info_bn_mul4x_mont:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.LSEH_info_bn_sqr8x_mont:
+ .byte 9,0,0,0
+ .rva sqr_handler
+ .rva .Lsqr8x_prologue,.Lsqr8x_body,.Lsqr8x_epilogue # HandlerData[]
+.align 8
+___
+$code.=<<___ if ($addx);
+.LSEH_info_bn_mulx4x_mont:
+ .byte 9,0,0,0
+ .rva sqr_handler
+ .rva .Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
+.align 8
+___
+}
+
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/bn/asm/x86_64-mont5.pl b/openssl-1.1.0h/crypto/bn/asm/x86_64-mont5.pl
new file mode 100755
index 0000000..5779059
--- /dev/null
+++ b/openssl-1.1.0h/crypto/bn/asm/x86_64-mont5.pl
@@ -0,0 +1,3835 @@
+#! /usr/bin/env perl
+# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+
+# August 2011.
+#
+# Companion to x86_64-mont.pl that optimizes cache-timing attack
+# countermeasures. The subroutines are produced by replacing bp[i]
+# references in their x86_64-mont.pl counterparts with cache-neutral
+# references to a powers table computed in BN_mod_exp_mont_consttime.
+# In addition, a subroutine that scatters elements of the powers table
+# is implemented, so that scattering/gathering can be tuned without
+# modifying bn_exp.c.
+
+# August 2013.
+#
+# Add MULX/AD*X code paths and additional interfaces to optimize for
+# the branch prediction unit. For input lengths that are multiples of 8,
+# the np argument is not just the modulus value, but one interleaved
+# with 0. This is done to optimize the post-condition...
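+#
+# For orientation only (nothing below emits this): the cache-neutral
+# access pattern can be sketched in plain C as follows.  The table holds
+# 2^5=32 pre-computed powers, word-interleaved so that word k of power
+# idx lives at bp[32*k+idx]; every fetch touches all 32 candidates and
+# keeps exactly one of them with a mask, so the sequence of memory
+# addresses never depends on the secret index.  Function and parameter
+# names are illustrative assumptions, and the straight-line comparison
+# stands in for the SSE2 pcmpeqd used by the real code.
+#
+#	#include <stdint.h>
+#
+#	/* fetch word k of power 'idx' from the interleaved table */
+#	static uint64_t gather_word(const uint64_t *bp, unsigned k,
+#	                            unsigned idx)
+#	{
+#	    uint64_t r = 0;
+#	    for (unsigned i = 0; i < 32; i++) {
+#	        uint64_t mask = (uint64_t)0 - (uint64_t)(i == idx);
+#	        r |= bp[32*k + i] & mask;  /* keep only the idx-th entry */
+#	    }
+#	    return r;
+#	}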
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.23);
+}
+
+if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.10);
+}
+
+if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $addx = ($1>=12);
+}
+
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
+ my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
+ $addx = ($ver>=3.03);
+}
+
+# int bn_mul_mont_gather5(
+$rp="%rdi"; # BN_ULONG *rp,
+$ap="%rsi"; # const BN_ULONG *ap,
+$bp="%rdx"; # const BN_ULONG *bp,
+$np="%rcx"; # const BN_ULONG *np,
+$n0="%r8"; # const BN_ULONG *n0,
+$num="%r9"; # int num,
+ # int idx); # 0 to 2^5-1, "index" in $bp holding
+ # pre-computed powers of a', interlaced
+					# in such a manner that b[0] is $bp[idx],
+ # b[1] is [2^5+idx], etc.
+$lo0="%r10";
+$hi0="%r11";
+$hi1="%r13";
+$i="%r14";
+$j="%r15";
+$m0="%rbx";
+$m1="%rbp";
+
+$code=<<___;
+.text
+
+.extern OPENSSL_ia32cap_P
+
+.globl bn_mul_mont_gather5
+.type bn_mul_mont_gather5,\@function,6
+.align 64
+bn_mul_mont_gather5:
+ mov ${num}d,${num}d
+ mov %rsp,%rax
+ test \$7,${num}d
+ jnz .Lmul_enter
+___
+$code.=<<___ if ($addx);
+ mov OPENSSL_ia32cap_P+8(%rip),%r11d
+___
+$code.=<<___;
+ jmp .Lmul4x_enter
+
+.align 16
+.Lmul_enter:
+ movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ neg $num
+ mov %rsp,%r11
+ lea -280(%rsp,$num,8),%r10 # future alloca(8*(num+2)+256+8)
+ neg $num # restore $num
+ and \$-1024,%r10 # minimize TLB usage
+
+ # An OS-agnostic version of __chkstk.
+ #
+	# Some OSes (Windows) insist on the stack being "wired" to
+	# physical memory in a strictly sequential manner, i.e. if a stack
+	# allocation spans two pages, then a reference to the farthest one
+	# can be punished by SEGV. But page walking does good even on
+	# other OSes, because it guarantees that a rogue thread hits
+	# the guard page before it can do damage to an innocent one...
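+	#
+	# A rough C sketch of the probing below (assumption: touching one
+	# byte per 4KB page is enough to trip the guard page in order; the
+	# real code probes through %rsp itself, top down):
+	#
+	#	static void page_walk(volatile unsigned char *lo,  /* new top */
+	#	                      volatile unsigned char *hi)  /* old top */
+	#	{
+	#	    while (hi - lo >= 4096) {
+	#	        hi -= 4096;
+	#	        (void)*hi;              /* probe one byte per page */
+	#	    }
+	#	}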
+ sub %r10,%r11
+ and \$-4096,%r11
+ lea (%r10,%r11),%rsp
+ mov (%rsp),%r11
+ cmp %r10,%rsp
+ ja .Lmul_page_walk
+ jmp .Lmul_page_walk_done
+
+.Lmul_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r11
+ cmp %r10,%rsp
+ ja .Lmul_page_walk
+.Lmul_page_walk_done:
+
+ lea .Linc(%rip),%r10
+ mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
+.Lmul_body:
+
+ lea 128($bp),%r12 # reassign $bp (+size optimization)
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
+$code.=<<___;
+ movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
+ movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
+ lea 24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
+ and \$-16,%r10
+
+ pshufd \$0,%xmm5,%xmm5 # broadcast index
+ movdqa %xmm1,%xmm4
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..31 to index and save result to stack
+#
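+# The comparison results are stored to the stack once and then re-used by
+# every gather, instead of being recomputed per access.  A rough C
+# equivalent of this precomputation (illustrative only; the code below
+# builds the masks sixteen bytes at a time with pcmpeqd):
+#
+#	/* mask[i] = all-ones iff i == idx; built once, then ANDed with
+#	 * every 32-entry slice of the powers table */
+#	static void build_masks(uint64_t mask[32], unsigned idx)
+#	{
+#	    for (unsigned i = 0; i < 32; i++)
+#	        mask[i] = (uint64_t)0 - (uint64_t)(i == idx);
+#	}
+#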
+$code.=<<___;
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0 # compare to 1,0
+ .byte 0x67
+ movdqa %xmm4,%xmm3
+___
+for($k=0;$k<$STRIDE/16-4;$k+=4) {
+$code.=<<___;
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1 # compare to 3,2
+ movdqa %xmm0,`16*($k+0)+112`(%r10)
+ movdqa %xmm4,%xmm0
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2 # compare to 5,4
+ movdqa %xmm1,`16*($k+1)+112`(%r10)
+ movdqa %xmm4,%xmm1
+
+ paddd %xmm3,%xmm0
+ pcmpeqd %xmm5,%xmm3 # compare to 7,6
+ movdqa %xmm2,`16*($k+2)+112`(%r10)
+ movdqa %xmm4,%xmm2
+
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0
+ movdqa %xmm3,`16*($k+3)+112`(%r10)
+ movdqa %xmm4,%xmm3
+___
+}
+$code.=<<___; # last iteration can be optimized
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1
+ movdqa %xmm0,`16*($k+0)+112`(%r10)
+
+ paddd %xmm2,%xmm3
+ .byte 0x67
+ pcmpeqd %xmm5,%xmm2
+ movdqa %xmm1,`16*($k+1)+112`(%r10)
+
+ pcmpeqd %xmm5,%xmm3
+ movdqa %xmm2,`16*($k+2)+112`(%r10)
+ pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
+
+ pand `16*($k+1)-128`($bp),%xmm1
+ pand `16*($k+2)-128`($bp),%xmm2
+ movdqa %xmm3,`16*($k+3)+112`(%r10)
+ pand `16*($k+3)-128`($bp),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+for($k=0;$k<$STRIDE/16-4;$k+=4) {
+$code.=<<___;
+ movdqa `16*($k+0)-128`($bp),%xmm4
+ movdqa `16*($k+1)-128`($bp),%xmm5
+ movdqa `16*($k+2)-128`($bp),%xmm2
+ pand `16*($k+0)+112`(%r10),%xmm4
+ movdqa `16*($k+3)-128`($bp),%xmm3
+ pand `16*($k+1)+112`(%r10),%xmm5
+ por %xmm4,%xmm0
+ pand `16*($k+2)+112`(%r10),%xmm2
+ por %xmm5,%xmm1
+ pand `16*($k+3)+112`(%r10),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+}
+$code.=<<___;
+ por %xmm1,%xmm0
+ pshufd \$0x4e,%xmm0,%xmm1
+ por %xmm1,%xmm0
+ lea $STRIDE($bp),$bp
+ movq %xmm0,$m0 # m0=bp[0]
+
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+
+ xor $i,$i # i=0
+ xor $j,$j # j=0
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$lo0
+ mov ($np),%rax
+
+ imulq $lo0,$m1 # "tp[0]"*n0
+ mov %rdx,$hi0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .L1st_enter
+
+.align 16
+.L1st:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ mov $lo0,$hi0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.L1st_enter:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ lea 1($j),$j # j++
+ mov %rdx,$lo0
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .L1st # note that upon exit $j==$num, so
+ # they can be used interchangeably
+
+ add %rax,$hi1
+ adc \$0,%rdx
+ add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$num,8) # tp[num-1]
+ mov %rdx,$hi1
+ mov $lo0,$hi0
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ jmp .Louter
+.align 16
+.Louter:
+ lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
+ and \$-16,%rdx
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+___
+for($k=0;$k<$STRIDE/16;$k+=4) {
+$code.=<<___;
+ movdqa `16*($k+0)-128`($bp),%xmm0
+ movdqa `16*($k+1)-128`($bp),%xmm1
+ movdqa `16*($k+2)-128`($bp),%xmm2
+ movdqa `16*($k+3)-128`($bp),%xmm3
+ pand `16*($k+0)-128`(%rdx),%xmm0
+ pand `16*($k+1)-128`(%rdx),%xmm1
+ por %xmm0,%xmm4
+ pand `16*($k+2)-128`(%rdx),%xmm2
+ por %xmm1,%xmm5
+ pand `16*($k+3)-128`(%rdx),%xmm3
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+___
+}
+$code.=<<___;
+ por %xmm5,%xmm4
+ pshufd \$0x4e,%xmm4,%xmm0
+ por %xmm4,%xmm0
+ lea $STRIDE($bp),$bp
+
+ mov ($ap),%rax # ap[0]
+ movq %xmm0,$m0 # m0=bp[i]
+
+ xor $j,$j # j=0
+ mov $n0,$m1
+ mov (%rsp),$lo0
+
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$lo0 # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $lo0,$m1 # tp[0]*n0
+ mov %rdx,$hi0
+
+ mulq $m1 # np[0]*m1
+ add %rax,$lo0 # discarded
+ mov 8($ap),%rax
+ adc \$0,%rdx
+ mov 8(%rsp),$lo0 # tp[1]
+ mov %rdx,$hi1
+
+ lea 1($j),$j # j++
+ jmp .Linner_enter
+
+.align 16
+.Linner:
+ add %rax,$hi1
+ mov ($ap,$j,8),%rax
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$j,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$j,8) # tp[j-1]
+ mov %rdx,$hi1
+
+.Linner_enter:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$hi0
+ mov ($np,$j,8),%rax
+ adc \$0,%rdx
+ add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
+ mov %rdx,$hi0
+ adc \$0,$hi0
+ lea 1($j),$j # j++
+
+ mulq $m1 # np[j]*m1
+ cmp $num,$j
+ jne .Linner # note that upon exit $j==$num, so
+ # they can be used interchangeably
+ add %rax,$hi1
+ adc \$0,%rdx
+ add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
+ mov (%rsp,$num,8),$lo0
+ adc \$0,%rdx
+ mov $hi1,-16(%rsp,$num,8) # tp[num-1]
+ mov %rdx,$hi1
+
+ xor %rdx,%rdx
+ add $hi0,$hi1
+ adc \$0,%rdx
+ add $lo0,$hi1 # pull upmost overflow bit
+ adc \$0,%rdx
+ mov $hi1,-8(%rsp,$num,8)
+ mov %rdx,(%rsp,$num,8) # store upmost overflow bit
+
+ lea 1($i),$i # i++
+ cmp $num,$i
+ jb .Louter
+
+ xor $i,$i # i=0 and clear CF!
+ mov (%rsp),%rax # tp[0]
+ lea (%rsp),$ap # borrow ap for tp
+ mov $num,$j # j=num
+ jmp .Lsub
+.align 16
+.Lsub: sbb ($np,$i,8),%rax
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
+ mov 8($ap,$i,8),%rax # tp[i+1]
+ lea 1($i),$i # i++
+	dec	$j			# doesn't affect CF!
+ jnz .Lsub
+
+ sbb \$0,%rax # handle upmost overflow bit
+ xor $i,$i
+ and %rax,$ap
+ not %rax
+ mov $rp,$np
+ and %rax,$np
+ mov $num,$j # j=num
+ or $np,$ap # ap=borrow?tp:rp
+.align 16
+.Lcopy: # copy or in-place refresh
+ mov ($ap,$i,8),%rax
+ mov $i,(%rsp,$i,8) # zap temporary vector
+ mov %rax,($rp,$i,8) # rp[i]=tp[i]
+ lea 1($i),$i
+ sub \$1,$j
+ jnz .Lcopy
+
+ mov 8(%rsp,$num,8),%rsi # restore %rsp
+ mov \$1,%rax
+
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lmul_epilogue:
+ ret
+.size bn_mul_mont_gather5,.-bn_mul_mont_gather5
+___
+{{{
+my @A=("%r10","%r11");
+my @N=("%r13","%rdi");
+$code.=<<___;
+.type bn_mul4x_mont_gather5,\@function,6
+.align 32
+bn_mul4x_mont_gather5:
+ .byte 0x67
+ mov %rsp,%rax
+.Lmul4x_enter:
+___
+$code.=<<___ if ($addx);
+ and \$0x80108,%r11d
+ cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
+ je .Lmulx4x_enter
+___
+$code.=<<___;
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lmul4x_prologue:
+
+ .byte 0x67
+ shl \$3,${num}d # convert $num to bytes
+ lea ($num,$num,2),%r10 # 3*$num in bytes
+ neg $num # -$num
+
+ ##############################################################
+ # Ensure that stack frame doesn't alias with $rptr+3*$num
+ # modulo 4096, which covers ret[num], am[num] and n[num]
+	# (see bn_exp.c). This is done to allow the memory disambiguation
+	# logic to do its magic. [An extra [num] is allocated in order
+	# to align with bn_power5's frame, which is cleansed after
+	# completing exponentiation. An extra 256 bytes is for the power mask
+	# calculated from the 7th argument, the index.]
+ #
+ lea -320(%rsp,$num,2),%r11
+ mov %rsp,%rbp
+ sub $rp,%r11
+ and \$4095,%r11
+ cmp %r11,%r10
+ jb .Lmul4xsp_alt
+ sub %r11,%rbp # align with $rp
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
+ jmp .Lmul4xsp_done
+
+.align 32
+.Lmul4xsp_alt:
+ lea 4096-320(,$num,2),%r10
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
+ sub %r10,%r11
+ mov \$0,%r10
+ cmovc %r10,%r11
+ sub %r11,%rbp
+.Lmul4xsp_done:
+ and \$-64,%rbp
+ mov %rsp,%r11
+ sub %rbp,%r11
+ and \$-4096,%r11
+ lea (%rbp,%r11),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lmul4x_page_walk
+ jmp .Lmul4x_page_walk_done
+
+.Lmul4x_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lmul4x_page_walk
+.Lmul4x_page_walk_done:
+
+ neg $num
+
+ mov %rax,40(%rsp)
+.Lmul4x_body:
+
+ call mul4x_internal
+
+ mov 40(%rsp),%rsi # restore %rsp
+ mov \$1,%rax
+
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lmul4x_epilogue:
+ ret
+.size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
+
+.type mul4x_internal,\@abi-omnipotent
+.align 32
+mul4x_internal:
+ shl \$5,$num # $num was in bytes
+ movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
+ lea .Linc(%rip),%rax
+ lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
+ shr \$5,$num # restore $num
+___
+ $bp="%r12";
+ $STRIDE=2**5*8; # 5 is "window size"
+ $N=$STRIDE/4; # should match cache line size
+ $tp=$i;
+$code.=<<___;
+ movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
+ movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
+ lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
+ lea 128(%rdx),$bp # size optimization
+
+ pshufd \$0,%xmm5,%xmm5 # broadcast index
+ movdqa %xmm1,%xmm4
+ .byte 0x67,0x67
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..31 to index and save result to stack
+#
+$code.=<<___;
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0 # compare to 1,0
+ .byte 0x67
+ movdqa %xmm4,%xmm3
+___
+for($i=0;$i<$STRIDE/16-4;$i+=4) {
+$code.=<<___;
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1 # compare to 3,2
+ movdqa %xmm0,`16*($i+0)+112`(%r10)
+ movdqa %xmm4,%xmm0
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2 # compare to 5,4
+ movdqa %xmm1,`16*($i+1)+112`(%r10)
+ movdqa %xmm4,%xmm1
+
+ paddd %xmm3,%xmm0
+ pcmpeqd %xmm5,%xmm3 # compare to 7,6
+ movdqa %xmm2,`16*($i+2)+112`(%r10)
+ movdqa %xmm4,%xmm2
+
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0
+ movdqa %xmm3,`16*($i+3)+112`(%r10)
+ movdqa %xmm4,%xmm3
+___
+}
+$code.=<<___; # last iteration can be optimized
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1
+ movdqa %xmm0,`16*($i+0)+112`(%r10)
+
+ paddd %xmm2,%xmm3
+ .byte 0x67
+ pcmpeqd %xmm5,%xmm2
+ movdqa %xmm1,`16*($i+1)+112`(%r10)
+
+ pcmpeqd %xmm5,%xmm3
+ movdqa %xmm2,`16*($i+2)+112`(%r10)
+ pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
+
+ pand `16*($i+1)-128`($bp),%xmm1
+ pand `16*($i+2)-128`($bp),%xmm2
+ movdqa %xmm3,`16*($i+3)+112`(%r10)
+ pand `16*($i+3)-128`($bp),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+for($i=0;$i<$STRIDE/16-4;$i+=4) {
+$code.=<<___;
+ movdqa `16*($i+0)-128`($bp),%xmm4
+ movdqa `16*($i+1)-128`($bp),%xmm5
+ movdqa `16*($i+2)-128`($bp),%xmm2
+ pand `16*($i+0)+112`(%r10),%xmm4
+ movdqa `16*($i+3)-128`($bp),%xmm3
+ pand `16*($i+1)+112`(%r10),%xmm5
+ por %xmm4,%xmm0
+ pand `16*($i+2)+112`(%r10),%xmm2
+ por %xmm5,%xmm1
+ pand `16*($i+3)+112`(%r10),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+}
+$code.=<<___;
+ por %xmm1,%xmm0
+ pshufd \$0x4e,%xmm0,%xmm1
+ por %xmm1,%xmm0
+ lea $STRIDE($bp),$bp
+ movq %xmm0,$m0 # m0=bp[0]
+
+ mov %r13,16+8(%rsp) # save end of b[num]
+ mov $rp, 56+8(%rsp) # save $rp
+
+ mov ($n0),$n0 # pull n0[0] value
+ mov ($ap),%rax
+ lea ($ap,$num),$ap # end of a[num]
+ neg $num
+
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[0]
+ mov %rax,$A[0]
+ mov ($np),%rax
+
+ imulq $A[0],$m1 # "tp[0]"*n0
+ lea 64+8(%rsp),$tp
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # discarded
+ mov 8($ap,$num),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0
+ add %rax,$A[1]
+ mov 8*1($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1
+ add %rax,$N[1]
+ mov 16($ap,$num),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 4*8($num),$j # j=4
+ lea 8*4($np),$np
+ adc \$0,%rdx
+ mov $N[1],($tp)
+ mov %rdx,$N[0]
+ jmp .L1st4x
+
+.align 32
+.L1st4x:
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -8*2($np),%rax
+ lea 32($tp),$tp
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24($tp) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8*1($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16($tp) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov 8*0($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-8($tp) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov 8*1($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ lea 8*4($np),$np
+ adc \$0,%rdx
+ mov $N[1],($tp) # tp[j-1]
+ mov %rdx,$N[0]
+
+ add \$32,$j # j+=4
+ jnz .L1st4x
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[0]
+ mov -8*2($np),%rax
+ lea 32($tp),$tp
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[0],-24($tp) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[0]
+ add %rax,$A[1]
+ mov -8*1($np),%rax
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$num),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
+ adc \$0,%rdx
+ mov $N[1],-16($tp) # tp[j-1]
+ mov %rdx,$N[0]
+
+ lea ($np,$num),$np # rewind $np
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ mov $N[0],-8($tp)
+
+ jmp .Louter4x
+
+.align 32
+.Louter4x:
+ lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization)
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+___
+for($i=0;$i<$STRIDE/16;$i+=4) {
+$code.=<<___;
+ movdqa `16*($i+0)-128`($bp),%xmm0
+ movdqa `16*($i+1)-128`($bp),%xmm1
+ movdqa `16*($i+2)-128`($bp),%xmm2
+ movdqa `16*($i+3)-128`($bp),%xmm3
+ pand `16*($i+0)-128`(%rdx),%xmm0
+ pand `16*($i+1)-128`(%rdx),%xmm1
+ por %xmm0,%xmm4
+ pand `16*($i+2)-128`(%rdx),%xmm2
+ por %xmm1,%xmm5
+ pand `16*($i+3)-128`(%rdx),%xmm3
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+___
+}
+$code.=<<___;
+ por %xmm5,%xmm4
+ pshufd \$0x4e,%xmm4,%xmm0
+ por %xmm4,%xmm0
+ lea $STRIDE($bp),$bp
+ movq %xmm0,$m0 # m0=bp[i]
+
+ mov ($tp,$num),$A[0]
+ mov $n0,$m1
+ mulq $m0 # ap[0]*bp[i]
+ add %rax,$A[0] # ap[0]*bp[i]+tp[0]
+ mov ($np),%rax
+ adc \$0,%rdx
+
+ imulq $A[0],$m1 # tp[0]*n0
+ mov %rdx,$A[1]
+ mov $N[1],($tp) # store upmost overflow bit
+
+ lea ($tp,$num),$tp # rewind $tp
+
+ mulq $m1 # np[0]*m1
+ add %rax,$A[0] # "$N[0]", discarded
+ mov 8($ap,$num),%rax
+ adc \$0,%rdx
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8*1($np),%rax
+ adc \$0,%rdx
+ add 8($tp),$A[1] # +tp[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap,$num),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
+ lea 4*8($num),$j # j=4
+ lea 8*4($np),$np
+ adc \$0,%rdx
+ mov %rdx,$N[0]
+ jmp .Linner4x
+
+.align 32
+.Linner4x:
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -8*2($np),%rax
+ adc \$0,%rdx
+ add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
+ lea 32($tp),$tp
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32($tp) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov -8*1($np),%rax
+ adc \$0,%rdx
+ add -8($tp),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24($tp) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov 8*0($np),%rax
+ adc \$0,%rdx
+ add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov 8($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-16($tp) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov 8*1($np),%rax
+ adc \$0,%rdx
+ add 8($tp),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov 16($ap,$j),%rax
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ lea 8*4($np),$np
+ adc \$0,%rdx
+ mov $N[0],-8($tp) # tp[j-1]
+ mov %rdx,$N[0]
+
+ add \$32,$j # j+=4
+ jnz .Linner4x
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[0]
+ mov -8*2($np),%rax
+ adc \$0,%rdx
+ add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
+ lea 32($tp),$tp
+ adc \$0,%rdx
+ mov %rdx,$A[1]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[0]
+ mov -8($ap),%rax
+ adc \$0,%rdx
+ add $A[0],$N[0]
+ adc \$0,%rdx
+ mov $N[1],-32($tp) # tp[j-1]
+ mov %rdx,$N[1]
+
+ mulq $m0 # ap[j]*bp[i]
+ add %rax,$A[1]
+ mov $m1,%rax
+ mov -8*1($np),$m1
+ adc \$0,%rdx
+ add -8($tp),$A[1]
+ adc \$0,%rdx
+ mov %rdx,$A[0]
+
+ mulq $m1 # np[j]*m1
+ add %rax,$N[1]
+ mov ($ap,$num),%rax # ap[0]
+ adc \$0,%rdx
+ add $A[1],$N[1]
+ adc \$0,%rdx
+ mov $N[0],-24($tp) # tp[j-1]
+ mov %rdx,$N[0]
+
+ mov $N[1],-16($tp) # tp[j-1]
+ lea ($np,$num),$np # rewind $np
+
+ xor $N[1],$N[1]
+ add $A[0],$N[0]
+ adc \$0,$N[1]
+ add ($tp),$N[0] # pull upmost overflow bit
+ adc \$0,$N[1] # upmost overflow bit
+ mov $N[0],-8($tp)
+
+ cmp 16+8(%rsp),$bp
+ jb .Louter4x
+___
+if (1) {
+$code.=<<___;
+ xor %rax,%rax
+ sub $N[0],$m1 # compare top-most words
+ adc $j,$j # $j is zero
+ or $j,$N[1]
+ sub $N[1],%rax # %rax=-$N[1]
+ lea ($tp,$num),%rbx # tptr in .sqr4x_sub
+ mov ($np),%r12
+ lea ($np),%rbp # nptr in .sqr4x_sub
+ mov %r9,%rcx
+ sar \$3+2,%rcx
+ mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
+ dec %r12 # so that after 'not' we get -n[0]
+ xor %r10,%r10
+ mov 8*1(%rbp),%r13
+ mov 8*2(%rbp),%r14
+ mov 8*3(%rbp),%r15
+ jmp .Lsqr4x_sub_entry
+___
+} else {
+my @ri=("%rax",$bp,$m0,$m1);
+my $rp="%rdx";
+$code.=<<___
+ xor \$1,$N[1]
+ lea ($tp,$num),$tp # rewind $tp
+ sar \$5,$num # cf=0
+ lea ($np,$N[1],8),$np
+ mov 56+8(%rsp),$rp # restore $rp
+ jmp .Lsub4x
+
+.align 32
+.Lsub4x:
+ .byte 0x66
+ mov 8*0($tp),@ri[0]
+ mov 8*1($tp),@ri[1]
+ .byte 0x66
+ sbb 16*0($np),@ri[0]
+ mov 8*2($tp),@ri[2]
+ sbb 16*1($np),@ri[1]
+ mov 3*8($tp),@ri[3]
+ lea 4*8($tp),$tp
+ sbb 16*2($np),@ri[2]
+ mov @ri[0],8*0($rp)
+ sbb 16*3($np),@ri[3]
+ lea 16*4($np),$np
+ mov @ri[1],8*1($rp)
+ mov @ri[2],8*2($rp)
+ mov @ri[3],8*3($rp)
+ lea 8*4($rp),$rp
+
+ inc $num
+ jnz .Lsub4x
+
+ ret
+___
+}
+$code.=<<___;
+.size mul4x_internal,.-mul4x_internal
+___
+}}}
+ {{{
+######################################################################
+# void bn_power5(
+my $rptr="%rdi"; # BN_ULONG *rptr,
+my $aptr="%rsi"; # const BN_ULONG *aptr,
+my $bptr="%rdx"; # const void *table,
+my $nptr="%rcx"; # const BN_ULONG *nptr,
+my $n0 ="%r8"; # const BN_ULONG *n0);
+my $num ="%r9"; # int num, has to be divisible by 8
+ # int pwr
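+#
+# In C terms bn_power5 performs one 5-bit fixed-window step of a modular
+# exponentiation: five Montgomery squarings followed by one Montgomery
+# multiplication by the table entry selected by pwr.  A rough sketch,
+# where sqr_mont() and mul_mont_gather() are hypothetical helpers that
+# stand in for __bn_sqr8x_internal/__bn_post4x_internal and
+# mul4x_internal respectively:
+#
+#	#include <stdint.h>
+#
+#	extern void sqr_mont(uint64_t *r, const uint64_t *a,
+#	                     const uint64_t *np, uint64_t n0, int num);
+#	extern void mul_mont_gather(uint64_t *r, const uint64_t *a,
+#	                            const uint64_t *tbl, const uint64_t *np,
+#	                            uint64_t n0, int num, unsigned pwr);
+#
+#	static void power5_step(uint64_t *acc, const uint64_t *tbl,
+#	                        unsigned pwr, const uint64_t *np,
+#	                        uint64_t n0, int num)
+#	{
+#	    for (int i = 0; i < 5; i++)
+#	        sqr_mont(acc, acc, np, n0, num);   /* Montgomery square */
+#	    mul_mont_gather(acc, acc, tbl, np, n0, num, pwr);
+#	}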
+
+my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
+my @A0=("%r10","%r11");
+my @A1=("%r12","%r13");
+my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
+
+$code.=<<___;
+.globl bn_power5
+.type bn_power5,\@function,6
+.align 32
+bn_power5:
+ mov %rsp,%rax
+___
+$code.=<<___ if ($addx);
+ mov OPENSSL_ia32cap_P+8(%rip),%r11d
+ and \$0x80108,%r11d
+ cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
+ je .Lpowerx5_enter
+___
+$code.=<<___;
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lpower5_prologue:
+
+ shl \$3,${num}d # convert $num to bytes
+ lea ($num,$num,2),%r10d # 3*$num
+ neg $num
+ mov ($n0),$n0 # *n0
+
+ ##############################################################
+ # Ensure that stack frame doesn't alias with $rptr+3*$num
+ # modulo 4096, which covers ret[num], am[num] and n[num]
+	# (see bn_exp.c). This is done to allow the memory disambiguation
+	# logic to do its magic. [An extra 256 bytes is for the power mask
+	# calculated from the 7th argument, the index.]
+ #
+ lea -320(%rsp,$num,2),%r11
+ mov %rsp,%rbp
+ sub $rptr,%r11
+ and \$4095,%r11
+ cmp %r11,%r10
+ jb .Lpwr_sp_alt
+ sub %r11,%rbp # align with $aptr
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
+ jmp .Lpwr_sp_done
+
+.align 32
+.Lpwr_sp_alt:
+ lea 4096-320(,$num,2),%r10
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*num*8+256)
+ sub %r10,%r11
+ mov \$0,%r10
+ cmovc %r10,%r11
+ sub %r11,%rbp
+.Lpwr_sp_done:
+ and \$-64,%rbp
+ mov %rsp,%r11
+ sub %rbp,%r11
+ and \$-4096,%r11
+ lea (%rbp,%r11),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lpwr_page_walk
+ jmp .Lpwr_page_walk_done
+
+.Lpwr_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lpwr_page_walk
+.Lpwr_page_walk_done:
+
+ mov $num,%r10
+ neg $num
+
+ ##############################################################
+ # Stack layout
+ #
+ # +0 saved $num, used in reduction section
+ # +8 &t[2*$num], used in reduction section
+ # +32 saved *n0
+ # +40 saved %rsp
+ # +48 t[2*$num]
+ #
+ mov $n0, 32(%rsp)
+ mov %rax, 40(%rsp) # save original %rsp
+.Lpower5_body:
+ movq $rptr,%xmm1 # save $rptr, used in sqr8x
+ movq $nptr,%xmm2 # save $nptr
+ movq %r10, %xmm3 # -$num, used in sqr8x
+ movq $bptr,%xmm4
+
+ call __bn_sqr8x_internal
+ call __bn_post4x_internal
+ call __bn_sqr8x_internal
+ call __bn_post4x_internal
+ call __bn_sqr8x_internal
+ call __bn_post4x_internal
+ call __bn_sqr8x_internal
+ call __bn_post4x_internal
+ call __bn_sqr8x_internal
+ call __bn_post4x_internal
+
+ movq %xmm2,$nptr
+ movq %xmm4,$bptr
+ mov $aptr,$rptr
+ mov 40(%rsp),%rax
+ lea 32(%rsp),$n0
+
+ call mul4x_internal
+
+ mov 40(%rsp),%rsi # restore %rsp
+ mov \$1,%rax
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lpower5_epilogue:
+ ret
+.size bn_power5,.-bn_power5
+
+.globl bn_sqr8x_internal
+.hidden bn_sqr8x_internal
+.type bn_sqr8x_internal,\@abi-omnipotent
+.align 32
+bn_sqr8x_internal:
+__bn_sqr8x_internal:
+ ##############################################################
+ # Squaring part:
+ #
+ # a) multiply-n-add everything but a[i]*a[i];
+ # b) shift result of a) by 1 to the left and accumulate
+ # a[i]*a[i] products;
+ #
+ ##############################################################
+ # a[1]a[0]
+ # a[2]a[0]
+ # a[3]a[0]
+ # a[2]a[1]
+ # a[4]a[0]
+ # a[3]a[1]
+ # a[5]a[0]
+ # a[4]a[1]
+ # a[3]a[2]
+ # a[6]a[0]
+ # a[5]a[1]
+ # a[4]a[2]
+ # a[7]a[0]
+ # a[6]a[1]
+ # a[5]a[2]
+ # a[4]a[3]
+ # a[7]a[1]
+ # a[6]a[2]
+ # a[5]a[3]
+ # a[7]a[2]
+ # a[6]a[3]
+ # a[5]a[4]
+ # a[7]a[3]
+ # a[6]a[4]
+ # a[7]a[4]
+ # a[6]a[5]
+ # a[7]a[5]
+ # a[7]a[6]
+ # a[1]a[0]
+ # a[2]a[0]
+ # a[3]a[0]
+ # a[4]a[0]
+ # a[5]a[0]
+ # a[6]a[0]
+ # a[7]a[0]
+ # a[2]a[1]
+ # a[3]a[1]
+ # a[4]a[1]
+ # a[5]a[1]
+ # a[6]a[1]
+ # a[7]a[1]
+ # a[3]a[2]
+ # a[4]a[2]
+ # a[5]a[2]
+ # a[6]a[2]
+ # a[7]a[2]
+ # a[4]a[3]
+ # a[5]a[3]
+ # a[6]a[3]
+ # a[7]a[3]
+ # a[5]a[4]
+ # a[6]a[4]
+ # a[7]a[4]
+ # a[6]a[5]
+ # a[7]a[5]
+ # a[7]a[6]
+ # a[0]a[0]
+ # a[1]a[1]
+ # a[2]a[2]
+ # a[3]a[3]
+ # a[4]a[4]
+ # a[5]a[5]
+ # a[6]a[6]
+ # a[7]a[7]
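+	#
+	# The same two phases as a plain C sketch (schoolbook order, no
+	# unrolling or scheduling; illustrative only, not what this file
+	# emits):
+	#
+	#	#include <stdint.h>
+	#	#include <string.h>
+	#	typedef unsigned __int128 u128;
+	#
+	#	/* t[] holds 2*n words */
+	#	static void sqr_sketch(uint64_t *t, const uint64_t *a, int n)
+	#	{
+	#	    memset(t, 0, 2*n*sizeof(*t));
+	#	    /* a) accumulate every cross product a[i]*a[j], i<j */
+	#	    for (int i = 0; i < n; i++) {
+	#	        uint64_t c = 0;
+	#	        for (int j = i+1; j < n; j++) {
+	#	            u128 acc = (u128)a[i]*a[j] + t[i+j] + c;
+	#	            t[i+j] = (uint64_t)acc;  c = (uint64_t)(acc>>64);
+	#	        }
+	#	        t[i+n] = c;
+	#	    }
+	#	    /* b) double everything (shift left by one bit)... */
+	#	    for (int k = 2*n-1; k > 0; k--)
+	#	        t[k] = (t[k]<<1) | (t[k-1]>>63);
+	#	    t[0] <<= 1;
+	#	    /* ...and accumulate the diagonal a[i]*a[i] products */
+	#	    uint64_t c = 0;
+	#	    for (int i = 0; i < n; i++) {
+	#	        u128 acc = (u128)a[i]*a[i] + t[2*i] + c;
+	#	        t[2*i] = (uint64_t)acc;
+	#	        acc = (u128)t[2*i+1] + (uint64_t)(acc>>64);
+	#	        t[2*i+1] = (uint64_t)acc;  c = (uint64_t)(acc>>64);
+	#	    }
+	#	}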
+
+ lea 32(%r10),$i # $i=-($num-32)
+ lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
+
+ mov $num,$j # $j=$num
+
+ # comments apply to $num==8 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mul $a0 # a[1]*a[0]
+ mov %rax,$A0[0] # a[1]*a[0]
+ mov $ai,%rax # a[2]
+ mov %rdx,$A0[1]
+ mov $A0[0],-24($tptr,$i) # t[1]
+
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc \$0,%rdx
+ mov $A0[1],-16($tptr,$i) # t[2]
+ mov %rdx,$A0[0]
+
+
+ mov -8($aptr,$i),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ mov %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ mov %rdx,$A1[1]
+
+ lea ($i),$j
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ mov %rdx,$A0[1]
+ adc \$0,$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[3]
+ jmp .Lsqr4x_1st
+
+.align 32
+.Lsqr4x_1st:
+ mov ($aptr,$j),$ai # a[4]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ mov %rdx,$A1[0]
+ adc \$0,$A1[0]
+
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ mov 8($aptr,$j),$ai # a[5]
+ mov %rdx,$A0[0]
+ adc \$0,$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+
+
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $ai,%rax
+ mov $A0[1],($tptr,$j) # t[4]
+ mov %rdx,$A1[1]
+ adc \$0,$A1[1]
+
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ mov 16($aptr,$j),$ai # a[6]
+ mov %rdx,$A0[1]
+ adc \$0,$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1] # a[5]*a[3]+t[6]
+ mov $ai,%rax
+ mov $A0[0],8($tptr,$j) # t[5]
+ mov %rdx,$A1[0]
+ adc \$0,$A1[0]
+
+ mul $a0 # a[6]*a[2]
+ add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
+ mov $ai,%rax # a[3]
+ mov 24($aptr,$j),$ai # a[7]
+ mov %rdx,$A0[0]
+ adc \$0,$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+
+
+ mul $a1 # a[6]*a[5]
+ add %rax,$A1[0] # a[6]*a[5]+t[7]
+ mov $ai,%rax
+ mov $A0[1],16($tptr,$j) # t[6]
+ mov %rdx,$A1[1]
+ adc \$0,$A1[1]
+ lea 32($j),$j
+
+ mul $a0 # a[7]*a[4]
+ add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
+ mov $ai,%rax
+ mov %rdx,$A0[1]
+ adc \$0,$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[7]
+
+ cmp \$0,$j
+ jne .Lsqr4x_1st
+
+ mul $a1 # a[7]*a[5]
+ add %rax,$A1[1]
+ lea 16($i),$i
+ adc \$0,%rdx
+ add $A0[1],$A1[1]
+ adc \$0,%rdx
+
+ mov $A1[1],($tptr) # t[8]
+ mov %rdx,$A1[0]
+ mov %rdx,8($tptr) # t[9]
+ jmp .Lsqr4x_outer
+
+.align 32
+.Lsqr4x_outer: # comments apply to $num==6 case
+ mov -32($aptr,$i),$a0 # a[0]
+ lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr,$i),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr,$i),$ai # a[2]
+ mov %rax,$a1
+
+ mul $a0 # a[1]*a[0]
+ mov -24($tptr,$i),$A0[0] # t[1]
+ add %rax,$A0[0] # a[1]*a[0]+t[1]
+ mov $ai,%rax # a[2]
+ adc \$0,%rdx
+ mov $A0[0],-24($tptr,$i) # t[1]
+ mov %rdx,$A0[1]
+
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ adc \$0,%rdx
+ add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
+ mov %rdx,$A0[0]
+ adc \$0,$A0[0]
+ mov $A0[1],-16($tptr,$i) # t[2]
+
+ xor $A1[0],$A1[0]
+
+ mov -8($aptr,$i),$ai # a[3]
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc \$0,%rdx
+ add -8($tptr,$i),$A1[0]
+ mov %rdx,$A1[1]
+ adc \$0,$A1[1]
+
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ adc \$0,%rdx
+ add $A1[0],$A0[0]
+ mov %rdx,$A0[1]
+ adc \$0,$A0[1]
+ mov $A0[0],-8($tptr,$i) # t[3]
+
+ lea ($i),$j
+ jmp .Lsqr4x_inner
+
+.align 32
+.Lsqr4x_inner:
+ mov ($aptr,$j),$ai # a[4]
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1] # a[3]*a[1]+t[4]
+ mov $ai,%rax
+ mov %rdx,$A1[0]
+ adc \$0,$A1[0]
+ add ($tptr,$j),$A1[1]
+ adc \$0,$A1[0]
+
+ .byte 0x67
+ mul $a0 # a[4]*a[0]
+ add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
+ mov $ai,%rax # a[3]
+ mov 8($aptr,$j),$ai # a[5]
+ mov %rdx,$A0[0]
+ adc \$0,$A0[0]
+ add $A1[1],$A0[1]
+ adc \$0,$A0[0]
+
+ mul $a1 # a[4]*a[3]
+ add %rax,$A1[0] # a[4]*a[3]+t[5]
+ mov $A0[1],($tptr,$j) # t[4]
+ mov $ai,%rax
+ mov %rdx,$A1[1]
+ adc \$0,$A1[1]
+ add 8($tptr,$j),$A1[0]
+ lea 16($j),$j # j++
+ adc \$0,$A1[1]
+
+ mul $a0 # a[5]*a[2]
+ add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
+ mov $ai,%rax
+ adc \$0,%rdx
+ add $A1[0],$A0[0]
+ mov %rdx,$A0[1]
+ adc \$0,$A0[1]
+ mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
+
+ cmp \$0,$j
+ jne .Lsqr4x_inner
+
+ .byte 0x67
+ mul $a1 # a[5]*a[3]
+ add %rax,$A1[1]
+ adc \$0,%rdx
+ add $A0[1],$A1[1]
+ adc \$0,%rdx
+
+ mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
+ mov %rdx,$A1[0]
+ mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
+
+ add \$16,$i
+ jnz .Lsqr4x_outer
+
+ # comments apply to $num==4 case
+ mov -32($aptr),$a0 # a[0]
+ lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
+ mov -24($aptr),%rax # a[1]
+ lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
+ mov -16($aptr),$ai # a[2]
+ mov %rax,$a1
+
+ mul $a0 # a[1]*a[0]
+ add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
+ mov $ai,%rax # a[2]
+ mov %rdx,$A0[1]
+ adc \$0,$A0[1]
+
+ mul $a0 # a[2]*a[0]
+ add %rax,$A0[1]
+ mov $ai,%rax
+ mov $A0[0],-24($tptr) # t[1]
+ mov %rdx,$A0[0]
+ adc \$0,$A0[0]
+ add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
+ mov -8($aptr),$ai # a[3]
+ adc \$0,$A0[0]
+
+ mul $a1 # a[2]*a[1]
+ add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
+ mov $ai,%rax
+ mov $A0[1],-16($tptr) # t[2]
+ mov %rdx,$A1[1]
+ adc \$0,$A1[1]
+
+ mul $a0 # a[3]*a[0]
+ add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
+ mov $ai,%rax
+ mov %rdx,$A0[1]
+ adc \$0,$A0[1]
+ add $A1[0],$A0[0]
+ adc \$0,$A0[1]
+ mov $A0[0],-8($tptr) # t[3]
+
+ mul $a1 # a[3]*a[1]
+ add %rax,$A1[1]
+ mov -16($aptr),%rax # a[2]
+ adc \$0,%rdx
+ add $A0[1],$A1[1]
+ adc \$0,%rdx
+
+ mov $A1[1],($tptr) # t[4]
+ mov %rdx,$A1[0]
+ mov %rdx,8($tptr) # t[5]
+
+ mul $ai # a[2]*a[3]
+___
+{
+my ($shift,$carry)=($a0,$a1);
+my @S=(@A1,$ai,$n0);
+$code.=<<___;
+ add \$16,$i
+ xor $shift,$shift
+ sub $num,$i # $i=16-$num
+ xor $carry,$carry
+
+ add $A1[0],%rax # t[5]
+ adc \$0,%rdx
+ mov %rax,8($tptr) # t[5]
+ mov %rdx,16($tptr) # t[6]
+ mov $carry,24($tptr) # t[7]
+
+ mov -16($aptr,$i),%rax # a[0]
+ lea 48+8(%rsp),$tptr
+ xor $A0[0],$A0[0] # t[0]
+ mov 8($tptr),$A0[1] # t[1]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],($tptr)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],8($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],16($tptr)
+ adc %rdx,$S[3]
+ lea 16($i),$i
+ mov $S[3],24($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ lea 64($tptr),$tptr
+ jmp .Lsqr4x_shift_n_add
+
+.align 32
+.Lsqr4x_shift_n_add:
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],-24($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 0($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],-16($tptr)
+ adc %rdx,$S[3]
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ mov $S[3],-8($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov 8($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[0],0($tptr)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
+ mov $S[1],8($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[2]
+ mov 16($aptr,$i),%rax # a[i+1] # prefetch
+ mov $S[2],16($tptr)
+ adc %rdx,$S[3]
+ mov $S[3],24($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ lea 64($tptr),$tptr
+ add \$32,$i
+ jnz .Lsqr4x_shift_n_add
+
+ lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
+ .byte 0x67
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[1] # | t[2*i]>>63
+ mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
+ mov $A0[1],$shift # shift=t[2*i+1]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
+ adc %rax,$S[0]
+ mov -8($aptr),%rax # a[i+1] # prefetch
+ mov $S[0],-32($tptr)
+ adc %rdx,$S[1]
+
+ lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
+ mov $S[1],-24($tptr)
+ sbb $carry,$carry # mov cf,$carry
+ shr \$63,$A0[0]
+ lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
+ shr \$63,$A0[1]
+ or $A0[0],$S[3] # | t[2*i]>>63
+ mul %rax # a[i]*a[i]
+ neg $carry # mov $carry,cf
+ adc %rax,$S[2]
+ adc %rdx,$S[3]
+ mov $S[2],-16($tptr)
+ mov $S[3],-8($tptr)
+___
+}
+######################################################################
+# Montgomery reduction part, "word-by-word" algorithm.
+#
+# This new path is inspired by multiple submissions from Intel, by
+# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
+# Vinodh Gopal...
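+# Not used by the generator -- a minimal plain-Perl model of the word-by-word
+# reduction implemented below, assuming 64-bit words, Math::BigInt arguments
+# and n0 == -n^-1 mod 2^64; the helper name is illustrative only, not part of
+# the original module.  Each pass multiplies n by m = t[i]*n0 mod 2^64 and
+# adds it at word i, zeroing that word, so after $num passes t is divisible
+# by R = 2^(64*$num).
+use Math::BigInt;
+sub mont_reduce_model {
+    my ($t, $n, $n0, $num) = @_;	# t has 2*$num words, n is the modulus
+    my $w = Math::BigInt->new(1)->blsft(64);		# word base 2^64
+    for my $i (0 .. $num-1) {
+	my $m = (($t->copy()->brsft(64*$i) % $w) * $n0) % $w;	# t[i]*n0 mod 2^64
+	$t = $t + $n * $m * Math::BigInt->new(1)->blsft(64*$i);	# zero word i
+    }
+    $t = $t->brsft(64*$num);		# exact division by R
+    $t = $t - $n if $t >= $n;		# final conditional subtraction
+    return $t;				# == input * R^-1 mod n
+}
+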
+{
+my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
+
+$code.=<<___;
+ movq %xmm2,$nptr
+__bn_sqr8x_reduction:
+ xor %rax,%rax
+ lea ($nptr,$num),%rcx # end of n[]
+ lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
+ mov %rcx,0+8(%rsp)
+ lea 48+8(%rsp,$num),$tptr # end of initial t[] window
+ mov %rdx,8+8(%rsp)
+ neg $num
+ jmp .L8x_reduction_loop
+
+.align 32
+.L8x_reduction_loop:
+ lea ($tptr,$num),$tptr # start of current t[] window
+ .byte 0x66
+ mov 8*0($tptr),$m0
+ mov 8*1($tptr),%r9
+ mov 8*2($tptr),%r10
+ mov 8*3($tptr),%r11
+ mov 8*4($tptr),%r12
+ mov 8*5($tptr),%r13
+ mov 8*6($tptr),%r14
+ mov 8*7($tptr),%r15
+ mov %rax,(%rdx) # store top-most carry bit
+ lea 8*8($tptr),$tptr
+
+ .byte 0x67
+ mov $m0,%r8
+ imulq 32+8(%rsp),$m0 # n0*a[0]
+ mov 8*0($nptr),%rax # n[0]
+ mov \$8,%ecx
+ jmp .L8x_reduce
+
+.align 32
+.L8x_reduce:
+ mulq $m0
+ mov 8*1($nptr),%rax # n[1]
+ neg %r8
+ mov %rdx,%r8
+ adc \$0,%r8
+
+ mulq $m0
+ add %rax,%r9
+ mov 8*2($nptr),%rax
+ adc \$0,%rdx
+ add %r9,%r8
+ mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
+ mov %rdx,%r9
+ adc \$0,%r9
+
+ mulq $m0
+ add %rax,%r10
+ mov 8*3($nptr),%rax
+ adc \$0,%rdx
+ add %r10,%r9
+ mov 32+8(%rsp),$carry # pull n0, borrow $carry
+ mov %rdx,%r10
+ adc \$0,%r10
+
+ mulq $m0
+ add %rax,%r11
+ mov 8*4($nptr),%rax
+ adc \$0,%rdx
+ imulq %r8,$carry # modulo-scheduled
+ add %r11,%r10
+ mov %rdx,%r11
+ adc \$0,%r11
+
+ mulq $m0
+ add %rax,%r12
+ mov 8*5($nptr),%rax
+ adc \$0,%rdx
+ add %r12,%r11
+ mov %rdx,%r12
+ adc \$0,%r12
+
+ mulq $m0
+ add %rax,%r13
+ mov 8*6($nptr),%rax
+ adc \$0,%rdx
+ add %r13,%r12
+ mov %rdx,%r13
+ adc \$0,%r13
+
+ mulq $m0
+ add %rax,%r14
+ mov 8*7($nptr),%rax
+ adc \$0,%rdx
+ add %r14,%r13
+ mov %rdx,%r14
+ adc \$0,%r14
+
+ mulq $m0
+ mov $carry,$m0 # n0*a[i]
+ add %rax,%r15
+ mov 8*0($nptr),%rax # n[0]
+ adc \$0,%rdx
+ add %r15,%r14
+ mov %rdx,%r15
+ adc \$0,%r15
+
+ dec %ecx
+ jnz .L8x_reduce
+
+ lea 8*8($nptr),$nptr
+ xor %rax,%rax
+ mov 8+8(%rsp),%rdx # pull end of t[]
+ cmp 0+8(%rsp),$nptr # end of n[]?
+ jae .L8x_no_tail
+
+ .byte 0x66
+ add 8*0($tptr),%r8
+ adc 8*1($tptr),%r9
+ adc 8*2($tptr),%r10
+ adc 8*3($tptr),%r11
+ adc 8*4($tptr),%r12
+ adc 8*5($tptr),%r13
+ adc 8*6($tptr),%r14
+ adc 8*7($tptr),%r15
+ sbb $carry,$carry # top carry
+
+ mov 48+56+8(%rsp),$m0 # pull n0*a[0]
+ mov \$8,%ecx
+ mov 8*0($nptr),%rax
+ jmp .L8x_tail
+
+.align 32
+.L8x_tail:
+ mulq $m0
+ add %rax,%r8
+ mov 8*1($nptr),%rax
+ mov %r8,($tptr) # save result
+ mov %rdx,%r8
+ adc \$0,%r8
+
+ mulq $m0
+ add %rax,%r9
+ mov 8*2($nptr),%rax
+ adc \$0,%rdx
+ add %r9,%r8
+ lea 8($tptr),$tptr # $tptr++
+ mov %rdx,%r9
+ adc \$0,%r9
+
+ mulq $m0
+ add %rax,%r10
+ mov 8*3($nptr),%rax
+ adc \$0,%rdx
+ add %r10,%r9
+ mov %rdx,%r10
+ adc \$0,%r10
+
+ mulq $m0
+ add %rax,%r11
+ mov 8*4($nptr),%rax
+ adc \$0,%rdx
+ add %r11,%r10
+ mov %rdx,%r11
+ adc \$0,%r11
+
+ mulq $m0
+ add %rax,%r12
+ mov 8*5($nptr),%rax
+ adc \$0,%rdx
+ add %r12,%r11
+ mov %rdx,%r12
+ adc \$0,%r12
+
+ mulq $m0
+ add %rax,%r13
+ mov 8*6($nptr),%rax
+ adc \$0,%rdx
+ add %r13,%r12
+ mov %rdx,%r13
+ adc \$0,%r13
+
+ mulq $m0
+ add %rax,%r14
+ mov 8*7($nptr),%rax
+ adc \$0,%rdx
+ add %r14,%r13
+ mov %rdx,%r14
+ adc \$0,%r14
+
+ mulq $m0
+	mov	48-16+8(%rsp,%rcx,8),$m0	# pull n0*a[i]
+ add %rax,%r15
+ adc \$0,%rdx
+ add %r15,%r14
+ mov 8*0($nptr),%rax # pull n[0]
+ mov %rdx,%r15
+ adc \$0,%r15
+
+ dec %ecx
+ jnz .L8x_tail
+
+ lea 8*8($nptr),$nptr
+ mov 8+8(%rsp),%rdx # pull end of t[]
+ cmp 0+8(%rsp),$nptr # end of n[]?
+ jae .L8x_tail_done # break out of loop
+
+ mov 48+56+8(%rsp),$m0 # pull n0*a[0]
+ neg $carry
+ mov 8*0($nptr),%rax # pull n[0]
+ adc 8*0($tptr),%r8
+ adc 8*1($tptr),%r9
+ adc 8*2($tptr),%r10
+ adc 8*3($tptr),%r11
+ adc 8*4($tptr),%r12
+ adc 8*5($tptr),%r13
+ adc 8*6($tptr),%r14
+ adc 8*7($tptr),%r15
+ sbb $carry,$carry # top carry
+
+ mov \$8,%ecx
+ jmp .L8x_tail
+
+.align 32
+.L8x_tail_done:
+ xor %rax,%rax
+ add (%rdx),%r8 # can this overflow?
+ adc \$0,%r9
+ adc \$0,%r10
+ adc \$0,%r11
+ adc \$0,%r12
+ adc \$0,%r13
+ adc \$0,%r14
+ adc \$0,%r15
+ adc \$0,%rax
+
+ neg $carry
+.L8x_no_tail:
+ adc 8*0($tptr),%r8
+ adc 8*1($tptr),%r9
+ adc 8*2($tptr),%r10
+ adc 8*3($tptr),%r11
+ adc 8*4($tptr),%r12
+ adc 8*5($tptr),%r13
+ adc 8*6($tptr),%r14
+ adc 8*7($tptr),%r15
+ adc \$0,%rax # top-most carry
+ mov -8($nptr),%rcx # np[num-1]
+ xor $carry,$carry
+
+ movq %xmm2,$nptr # restore $nptr
+
+ mov %r8,8*0($tptr) # store top 512 bits
+ mov %r9,8*1($tptr)
+ movq %xmm3,$num # $num is %r9, can't be moved upwards
+ mov %r10,8*2($tptr)
+ mov %r11,8*3($tptr)
+ mov %r12,8*4($tptr)
+ mov %r13,8*5($tptr)
+ mov %r14,8*6($tptr)
+ mov %r15,8*7($tptr)
+ lea 8*8($tptr),$tptr
+
+ cmp %rdx,$tptr # end of t[]?
+ jb .L8x_reduction_loop
+ ret
+.size bn_sqr8x_internal,.-bn_sqr8x_internal
+___
+}
+##############################################################
+# Post-condition, 4x unrolled
+#
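+# Not used by the generator -- a word-level Perl sketch of the masked final
+# subtraction below, assuming little-endian Math::BigInt words and an odd
+# modulus (so n[0] != 0); the helper name is illustrative only.  With the
+# mask set, the words of -n (note the dec/not trick on n[0] in the code)
+# are added with carries, i.e. n is subtracted; with the mask clear, t is
+# simply copied through.
+use Math::BigInt;
+sub post4x_model {
+    my ($t, $n, $subtract) = @_;	# $subtract: 0 or 1, decided by the caller
+    my $w    = Math::BigInt->new(1)->blsft(64);
+    my $ones = $w->copy()->bdec();	# 0xffffffffffffffff
+    my ($cf, @r) = (0);
+    for my $i (0 .. $#$t) {
+	my $negn = $i ? $ones->copy()->bsub($n->[$i])	# ~n[i]
+		      : $w->copy()->bsub($n->[0]);	# -n[0] mod 2^64
+	$negn = Math::BigInt->bzero() unless $subtract;	# apply the mask
+	my $s = $t->[$i]->copy()->badd($negn)->badd($cf);
+	push @r, $s->copy()->bmod($w);
+	$cf = $s->bcmp($w) < 0 ? 0 : 1;
+    }
+    return \@r;				# t or t-n, top carry discarded
+}
+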
+{
+my ($tptr,$nptr)=("%rbx","%rbp");
+$code.=<<___;
+.type __bn_post4x_internal,\@abi-omnipotent
+.align 32
+__bn_post4x_internal:
+ mov 8*0($nptr),%r12
+ lea (%rdi,$num),$tptr # %rdi was $tptr above
+ mov $num,%rcx
+ movq %xmm1,$rptr # restore $rptr
+ neg %rax
+ movq %xmm1,$aptr # prepare for back-to-back call
+ sar \$3+2,%rcx
+ dec %r12 # so that after 'not' we get -n[0]
+ xor %r10,%r10
+ mov 8*1($nptr),%r13
+ mov 8*2($nptr),%r14
+ mov 8*3($nptr),%r15
+ jmp .Lsqr4x_sub_entry
+
+.align 16
+.Lsqr4x_sub:
+ mov 8*0($nptr),%r12
+ mov 8*1($nptr),%r13
+ mov 8*2($nptr),%r14
+ mov 8*3($nptr),%r15
+.Lsqr4x_sub_entry:
+ lea 8*4($nptr),$nptr
+ not %r12
+ not %r13
+ not %r14
+ not %r15
+ and %rax,%r12
+ and %rax,%r13
+ and %rax,%r14
+ and %rax,%r15
+
+ neg %r10 # mov %r10,%cf
+ adc 8*0($tptr),%r12
+ adc 8*1($tptr),%r13
+ adc 8*2($tptr),%r14
+ adc 8*3($tptr),%r15
+ mov %r12,8*0($rptr)
+ lea 8*4($tptr),$tptr
+ mov %r13,8*1($rptr)
+ sbb %r10,%r10 # mov %cf,%r10
+ mov %r14,8*2($rptr)
+ mov %r15,8*3($rptr)
+ lea 8*4($rptr),$rptr
+
+ inc %rcx # pass %cf
+ jnz .Lsqr4x_sub
+
+ mov $num,%r10 # prepare for back-to-back call
+ neg $num # restore $num
+ ret
+.size __bn_post4x_internal,.-__bn_post4x_internal
+___
+}
+{
+$code.=<<___;
+.globl bn_from_montgomery
+.type bn_from_montgomery,\@abi-omnipotent
+.align 32
+bn_from_montgomery:
+ testl \$7,`($win64?"48(%rsp)":"%r9d")`
+ jz bn_from_mont8x
+ xor %eax,%eax
+ ret
+.size bn_from_montgomery,.-bn_from_montgomery
+
+.type bn_from_mont8x,\@function,6
+.align 32
+bn_from_mont8x:
+ .byte 0x67
+ mov %rsp,%rax
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lfrom_prologue:
+
+ shl \$3,${num}d # convert $num to bytes
+ lea ($num,$num,2),%r10 # 3*$num in bytes
+ neg $num
+ mov ($n0),$n0 # *n0
+
+ ##############################################################
+ # Ensure that stack frame doesn't alias with $rptr+3*$num
+ # modulo 4096, which covers ret[num], am[num] and n[num]
+	# (see bn_exp.c). The stack is allocated so as to align with
+	# bn_power5's frame, and as bn_from_montgomery happens to be the
+	# last operation, we use the opportunity to cleanse it.
+ #
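+	# For example (illustrative addresses only): if $rptr were 0x7fff0010
+	# and the temporary frame started at 0x7ffec010, the two would differ
+	# by a multiple of 4096, so loads from ret[]/am[]/n[] and stores to
+	# t[] would keep hitting the same offsets within a 4K page and upset
+	# the CPU's memory-disambiguation logic; the adjustment below nudges
+	# %rbp until no such 4K-aliased overlap with $rptr+3*$num remains.
+	#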
+ lea -320(%rsp,$num,2),%r11
+ mov %rsp,%rbp
+ sub $rptr,%r11
+ and \$4095,%r11
+ cmp %r11,%r10
+ jb .Lfrom_sp_alt
+ sub %r11,%rbp # align with $aptr
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
+ jmp .Lfrom_sp_done
+
+.align 32
+.Lfrom_sp_alt:
+ lea 4096-320(,$num,2),%r10
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
+ sub %r10,%r11
+ mov \$0,%r10
+ cmovc %r10,%r11
+ sub %r11,%rbp
+.Lfrom_sp_done:
+ and \$-64,%rbp
+ mov %rsp,%r11
+ sub %rbp,%r11
+ and \$-4096,%r11
+ lea (%rbp,%r11),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lfrom_page_walk
+ jmp .Lfrom_page_walk_done
+
+.Lfrom_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lfrom_page_walk
+.Lfrom_page_walk_done:
+
+ mov $num,%r10
+ neg $num
+
+ ##############################################################
+ # Stack layout
+ #
+ # +0 saved $num, used in reduction section
+ # +8 &t[2*$num], used in reduction section
+ # +32 saved *n0
+ # +40 saved %rsp
+ # +48 t[2*$num]
+ #
+ mov $n0, 32(%rsp)
+ mov %rax, 40(%rsp) # save original %rsp
+.Lfrom_body:
+ mov $num,%r11
+ lea 48(%rsp),%rax
+ pxor %xmm0,%xmm0
+ jmp .Lmul_by_1
+
+.align 32
+.Lmul_by_1:
+ movdqu ($aptr),%xmm1
+ movdqu 16($aptr),%xmm2
+ movdqu 32($aptr),%xmm3
+ movdqa %xmm0,(%rax,$num)
+ movdqu 48($aptr),%xmm4
+ movdqa %xmm0,16(%rax,$num)
+ .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
+ movdqa %xmm1,(%rax)
+ movdqa %xmm0,32(%rax,$num)
+ movdqa %xmm2,16(%rax)
+ movdqa %xmm0,48(%rax,$num)
+ movdqa %xmm3,32(%rax)
+ movdqa %xmm4,48(%rax)
+ lea 64(%rax),%rax
+ sub \$64,%r11
+ jnz .Lmul_by_1
+
+ movq $rptr,%xmm1
+ movq $nptr,%xmm2
+ .byte 0x67
+ mov $nptr,%rbp
+ movq %r10, %xmm3 # -num
+___
+$code.=<<___ if ($addx);
+ mov OPENSSL_ia32cap_P+8(%rip),%r11d
+ and \$0x80108,%r11d
+ cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
+ jne .Lfrom_mont_nox
+
+ lea (%rax,$num),$rptr
+ call __bn_sqrx8x_reduction
+ call __bn_postx4x_internal
+
+ pxor %xmm0,%xmm0
+ lea 48(%rsp),%rax
+ mov 40(%rsp),%rsi # restore %rsp
+ jmp .Lfrom_mont_zero
+
+.align 32
+.Lfrom_mont_nox:
+___
+$code.=<<___;
+ call __bn_sqr8x_reduction
+ call __bn_post4x_internal
+
+ pxor %xmm0,%xmm0
+ lea 48(%rsp),%rax
+ mov 40(%rsp),%rsi # restore %rsp
+ jmp .Lfrom_mont_zero
+
+.align 32
+.Lfrom_mont_zero:
+ movdqa %xmm0,16*0(%rax)
+ movdqa %xmm0,16*1(%rax)
+ movdqa %xmm0,16*2(%rax)
+ movdqa %xmm0,16*3(%rax)
+ lea 16*4(%rax),%rax
+ sub \$32,$num
+ jnz .Lfrom_mont_zero
+
+ mov \$1,%rax
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lfrom_epilogue:
+ ret
+.size bn_from_mont8x,.-bn_from_mont8x
+___
+}
+}}}
+
+if ($addx) {{{
+my $bp="%rdx"; # restore original value
+
+$code.=<<___;
+.type bn_mulx4x_mont_gather5,\@function,6
+.align 32
+bn_mulx4x_mont_gather5:
+ mov %rsp,%rax
+.Lmulx4x_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lmulx4x_prologue:
+
+ shl \$3,${num}d # convert $num to bytes
+ lea ($num,$num,2),%r10 # 3*$num in bytes
+ neg $num # -$num
+ mov ($n0),$n0 # *n0
+
+ ##############################################################
+ # Ensure that stack frame doesn't alias with $rptr+3*$num
+ # modulo 4096, which covers ret[num], am[num] and n[num]
+	# (see bn_exp.c). This is done to allow the memory disambiguation
+	# logic to do its magic. [An extra [num] is allocated in order
+	# to align with bn_power5's frame, which is cleansed after
+	# completing exponentiation. An extra 256 bytes is for the power
+	# mask calculated from the 7th argument, the index.]
+ #
+ lea -320(%rsp,$num,2),%r11
+ mov %rsp,%rbp
+ sub $rp,%r11
+ and \$4095,%r11
+ cmp %r11,%r10
+ jb .Lmulx4xsp_alt
+ sub %r11,%rbp # align with $aptr
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
+ jmp .Lmulx4xsp_done
+
+.Lmulx4xsp_alt:
+ lea 4096-320(,$num,2),%r10
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
+ sub %r10,%r11
+ mov \$0,%r10
+ cmovc %r10,%r11
+ sub %r11,%rbp
+.Lmulx4xsp_done:
+ and \$-64,%rbp # ensure alignment
+ mov %rsp,%r11
+ sub %rbp,%r11
+ and \$-4096,%r11
+ lea (%rbp,%r11),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lmulx4x_page_walk
+ jmp .Lmulx4x_page_walk_done
+
+.Lmulx4x_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lmulx4x_page_walk
+.Lmulx4x_page_walk_done:
+
+ ##############################################################
+ # Stack layout
+ # +0 -num
+ # +8 off-loaded &b[i]
+ # +16 end of b[num]
+ # +24 inner counter
+ # +32 saved n0
+ # +40 saved %rsp
+ # +48
+ # +56 saved rp
+ # +64 tmp[num+1]
+ #
+ mov $n0, 32(%rsp) # save *n0
+ mov %rax,40(%rsp) # save original %rsp
+.Lmulx4x_body:
+ call mulx4x_internal
+
+ mov 40(%rsp),%rsi # restore %rsp
+ mov \$1,%rax
+
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lmulx4x_epilogue:
+ ret
+.size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
+
+.type mulx4x_internal,\@abi-omnipotent
+.align 32
+mulx4x_internal:
+ mov $num,8(%rsp) # save -$num (it was in bytes)
+ mov $num,%r10
+ neg $num # restore $num
+ shl \$5,$num
+ neg %r10 # restore $num
+ lea 128($bp,$num),%r13 # end of powers table (+size optimization)
+ shr \$5+5,$num
+ movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
+ sub \$1,$num
+ lea .Linc(%rip),%rax
+ mov %r13,16+8(%rsp) # end of b[num]
+ mov $num,24+8(%rsp) # inner counter
+ mov $rp, 56+8(%rsp) # save $rp
+___
+my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
+ ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
+my $rptr=$bptr;
+my $STRIDE=2**5*8; # 5 is "window size"
+my $N=$STRIDE/4; # should match cache line size
+$code.=<<___;
+ movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
+ movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
+	lea	88-112(%rsp,%r10),%r10	# place the mask after tp[num+1] (+ICache optimization)
+ lea 128($bp),$bptr # size optimization
+
+ pshufd \$0,%xmm5,%xmm5 # broadcast index
+ movdqa %xmm1,%xmm4
+ .byte 0x67
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..31 to index and save result to stack
+#
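+# Not used by the generator -- a minimal plain-Perl model of the constant-time
+# gather that this mask implements: every one of the 32 table entries is read
+# and the wanted one is selected with AND/OR masks rather than with an
+# index-dependent load address (the compare itself is branch-free pcmpeqd in
+# the assembly).  Helper name and layout are illustrative only.
+sub gather_model {
+    my ($table, $idx, $nwords) = @_;	# $table: ref to 32 refs of $nwords words
+    my @acc = (0) x $nwords;
+    for my $k (0 .. 31) {
+	my $mask = ($k == $idx) ? ~0 : 0;	# all-ones or all-zeros
+	$acc[$_] |= $table->[$k][$_] & $mask for (0 .. $nwords-1);
+    }
+    return \@acc;
+}
+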
+$code.=<<___;
+ .byte 0x67
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0 # compare to 1,0
+ movdqa %xmm4,%xmm3
+___
+for($i=0;$i<$STRIDE/16-4;$i+=4) {
+$code.=<<___;
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1 # compare to 3,2
+ movdqa %xmm0,`16*($i+0)+112`(%r10)
+ movdqa %xmm4,%xmm0
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2 # compare to 5,4
+ movdqa %xmm1,`16*($i+1)+112`(%r10)
+ movdqa %xmm4,%xmm1
+
+ paddd %xmm3,%xmm0
+ pcmpeqd %xmm5,%xmm3 # compare to 7,6
+ movdqa %xmm2,`16*($i+2)+112`(%r10)
+ movdqa %xmm4,%xmm2
+
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0
+ movdqa %xmm3,`16*($i+3)+112`(%r10)
+ movdqa %xmm4,%xmm3
+___
+}
+$code.=<<___; # last iteration can be optimized
+ .byte 0x67
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1
+ movdqa %xmm0,`16*($i+0)+112`(%r10)
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2
+ movdqa %xmm1,`16*($i+1)+112`(%r10)
+
+ pcmpeqd %xmm5,%xmm3
+ movdqa %xmm2,`16*($i+2)+112`(%r10)
+
+ pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
+ pand `16*($i+1)-128`($bptr),%xmm1
+ pand `16*($i+2)-128`($bptr),%xmm2
+ movdqa %xmm3,`16*($i+3)+112`(%r10)
+ pand `16*($i+3)-128`($bptr),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+for($i=0;$i<$STRIDE/16-4;$i+=4) {
+$code.=<<___;
+ movdqa `16*($i+0)-128`($bptr),%xmm4
+ movdqa `16*($i+1)-128`($bptr),%xmm5
+ movdqa `16*($i+2)-128`($bptr),%xmm2
+ pand `16*($i+0)+112`(%r10),%xmm4
+ movdqa `16*($i+3)-128`($bptr),%xmm3
+ pand `16*($i+1)+112`(%r10),%xmm5
+ por %xmm4,%xmm0
+ pand `16*($i+2)+112`(%r10),%xmm2
+ por %xmm5,%xmm1
+ pand `16*($i+3)+112`(%r10),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+}
+$code.=<<___;
+ pxor %xmm1,%xmm0
+ pshufd \$0x4e,%xmm0,%xmm1
+ por %xmm1,%xmm0
+ lea $STRIDE($bptr),$bptr
+ movq %xmm0,%rdx # bp[0]
+ lea 64+8*4+8(%rsp),$tptr
+
+ mov %rdx,$bi
+ mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
+ mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
+ add %rax,%r11
+ mulx 2*8($aptr),%rax,%r13 # ...
+ adc %rax,%r12
+ adc \$0,%r13
+ mulx 3*8($aptr),%rax,%r14
+
+ mov $mi,%r15
+ imulq 32+8(%rsp),$mi # "t[0]"*n0
+ xor $zero,$zero # cf=0, of=0
+ mov $mi,%rdx
+
+ mov $bptr,8+8(%rsp) # off-load &b[i]
+
+ lea 4*8($aptr),$aptr
+ adcx %rax,%r13
+ adcx $zero,%r14 # cf=0
+
+ mulx 0*8($nptr),%rax,%r10
+ adcx %rax,%r15 # discarded
+ adox %r11,%r10
+ mulx 1*8($nptr),%rax,%r11
+ adcx %rax,%r10
+ adox %r12,%r11
+ mulx 2*8($nptr),%rax,%r12
+ mov 24+8(%rsp),$bptr # counter value
+ mov %r10,-8*4($tptr)
+ adcx %rax,%r11
+ adox %r13,%r12
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ mov %r11,-8*3($tptr)
+ adcx %rax,%r12
+ adox $zero,%r15 # of=0
+ lea 4*8($nptr),$nptr
+ mov %r12,-8*2($tptr)
+ jmp .Lmulx4x_1st
+
+.align 32
+.Lmulx4x_1st:
+ adcx $zero,%r15 # cf=0, modulo-scheduled
+ mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
+ adcx %r14,%r10
+ mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
+ adcx %rax,%r11
+ mulx 2*8($aptr),%r12,%rax # ...
+ adcx %r14,%r12
+ mulx 3*8($aptr),%r13,%r14
+ .byte 0x67,0x67
+ mov $mi,%rdx
+ adcx %rax,%r13
+ adcx $zero,%r14 # cf=0
+ lea 4*8($aptr),$aptr
+ lea 4*8($tptr),$tptr
+
+ adox %r15,%r10
+ mulx 0*8($nptr),%rax,%r15
+ adcx %rax,%r10
+ adox %r15,%r11
+ mulx 1*8($nptr),%rax,%r15
+ adcx %rax,%r11
+ adox %r15,%r12
+ mulx 2*8($nptr),%rax,%r15
+ mov %r10,-5*8($tptr)
+ adcx %rax,%r12
+ mov %r11,-4*8($tptr)
+ adox %r15,%r13
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ mov %r12,-3*8($tptr)
+ adcx %rax,%r13
+ adox $zero,%r15
+ lea 4*8($nptr),$nptr
+ mov %r13,-2*8($tptr)
+
+ dec $bptr # of=0, pass cf
+ jnz .Lmulx4x_1st
+
+ mov 8(%rsp),$num # load -num
+ adc $zero,%r15 # modulo-scheduled
+ lea ($aptr,$num),$aptr # rewind $aptr
+ add %r15,%r14
+ mov 8+8(%rsp),$bptr # re-load &b[i]
+ adc $zero,$zero # top-most carry
+ mov %r14,-1*8($tptr)
+ jmp .Lmulx4x_outer
+
+.align 32
+.Lmulx4x_outer:
+ lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
+ pxor %xmm4,%xmm4
+ .byte 0x67,0x67
+ pxor %xmm5,%xmm5
+___
+for($i=0;$i<$STRIDE/16;$i+=4) {
+$code.=<<___;
+ movdqa `16*($i+0)-128`($bptr),%xmm0
+ movdqa `16*($i+1)-128`($bptr),%xmm1
+ movdqa `16*($i+2)-128`($bptr),%xmm2
+ pand `16*($i+0)+256`(%r10),%xmm0
+ movdqa `16*($i+3)-128`($bptr),%xmm3
+ pand `16*($i+1)+256`(%r10),%xmm1
+ por %xmm0,%xmm4
+ pand `16*($i+2)+256`(%r10),%xmm2
+ por %xmm1,%xmm5
+ pand `16*($i+3)+256`(%r10),%xmm3
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+___
+}
+$code.=<<___;
+ por %xmm5,%xmm4
+ pshufd \$0x4e,%xmm4,%xmm0
+ por %xmm4,%xmm0
+ lea $STRIDE($bptr),$bptr
+ movq %xmm0,%rdx # m0=bp[i]
+
+ mov $zero,($tptr) # save top-most carry
+ lea 4*8($tptr,$num),$tptr # rewind $tptr
+ mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
+ xor $zero,$zero # cf=0, of=0
+ mov %rdx,$bi
+ mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
+ adox -4*8($tptr),$mi # +t[0]
+ adcx %r14,%r11
+ mulx 2*8($aptr),%r15,%r13 # ...
+ adox -3*8($tptr),%r11
+ adcx %r15,%r12
+ mulx 3*8($aptr),%rdx,%r14
+ adox -2*8($tptr),%r12
+ adcx %rdx,%r13
+ lea ($nptr,$num),$nptr # rewind $nptr
+ lea 4*8($aptr),$aptr
+ adox -1*8($tptr),%r13
+ adcx $zero,%r14
+ adox $zero,%r14
+
+ mov $mi,%r15
+ imulq 32+8(%rsp),$mi # "t[0]"*n0
+
+ mov $mi,%rdx
+ xor $zero,$zero # cf=0, of=0
+ mov $bptr,8+8(%rsp) # off-load &b[i]
+
+ mulx 0*8($nptr),%rax,%r10
+ adcx %rax,%r15 # discarded
+ adox %r11,%r10
+ mulx 1*8($nptr),%rax,%r11
+ adcx %rax,%r10
+ adox %r12,%r11
+ mulx 2*8($nptr),%rax,%r12
+ adcx %rax,%r11
+ adox %r13,%r12
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ mov 24+8(%rsp),$bptr # counter value
+ mov %r10,-8*4($tptr)
+ adcx %rax,%r12
+ mov %r11,-8*3($tptr)
+ adox $zero,%r15 # of=0
+ mov %r12,-8*2($tptr)
+ lea 4*8($nptr),$nptr
+ jmp .Lmulx4x_inner
+
+.align 32
+.Lmulx4x_inner:
+ mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
+ adcx $zero,%r15 # cf=0, modulo-scheduled
+ adox %r14,%r10
+ mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
+ adcx 0*8($tptr),%r10
+ adox %rax,%r11
+ mulx 2*8($aptr),%r12,%rax # ...
+ adcx 1*8($tptr),%r11
+ adox %r14,%r12
+ mulx 3*8($aptr),%r13,%r14
+ mov $mi,%rdx
+ adcx 2*8($tptr),%r12
+ adox %rax,%r13
+ adcx 3*8($tptr),%r13
+ adox $zero,%r14 # of=0
+ lea 4*8($aptr),$aptr
+ lea 4*8($tptr),$tptr
+ adcx $zero,%r14 # cf=0
+
+ adox %r15,%r10
+ mulx 0*8($nptr),%rax,%r15
+ adcx %rax,%r10
+ adox %r15,%r11
+ mulx 1*8($nptr),%rax,%r15
+ adcx %rax,%r11
+ adox %r15,%r12
+ mulx 2*8($nptr),%rax,%r15
+ mov %r10,-5*8($tptr)
+ adcx %rax,%r12
+ adox %r15,%r13
+ mov %r11,-4*8($tptr)
+ mulx 3*8($nptr),%rax,%r15
+ mov $bi,%rdx
+ lea 4*8($nptr),$nptr
+ mov %r12,-3*8($tptr)
+ adcx %rax,%r13
+ adox $zero,%r15
+ mov %r13,-2*8($tptr)
+
+ dec $bptr # of=0, pass cf
+ jnz .Lmulx4x_inner
+
+ mov 0+8(%rsp),$num # load -num
+ adc $zero,%r15 # modulo-scheduled
+ sub 0*8($tptr),$bptr # pull top-most carry to %cf
+ mov 8+8(%rsp),$bptr # re-load &b[i]
+ mov 16+8(%rsp),%r10
+ adc %r15,%r14
+ lea ($aptr,$num),$aptr # rewind $aptr
+ adc $zero,$zero # top-most carry
+ mov %r14,-1*8($tptr)
+
+ cmp %r10,$bptr
+ jb .Lmulx4x_outer
+
+ mov -8($nptr),%r10
+ mov $zero,%r8
+ mov ($nptr,$num),%r12
+ lea ($nptr,$num),%rbp # rewind $nptr
+ mov $num,%rcx
+ lea ($tptr,$num),%rdi # rewind $tptr
+ xor %eax,%eax
+ xor %r15,%r15
+ sub %r14,%r10 # compare top-most words
+ adc %r15,%r15
+ or %r15,%r8
+ sar \$3+2,%rcx
+ sub %r8,%rax # %rax=-%r8
+ mov 56+8(%rsp),%rdx # restore rp
+ dec %r12 # so that after 'not' we get -n[0]
+ mov 8*1(%rbp),%r13
+ xor %r8,%r8
+ mov 8*2(%rbp),%r14
+ mov 8*3(%rbp),%r15
+ jmp .Lsqrx4x_sub_entry # common post-condition
+.size mulx4x_internal,.-mulx4x_internal
+___
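+# Not used by the generator -- a plain-Perl model of the interleaved
+# multiply-and-reduce that mulx4x_internal performs (the mulx/adcx/adox
+# triple lets two independent carry chains run side by side above).
+# Assumes 64-bit words, Math::BigInt arguments and n0 == -n^-1 mod 2^64;
+# the helper name is illustrative only.
+use Math::BigInt;
+sub mont_mulx_model {
+    my ($a, $b, $n, $n0, $num) = @_;	# $b: ref to $num words gathered from the table
+    my $w = Math::BigInt->new(1)->blsft(64);
+    my $t = Math::BigInt->bzero();
+    for my $i (0 .. $num-1) {
+	$t = $t + $a * $b->[$i];		# t += a*b[i]
+	my $m = (($t % $w) * $n0) % $w;		# "t[0]"*n0 mod 2^64
+	$t = ($t + $n * $m)->brsft(64);		# exact division by 2^64
+    }
+    $t = $t - $n if $t >= $n;
+    return $t;				# == a*b*R^-1 mod n, R = 2^(64*$num)
+}
+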
+} {
+######################################################################
+# void bn_power5(
+my $rptr="%rdi"; # BN_ULONG *rptr,
+my $aptr="%rsi"; # const BN_ULONG *aptr,
+my $bptr="%rdx"; # const void *table,
+my $nptr="%rcx"; # const BN_ULONG *nptr,
+my $n0 ="%r8"; # const BN_ULONG *n0);
+my $num ="%r9"; # int num, has to be divisible by 8
+ # int pwr);
+
+my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
+my @A0=("%r10","%r11");
+my @A1=("%r12","%r13");
+my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
+
+$code.=<<___;
+.type bn_powerx5,\@function,6
+.align 32
+bn_powerx5:
+ mov %rsp,%rax
+.Lpowerx5_enter:
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lpowerx5_prologue:
+
+ shl \$3,${num}d # convert $num to bytes
+ lea ($num,$num,2),%r10 # 3*$num in bytes
+ neg $num
+ mov ($n0),$n0 # *n0
+
+ ##############################################################
+ # Ensure that stack frame doesn't alias with $rptr+3*$num
+ # modulo 4096, which covers ret[num], am[num] and n[num]
+	# (see bn_exp.c). This is done to allow the memory disambiguation
+	# logic to do its magic. [An extra 256 bytes is for the power
+	# mask calculated from the 7th argument, the index.]
+ #
+ lea -320(%rsp,$num,2),%r11
+ mov %rsp,%rbp
+ sub $rptr,%r11
+ and \$4095,%r11
+ cmp %r11,%r10
+ jb .Lpwrx_sp_alt
+ sub %r11,%rbp # align with $aptr
+ lea -320(%rbp,$num,2),%rbp # future alloca(frame+2*$num*8+256)
+ jmp .Lpwrx_sp_done
+
+.align 32
+.Lpwrx_sp_alt:
+ lea 4096-320(,$num,2),%r10
+ lea -320(%rbp,$num,2),%rbp # alloca(frame+2*$num*8+256)
+ sub %r10,%r11
+ mov \$0,%r10
+ cmovc %r10,%r11
+ sub %r11,%rbp
+.Lpwrx_sp_done:
+ and \$-64,%rbp
+ mov %rsp,%r11
+ sub %rbp,%r11
+ and \$-4096,%r11
+ lea (%rbp,%r11),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lpwrx_page_walk
+ jmp .Lpwrx_page_walk_done
+
+.Lpwrx_page_walk:
+ lea -4096(%rsp),%rsp
+ mov (%rsp),%r10
+ cmp %rbp,%rsp
+ ja .Lpwrx_page_walk
+.Lpwrx_page_walk_done:
+
+ mov $num,%r10
+ neg $num
+
+ ##############################################################
+ # Stack layout
+ #
+ # +0 saved $num, used in reduction section
+ # +8 &t[2*$num], used in reduction section
+ # +16 intermediate carry bit
+ # +24 top-most carry bit, used in reduction section
+ # +32 saved *n0
+ # +40 saved %rsp
+ # +48 t[2*$num]
+ #
+ pxor %xmm0,%xmm0
+ movq $rptr,%xmm1 # save $rptr
+ movq $nptr,%xmm2 # save $nptr
+ movq %r10, %xmm3 # -$num
+ movq $bptr,%xmm4
+ mov $n0, 32(%rsp)
+ mov %rax, 40(%rsp) # save original %rsp
+.Lpowerx5_body:
+
+ call __bn_sqrx8x_internal
+ call __bn_postx4x_internal
+ call __bn_sqrx8x_internal
+ call __bn_postx4x_internal
+ call __bn_sqrx8x_internal
+ call __bn_postx4x_internal
+ call __bn_sqrx8x_internal
+ call __bn_postx4x_internal
+ call __bn_sqrx8x_internal
+ call __bn_postx4x_internal
+
+ mov %r10,$num # -num
+ mov $aptr,$rptr
+ movq %xmm2,$nptr
+ movq %xmm4,$bptr
+ mov 40(%rsp),%rax
+
+ call mulx4x_internal
+
+ mov 40(%rsp),%rsi # restore %rsp
+ mov \$1,%rax
+
+ mov -48(%rsi),%r15
+ mov -40(%rsi),%r14
+ mov -32(%rsi),%r13
+ mov -24(%rsi),%r12
+ mov -16(%rsi),%rbp
+ mov -8(%rsi),%rbx
+ lea (%rsi),%rsp
+.Lpowerx5_epilogue:
+ ret
+.size bn_powerx5,.-bn_powerx5
+
+.globl bn_sqrx8x_internal
+.hidden bn_sqrx8x_internal
+.type bn_sqrx8x_internal,\@abi-omnipotent
+.align 32
+bn_sqrx8x_internal:
+__bn_sqrx8x_internal:
+ ##################################################################
+ # Squaring part:
+ #
+ # a) multiply-n-add everything but a[i]*a[i];
+ # b) shift result of a) by 1 to the left and accumulate
+ # a[i]*a[i] products;
+ #
+ ##################################################################
+ # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
+ # a[1]a[0]
+ # a[2]a[0]
+ # a[3]a[0]
+ # a[2]a[1]
+ # a[3]a[1]
+ # a[3]a[2]
+ #
+ # a[4]a[0]
+ # a[5]a[0]
+ # a[6]a[0]
+ # a[7]a[0]
+ # a[4]a[1]
+ # a[5]a[1]
+ # a[6]a[1]
+ # a[7]a[1]
+ # a[4]a[2]
+ # a[5]a[2]
+ # a[6]a[2]
+ # a[7]a[2]
+ # a[4]a[3]
+ # a[5]a[3]
+ # a[6]a[3]
+ # a[7]a[3]
+ #
+ # a[5]a[4]
+ # a[6]a[4]
+ # a[7]a[4]
+ # a[6]a[5]
+ # a[7]a[5]
+ # a[7]a[6]
+ # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
+___
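+# Not used by the generator -- a plain-Perl check of the decomposition in the
+# comment above: the off-diagonal products are accumulated once, doubled by
+# the shift, and the diagonal squares a[i]*a[i] are added on top.  Assumes
+# little-endian Math::BigInt words; the helper name is illustrative only.
+use Math::BigInt;
+sub sqr_decomposition_model {
+    my @a = @_;				# little-endian 64-bit words
+    my $w = Math::BigInt->new(1)->blsft(64);
+    my $cross = Math::BigInt->bzero();
+    my $diag  = Math::BigInt->bzero();
+    for my $i (0 .. $#a) {
+	$diag += $w->copy()->bpow(2*$i) * $a[$i] * $a[$i];
+	for my $j ($i+1 .. $#a) {
+	    $cross += $w->copy()->bpow($i+$j) * $a[$i] * $a[$j];
+	}
+    }
+    return $cross->blsft(1)->badd($diag);	# == (sum a[i]*2^(64*i))^2
+}
+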
+{
+my ($zero,$carry)=("%rbp","%rcx");
+my $aaptr=$zero;
+$code.=<<___;
+ lea 48+8(%rsp),$tptr
+ lea ($aptr,$num),$aaptr
+ mov $num,0+8(%rsp) # save $num
+ mov $aaptr,8+8(%rsp) # save end of $aptr
+ jmp .Lsqr8x_zero_start
+
+.align 32
+.byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
+.Lsqrx8x_zero:
+ .byte 0x3e
+ movdqa %xmm0,0*8($tptr)
+ movdqa %xmm0,2*8($tptr)
+ movdqa %xmm0,4*8($tptr)
+ movdqa %xmm0,6*8($tptr)
+.Lsqr8x_zero_start: # aligned at 32
+ movdqa %xmm0,8*8($tptr)
+ movdqa %xmm0,10*8($tptr)
+ movdqa %xmm0,12*8($tptr)
+ movdqa %xmm0,14*8($tptr)
+ lea 16*8($tptr),$tptr
+ sub \$64,$num
+ jnz .Lsqrx8x_zero
+
+ mov 0*8($aptr),%rdx # a[0], modulo-scheduled
+ #xor %r9,%r9 # t[1], ex-$num, zero already
+ xor %r10,%r10
+ xor %r11,%r11
+ xor %r12,%r12
+ xor %r13,%r13
+ xor %r14,%r14
+ xor %r15,%r15
+ lea 48+8(%rsp),$tptr
+	xor	$zero,$zero		# cf=0, of=0
+ jmp .Lsqrx8x_outer_loop
+
+.align 32
+.Lsqrx8x_outer_loop:
+ mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
+ adcx %r9,%r8 # a[1]*a[0]+=t[1]
+ adox %rax,%r10
+ mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
+ adcx %r10,%r9
+ adox %rax,%r11
+ .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
+ adcx %r11,%r10
+ adox %rax,%r12
+ .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
+ adcx %r12,%r11
+ adox %rax,%r13
+ mulx 5*8($aptr),%r12,%rax
+ adcx %r13,%r12
+ adox %rax,%r14
+ mulx 6*8($aptr),%r13,%rax
+ adcx %r14,%r13
+ adox %r15,%rax
+ mulx 7*8($aptr),%r14,%r15
+ mov 1*8($aptr),%rdx # a[1]
+ adcx %rax,%r14
+ adox $zero,%r15
+ adc 8*8($tptr),%r15
+ mov %r8,1*8($tptr) # t[1]
+ mov %r9,2*8($tptr) # t[2]
+ sbb $carry,$carry # mov %cf,$carry
+ xor $zero,$zero # cf=0, of=0
+
+
+ mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
+ mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
+ adcx %r10,%r8
+ adox %rbx,%r9
+ mulx 4*8($aptr),%r10,%rbx # ...
+ adcx %r11,%r9
+ adox %rax,%r10
+ .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
+ adcx %r12,%r10
+ adox %rbx,%r11
+ .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
+ adcx %r13,%r11
+ adox %r14,%r12
+ .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
+ mov 2*8($aptr),%rdx # a[2]
+ adcx %rax,%r12
+ adox %rbx,%r13
+ adcx %r15,%r13
+ adox $zero,%r14 # of=0
+ adcx $zero,%r14 # cf=0
+
+ mov %r8,3*8($tptr) # t[3]
+ mov %r9,4*8($tptr) # t[4]
+
+ mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
+ mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
+ adcx %r10,%r8
+ adox %rbx,%r9
+ mulx 5*8($aptr),%r10,%rbx # ...
+ adcx %r11,%r9
+ adox %rax,%r10
+ .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
+ adcx %r12,%r10
+ adox %r13,%r11
+ .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
+ .byte 0x3e
+ mov 3*8($aptr),%rdx # a[3]
+ adcx %rbx,%r11
+ adox %rax,%r12
+ adcx %r14,%r12
+ mov %r8,5*8($tptr) # t[5]
+ mov %r9,6*8($tptr) # t[6]
+ mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
+ adox $zero,%r13 # of=0
+ adcx $zero,%r13 # cf=0
+
+ mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
+ adcx %r10,%r8
+ adox %rax,%r9
+ mulx 6*8($aptr),%r10,%rax # ...
+ adcx %r11,%r9
+ adox %r12,%r10
+ mulx 7*8($aptr),%r11,%r12
+ mov 4*8($aptr),%rdx # a[4]
+ mov 5*8($aptr),%r14 # a[5]
+ adcx %rbx,%r10
+ adox %rax,%r11
+ mov 6*8($aptr),%r15 # a[6]
+ adcx %r13,%r11
+ adox $zero,%r12 # of=0
+ adcx $zero,%r12 # cf=0
+
+ mov %r8,7*8($tptr) # t[7]
+ mov %r9,8*8($tptr) # t[8]
+
+ mulx %r14,%r9,%rax # a[5]*a[4]
+ mov 7*8($aptr),%r8 # a[7]
+ adcx %r10,%r9
+ mulx %r15,%r10,%rbx # a[6]*a[4]
+ adox %rax,%r10
+ adcx %r11,%r10
+ mulx %r8,%r11,%rax # a[7]*a[4]
+ mov %r14,%rdx # a[5]
+ adox %rbx,%r11
+ adcx %r12,%r11
+ #adox $zero,%rax # of=0
+ adcx $zero,%rax # cf=0
+
+ mulx %r15,%r14,%rbx # a[6]*a[5]
+ mulx %r8,%r12,%r13 # a[7]*a[5]
+ mov %r15,%rdx # a[6]
+ lea 8*8($aptr),$aptr
+ adcx %r14,%r11
+ adox %rbx,%r12
+ adcx %rax,%r12
+ adox $zero,%r13
+
+ .byte 0x67,0x67
+ mulx %r8,%r8,%r14 # a[7]*a[6]
+ adcx %r8,%r13
+ adcx $zero,%r14
+
+ cmp 8+8(%rsp),$aptr
+ je .Lsqrx8x_outer_break
+
+ neg $carry # mov $carry,%cf
+ mov \$-8,%rcx
+ mov $zero,%r15
+ mov 8*8($tptr),%r8
+ adcx 9*8($tptr),%r9 # +=t[9]
+ adcx 10*8($tptr),%r10 # ...
+ adcx 11*8($tptr),%r11
+ adc 12*8($tptr),%r12
+ adc 13*8($tptr),%r13
+ adc 14*8($tptr),%r14
+ adc 15*8($tptr),%r15
+ lea ($aptr),$aaptr
+ lea 2*64($tptr),$tptr
+ sbb %rax,%rax # mov %cf,$carry
+
+ mov -64($aptr),%rdx # a[0]
+ mov %rax,16+8(%rsp) # offload $carry
+ mov $tptr,24+8(%rsp)
+
+ #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
+ xor %eax,%eax # cf=0, of=0
+ jmp .Lsqrx8x_loop
+
+.align 32
+.Lsqrx8x_loop:
+ mov %r8,%rbx
+ mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
+ adcx %rax,%rbx # +=t[8]
+ adox %r9,%r8
+
+ mulx 1*8($aaptr),%rax,%r9 # ...
+ adcx %rax,%r8
+ adox %r10,%r9
+
+ mulx 2*8($aaptr),%rax,%r10
+ adcx %rax,%r9
+ adox %r11,%r10
+
+ mulx 3*8($aaptr),%rax,%r11
+ adcx %rax,%r10
+ adox %r12,%r11
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
+ adcx %rax,%r11
+ adox %r13,%r12
+
+ mulx 5*8($aaptr),%rax,%r13
+ adcx %rax,%r12
+ adox %r14,%r13
+
+ mulx 6*8($aaptr),%rax,%r14
+ mov %rbx,($tptr,%rcx,8) # store t[8+i]
+ mov \$0,%ebx
+ adcx %rax,%r13
+ adox %r15,%r14
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
+ mov 8($aptr,%rcx,8),%rdx # a[i]
+ adcx %rax,%r14
+ adox %rbx,%r15 # %rbx is 0, of=0
+ adcx %rbx,%r15 # cf=0
+
+ .byte 0x67
+ inc %rcx # of=0
+ jnz .Lsqrx8x_loop
+
+ lea 8*8($aaptr),$aaptr
+ mov \$-8,%rcx
+ cmp 8+8(%rsp),$aaptr # done?
+ je .Lsqrx8x_break
+
+ sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
+ .byte 0x66
+ mov -64($aptr),%rdx
+ adcx 0*8($tptr),%r8
+ adcx 1*8($tptr),%r9
+ adc 2*8($tptr),%r10
+ adc 3*8($tptr),%r11
+ adc 4*8($tptr),%r12
+ adc 5*8($tptr),%r13
+ adc 6*8($tptr),%r14
+ adc 7*8($tptr),%r15
+ lea 8*8($tptr),$tptr
+ .byte 0x67
+ sbb %rax,%rax # mov %cf,%rax
+ xor %ebx,%ebx # cf=0, of=0
+ mov %rax,16+8(%rsp) # offload carry
+ jmp .Lsqrx8x_loop
+
+.align 32
+.Lsqrx8x_break:
+ xor $zero,$zero
+ sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
+ adcx $zero,%r8
+ mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
+ adcx $zero,%r9
+ mov 0*8($aptr),%rdx # a[8], modulo-scheduled
+ adc \$0,%r10
+ mov %r8,0*8($tptr)
+ adc \$0,%r11
+ adc \$0,%r12
+ adc \$0,%r13
+ adc \$0,%r14
+ adc \$0,%r15
+ cmp $carry,$tptr # cf=0, of=0
+ je .Lsqrx8x_outer_loop
+
+ mov %r9,1*8($tptr)
+ mov 1*8($carry),%r9
+ mov %r10,2*8($tptr)
+ mov 2*8($carry),%r10
+ mov %r11,3*8($tptr)
+ mov 3*8($carry),%r11
+ mov %r12,4*8($tptr)
+ mov 4*8($carry),%r12
+ mov %r13,5*8($tptr)
+ mov 5*8($carry),%r13
+ mov %r14,6*8($tptr)
+ mov 6*8($carry),%r14
+ mov %r15,7*8($tptr)
+ mov 7*8($carry),%r15
+ mov $carry,$tptr
+ jmp .Lsqrx8x_outer_loop
+
+.align 32
+.Lsqrx8x_outer_break:
+ mov %r9,9*8($tptr) # t[9]
+ movq %xmm3,%rcx # -$num
+ mov %r10,10*8($tptr) # ...
+ mov %r11,11*8($tptr)
+ mov %r12,12*8($tptr)
+ mov %r13,13*8($tptr)
+ mov %r14,14*8($tptr)
+___
+} {
+my $i="%rcx";
+$code.=<<___;
+ lea 48+8(%rsp),$tptr
+ mov ($aptr,$i),%rdx # a[0]
+
+ mov 8($tptr),$A0[1] # t[1]
+ xor $A0[0],$A0[0] # t[0], of=0, cf=0
+ mov 0+8(%rsp),$num # restore $num
+ adox $A0[1],$A0[1]
+ mov 16($tptr),$A1[0] # t[2] # prefetch
+ mov 24($tptr),$A1[1] # t[3] # prefetch
+ #jmp .Lsqrx4x_shift_n_add # happens to be aligned
+
+.align 32
+.Lsqrx4x_shift_n_add:
+ mulx %rdx,%rax,%rbx
+ adox $A1[0],$A1[0]
+ adcx $A0[0],%rax
+ .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
+ .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
+ adox $A1[1],$A1[1]
+ adcx $A0[1],%rbx
+ mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
+ mov %rax,0($tptr)
+ mov %rbx,8($tptr)
+
+ mulx %rdx,%rax,%rbx
+ adox $A0[0],$A0[0]
+ adcx $A1[0],%rax
+ mov 16($aptr,$i),%rdx # a[i+2] # prefetch
+ mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
+ adox $A0[1],$A0[1]
+ adcx $A1[1],%rbx
+ mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
+ mov %rax,16($tptr)
+ mov %rbx,24($tptr)
+
+ mulx %rdx,%rax,%rbx
+ adox $A1[0],$A1[0]
+ adcx $A0[0],%rax
+ mov 24($aptr,$i),%rdx # a[i+3] # prefetch
+ lea 32($i),$i
+ mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
+ adox $A1[1],$A1[1]
+ adcx $A0[1],%rbx
+ mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
+ mov %rax,32($tptr)
+ mov %rbx,40($tptr)
+
+ mulx %rdx,%rax,%rbx
+ adox $A0[0],$A0[0]
+ adcx $A1[0],%rax
+ jrcxz .Lsqrx4x_shift_n_add_break
+ .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
+ adox $A0[1],$A0[1]
+ adcx $A1[1],%rbx
+ mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
+ mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
+ mov %rax,48($tptr)
+ mov %rbx,56($tptr)
+ lea 64($tptr),$tptr
+ nop
+ jmp .Lsqrx4x_shift_n_add
+
+.align 32
+.Lsqrx4x_shift_n_add_break:
+ adcx $A1[1],%rbx
+ mov %rax,48($tptr)
+ mov %rbx,56($tptr)
+ lea 64($tptr),$tptr # end of t[] buffer
+___
+}
+######################################################################
+# Montgomery reduction part, "word-by-word" algorithm.
+#
+# This new path is inspired by multiple submissions from Intel, by
+# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
+# Vinodh Gopal...
+{
+my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
+
+$code.=<<___;
+ movq %xmm2,$nptr
+__bn_sqrx8x_reduction:
+ xor %eax,%eax # initial top-most carry bit
+ mov 32+8(%rsp),%rbx # n0
+ mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
+ lea -8*8($nptr,$num),%rcx # end of n[]
+ #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
+ mov %rcx, 0+8(%rsp) # save end of n[]
+ mov $tptr,8+8(%rsp) # save end of t[]
+
+ lea 48+8(%rsp),$tptr # initial t[] window
+ jmp .Lsqrx8x_reduction_loop
+
+.align 32
+.Lsqrx8x_reduction_loop:
+ mov 8*1($tptr),%r9
+ mov 8*2($tptr),%r10
+ mov 8*3($tptr),%r11
+ mov 8*4($tptr),%r12
+ mov %rdx,%r8
+ imulq %rbx,%rdx # n0*a[i]
+ mov 8*5($tptr),%r13
+ mov 8*6($tptr),%r14
+ mov 8*7($tptr),%r15
+ mov %rax,24+8(%rsp) # store top-most carry bit
+
+ lea 8*8($tptr),$tptr
+ xor $carry,$carry # cf=0,of=0
+ mov \$-8,%rcx
+ jmp .Lsqrx8x_reduce
+
+.align 32
+.Lsqrx8x_reduce:
+ mov %r8, %rbx
+ mulx 8*0($nptr),%rax,%r8 # n[0]
+ adcx %rbx,%rax # discarded
+ adox %r9,%r8
+
+ mulx 8*1($nptr),%rbx,%r9 # n[1]
+ adcx %rbx,%r8
+ adox %r10,%r9
+
+ mulx 8*2($nptr),%rbx,%r10
+ adcx %rbx,%r9
+ adox %r11,%r10
+
+ mulx 8*3($nptr),%rbx,%r11
+ adcx %rbx,%r10
+ adox %r12,%r11
+
+ .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
+ mov %rdx,%rax
+ mov %r8,%rdx
+ adcx %rbx,%r11
+ adox %r13,%r12
+
+ mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
+ mov %rax,%rdx
+ mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
+
+ mulx 8*5($nptr),%rax,%r13
+ adcx %rax,%r12
+ adox %r14,%r13
+
+ mulx 8*6($nptr),%rax,%r14
+ adcx %rax,%r13
+ adox %r15,%r14
+
+ mulx 8*7($nptr),%rax,%r15
+ mov %rbx,%rdx
+ adcx %rax,%r14
+ adox $carry,%r15 # $carry is 0
+ adcx $carry,%r15 # cf=0
+
+ .byte 0x67,0x67,0x67
+ inc %rcx # of=0
+ jnz .Lsqrx8x_reduce
+
+ mov $carry,%rax # xor %rax,%rax
+ cmp 0+8(%rsp),$nptr # end of n[]?
+ jae .Lsqrx8x_no_tail
+
+ mov 48+8(%rsp),%rdx # pull n0*a[0]
+ add 8*0($tptr),%r8
+ lea 8*8($nptr),$nptr
+ mov \$-8,%rcx
+ adcx 8*1($tptr),%r9
+ adcx 8*2($tptr),%r10
+ adc 8*3($tptr),%r11
+ adc 8*4($tptr),%r12
+ adc 8*5($tptr),%r13
+ adc 8*6($tptr),%r14
+ adc 8*7($tptr),%r15
+ lea 8*8($tptr),$tptr
+ sbb %rax,%rax # top carry
+
+ xor $carry,$carry # of=0, cf=0
+ mov %rax,16+8(%rsp)
+ jmp .Lsqrx8x_tail
+
+.align 32
+.Lsqrx8x_tail:
+ mov %r8,%rbx
+ mulx 8*0($nptr),%rax,%r8
+ adcx %rax,%rbx
+ adox %r9,%r8
+
+ mulx 8*1($nptr),%rax,%r9
+ adcx %rax,%r8
+ adox %r10,%r9
+
+ mulx 8*2($nptr),%rax,%r10
+ adcx %rax,%r9
+ adox %r11,%r10
+
+ mulx 8*3($nptr),%rax,%r11
+ adcx %rax,%r10
+ adox %r12,%r11
+
+ .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
+ adcx %rax,%r11
+ adox %r13,%r12
+
+ mulx 8*5($nptr),%rax,%r13
+ adcx %rax,%r12
+ adox %r14,%r13
+
+ mulx 8*6($nptr),%rax,%r14
+ adcx %rax,%r13
+ adox %r15,%r14
+
+ mulx 8*7($nptr),%rax,%r15
+ mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
+ adcx %rax,%r14
+ adox $carry,%r15
+ mov %rbx,($tptr,%rcx,8) # save result
+ mov %r8,%rbx
+ adcx $carry,%r15 # cf=0
+
+ inc %rcx # of=0
+ jnz .Lsqrx8x_tail
+
+ cmp 0+8(%rsp),$nptr # end of n[]?
+ jae .Lsqrx8x_tail_done # break out of loop
+
+ sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
+ mov 48+8(%rsp),%rdx # pull n0*a[0]
+ lea 8*8($nptr),$nptr
+ adc 8*0($tptr),%r8
+ adc 8*1($tptr),%r9
+ adc 8*2($tptr),%r10
+ adc 8*3($tptr),%r11
+ adc 8*4($tptr),%r12
+ adc 8*5($tptr),%r13
+ adc 8*6($tptr),%r14
+ adc 8*7($tptr),%r15
+ lea 8*8($tptr),$tptr
+ sbb %rax,%rax
+ sub \$8,%rcx # mov \$-8,%rcx
+
+ xor $carry,$carry # of=0, cf=0
+ mov %rax,16+8(%rsp)
+ jmp .Lsqrx8x_tail
+
+.align 32
+.Lsqrx8x_tail_done:
+ xor %rax,%rax
+ add 24+8(%rsp),%r8 # can this overflow?
+ adc \$0,%r9
+ adc \$0,%r10
+ adc \$0,%r11
+ adc \$0,%r12
+ adc \$0,%r13
+ adc \$0,%r14
+ adc \$0,%r15
+ adc \$0,%rax
+
+ sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
+.Lsqrx8x_no_tail: # %cf is 0 if jumped here
+ adc 8*0($tptr),%r8
+ movq %xmm3,%rcx
+ adc 8*1($tptr),%r9
+ mov 8*7($nptr),$carry
+ movq %xmm2,$nptr # restore $nptr
+ adc 8*2($tptr),%r10
+ adc 8*3($tptr),%r11
+ adc 8*4($tptr),%r12
+ adc 8*5($tptr),%r13
+ adc 8*6($tptr),%r14
+ adc 8*7($tptr),%r15
+ adc \$0,%rax # top-most carry
+
+ mov 32+8(%rsp),%rbx # n0
+ mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
+
+ mov %r8,8*0($tptr) # store top 512 bits
+ lea 8*8($tptr),%r8 # borrow %r8
+ mov %r9,8*1($tptr)
+ mov %r10,8*2($tptr)
+ mov %r11,8*3($tptr)
+ mov %r12,8*4($tptr)
+ mov %r13,8*5($tptr)
+ mov %r14,8*6($tptr)
+ mov %r15,8*7($tptr)
+
+ lea 8*8($tptr,%rcx),$tptr # start of current t[] window
+ cmp 8+8(%rsp),%r8 # end of t[]?
+ jb .Lsqrx8x_reduction_loop
+ ret
+.size bn_sqrx8x_internal,.-bn_sqrx8x_internal
+___
+}
+##############################################################
+# Post-condition, 4x unrolled
+#
+{
+my ($rptr,$nptr)=("%rdx","%rbp");
+$code.=<<___;
+.align 32
+__bn_postx4x_internal:
+ mov 8*0($nptr),%r12
+ mov %rcx,%r10 # -$num
+ mov %rcx,%r9 # -$num
+ neg %rax
+ sar \$3+2,%rcx
+ #lea 48+8(%rsp,%r9),$tptr
+ movq %xmm1,$rptr # restore $rptr
+ movq %xmm1,$aptr # prepare for back-to-back call
+ dec %r12 # so that after 'not' we get -n[0]
+ mov 8*1($nptr),%r13
+ xor %r8,%r8
+ mov 8*2($nptr),%r14
+ mov 8*3($nptr),%r15
+ jmp .Lsqrx4x_sub_entry
+
+.align 16
+.Lsqrx4x_sub:
+ mov 8*0($nptr),%r12
+ mov 8*1($nptr),%r13
+ mov 8*2($nptr),%r14
+ mov 8*3($nptr),%r15
+.Lsqrx4x_sub_entry:
+ andn %rax,%r12,%r12
+ lea 8*4($nptr),$nptr
+ andn %rax,%r13,%r13
+ andn %rax,%r14,%r14
+ andn %rax,%r15,%r15
+
+ neg %r8 # mov %r8,%cf
+ adc 8*0($tptr),%r12
+ adc 8*1($tptr),%r13
+ adc 8*2($tptr),%r14
+ adc 8*3($tptr),%r15
+ mov %r12,8*0($rptr)
+ lea 8*4($tptr),$tptr
+ mov %r13,8*1($rptr)
+ sbb %r8,%r8 # mov %cf,%r8
+ mov %r14,8*2($rptr)
+ mov %r15,8*3($rptr)
+ lea 8*4($rptr),$rptr
+
+ inc %rcx
+ jnz .Lsqrx4x_sub
+
+ neg %r9 # restore $num
+
+ ret
+.size __bn_postx4x_internal,.-__bn_postx4x_internal
+___
+}
+}}}
+{
+my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
+ ("%rdi","%esi","%rdx","%ecx"); # Unix order
+my $out=$inp;
+my $STRIDE=2**5*8;
+my $N=$STRIDE/4;
+
+$code.=<<___;
+.globl bn_get_bits5
+.type bn_get_bits5,\@abi-omnipotent
+.align 16
+bn_get_bits5:
+ lea 0($inp),%r10
+ lea 1($inp),%r11
+ mov $num,%ecx
+ shr \$4,$num
+ and \$15,%ecx
+ lea -8(%ecx),%eax
+ cmp \$11,%ecx
+ cmova %r11,%r10
+ cmova %eax,%ecx
+ movzw (%r10,$num,2),%eax
+ shrl %cl,%eax
+ and \$31,%eax
+ ret
+.size bn_get_bits5,.-bn_get_bits5
+
+.globl bn_scatter5
+.type bn_scatter5,\@abi-omnipotent
+.align 16
+bn_scatter5:
+ cmp \$0, $num
+ jz .Lscatter_epilogue
+ lea ($tbl,$idx,8),$tbl
+.Lscatter:
+ mov ($inp),%rax
+ lea 8($inp),$inp
+ mov %rax,($tbl)
+ lea 32*8($tbl),$tbl
+ sub \$1,$num
+ jnz .Lscatter
+.Lscatter_epilogue:
+ ret
+.size bn_scatter5,.-bn_scatter5
+
+.globl bn_gather5
+.type bn_gather5,\@abi-omnipotent
+.align 32
+bn_gather5:
+.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
+ # I can't trust assembler to use specific encoding:-(
+ .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
+ .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
+ lea .Linc(%rip),%rax
+ and \$-16,%rsp # shouldn't be formally required
+
+ movd $idx,%xmm5
+ movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
+ movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
+ lea 128($tbl),%r11 # size optimization
+ lea 128(%rsp),%rax # size optimization
+
+ pshufd \$0,%xmm5,%xmm5 # broadcast $idx
+ movdqa %xmm1,%xmm4
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..31 to $idx and save result to stack
+#
+for($i=0;$i<$STRIDE/16;$i+=4) {
+$code.=<<___;
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0 # compare to 1,0
+___
+$code.=<<___ if ($i);
+ movdqa %xmm3,`16*($i-1)-128`(%rax)
+___
+$code.=<<___;
+ movdqa %xmm4,%xmm3
+
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1 # compare to 3,2
+ movdqa %xmm0,`16*($i+0)-128`(%rax)
+ movdqa %xmm4,%xmm0
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2 # compare to 5,4
+ movdqa %xmm1,`16*($i+1)-128`(%rax)
+ movdqa %xmm4,%xmm1
+
+ paddd %xmm3,%xmm0
+ pcmpeqd %xmm5,%xmm3 # compare to 7,6
+ movdqa %xmm2,`16*($i+2)-128`(%rax)
+ movdqa %xmm4,%xmm2
+___
+}
+$code.=<<___;
+ movdqa %xmm3,`16*($i-1)-128`(%rax)
+ jmp .Lgather
+
+.align 32
+.Lgather:
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+___
+for($i=0;$i<$STRIDE/16;$i+=4) {
+$code.=<<___;
+ movdqa `16*($i+0)-128`(%r11),%xmm0
+ movdqa `16*($i+1)-128`(%r11),%xmm1
+ movdqa `16*($i+2)-128`(%r11),%xmm2
+ pand `16*($i+0)-128`(%rax),%xmm0
+ movdqa `16*($i+3)-128`(%r11),%xmm3
+ pand `16*($i+1)-128`(%rax),%xmm1
+ por %xmm0,%xmm4
+ pand `16*($i+2)-128`(%rax),%xmm2
+ por %xmm1,%xmm5
+ pand `16*($i+3)-128`(%rax),%xmm3
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+___
+}
+$code.=<<___;
+ por %xmm5,%xmm4
+ lea $STRIDE(%r11),%r11
+ pshufd \$0x4e,%xmm4,%xmm0
+ por %xmm4,%xmm0
+ movq %xmm0,($out) # m0=bp[0]
+ lea 8($out),$out
+ sub \$1,$num
+ jnz .Lgather
+
+ lea (%r10),%rsp
+ ret
+.LSEH_end_bn_gather5:
+.size bn_gather5,.-bn_gather5
+___
+}
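+# Not used by the generator -- what bn_get_bits5 above computes, modelled on
+# the whole number rather than via the 16-bit-load trick used in the assembly.
+# Assumes $n is a Math::BigInt holding the little-endian a[] value and $bits
+# is the bit offset of the window; the helper name is illustrative only.
+use Math::BigInt;
+sub get_bits5_model {
+    my ($n, $bits) = @_;
+    return $n->copy()->brsft($bits)->bmod(32)->numify();	# (n >> bits) & 31
+}
+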
+$code.=<<___;
+.align 64
+.Linc:
+ .long 0,0, 1,1
+ .long 2,2, 2,2
+.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type mul_handler,\@abi-omnipotent
+.align 16
+mul_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jb .Lcommon_pop_regs
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 8(%r11),%r10d # HandlerData[2]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ lea .Lmul_epilogue(%rip),%r10
+ cmp %r10,%rbx
+ ja .Lbody_40
+
+ mov 192($context),%r10 # pull $num
+ mov 8(%rax,%r10,8),%rax # pull saved stack pointer
+
+ jmp .Lcommon_pop_regs
+
+.Lbody_40:
+ mov 40(%rax),%rax # pull saved stack pointer
+.Lcommon_pop_regs:
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size mul_handler,.-mul_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_bn_mul_mont_gather5
+ .rva .LSEH_end_bn_mul_mont_gather5
+ .rva .LSEH_info_bn_mul_mont_gather5
+
+ .rva .LSEH_begin_bn_mul4x_mont_gather5
+ .rva .LSEH_end_bn_mul4x_mont_gather5
+ .rva .LSEH_info_bn_mul4x_mont_gather5
+
+ .rva .LSEH_begin_bn_power5
+ .rva .LSEH_end_bn_power5
+ .rva .LSEH_info_bn_power5
+
+ .rva .LSEH_begin_bn_from_mont8x
+ .rva .LSEH_end_bn_from_mont8x
+ .rva .LSEH_info_bn_from_mont8x
+___
+$code.=<<___ if ($addx);
+ .rva .LSEH_begin_bn_mulx4x_mont_gather5
+ .rva .LSEH_end_bn_mulx4x_mont_gather5
+ .rva .LSEH_info_bn_mulx4x_mont_gather5
+
+ .rva .LSEH_begin_bn_powerx5
+ .rva .LSEH_end_bn_powerx5
+ .rva .LSEH_info_bn_powerx5
+___
+$code.=<<___;
+ .rva .LSEH_begin_bn_gather5
+ .rva .LSEH_end_bn_gather5
+ .rva .LSEH_info_bn_gather5
+
+.section .xdata
+.align 8
+.LSEH_info_bn_mul_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul_body,.Lmul_body,.Lmul_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_mul4x_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmul4x_prologue,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_power5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lpower5_prologue,.Lpower5_body,.Lpower5_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_from_mont8x:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lfrom_prologue,.Lfrom_body,.Lfrom_epilogue # HandlerData[]
+___
+$code.=<<___ if ($addx);
+.align 8
+.LSEH_info_bn_mulx4x_mont_gather5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
+.align 8
+.LSEH_info_bn_powerx5:
+ .byte 9,0,0,0
+ .rva mul_handler
+ .rva .Lpowerx5_prologue,.Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
+___
+$code.=<<___;
+.align 8
+.LSEH_info_bn_gather5:
+ .byte 0x01,0x0b,0x03,0x0a
+ .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
+ .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
+.align 8
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+print $code;
+close STDOUT;