Diffstat (limited to 'openssl-1.1.0h/crypto/camellia/asm')
-rw-r--r--  openssl-1.1.0h/crypto/camellia/asm/cmll-x86.pl        1150
-rw-r--r--  openssl-1.1.0h/crypto/camellia/asm/cmll-x86_64.pl     1088
-rw-r--r--  openssl-1.1.0h/crypto/camellia/asm/cmllt4-sparcv9.pl   939
3 files changed, 3177 insertions, 0 deletions
diff --git a/openssl-1.1.0h/crypto/camellia/asm/cmll-x86.pl b/openssl-1.1.0h/crypto/camellia/asm/cmll-x86.pl
new file mode 100644
index 0000000..59f9ed9
--- /dev/null
+++ b/openssl-1.1.0h/crypto/camellia/asm/cmll-x86.pl
@@ -0,0 +1,1150 @@
+#! /usr/bin/env perl
+# Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Copyright (c) 2008 Andy Polyakov <appro@openssl.org>
+#
+# This module may be used under the terms of either the GNU General
+# Public License version 2 or later, the GNU Lesser General Public
+# License version 2.1 or later, the Mozilla Public License version
+# 1.1 or the BSD License. The exact terms of either license are
+# distributed along with this module. For further details see
+# http://www.openssl.org/~appro/camellia/.
+# ====================================================================
+
+# Performance in cycles per processed byte (less is better) in
+# 'openssl speed ...' benchmark:
+#
+# AMD K8 Core2 PIII P4
+# -evp camellia-128-ecb 21.5 22.8 27.0 28.9
+# + over gcc 3.4.6 +90/11% +70/10% +53/4% +160/64%
+# + over icc 8.0 +48/19% +21/15% +21/17% +55/37%
+#
+# camellia-128-cbc 17.3 21.1 23.9 25.9
+#
+# 128-bit key setup 196 280 256 240 cycles/key
+# + over gcc 3.4.6 +30/0% +17/11% +11/0% +63/40%
+# + over icc 8.0 +18/3% +10/0% +10/3% +21/10%
+#
+# Pairs of numbers in "+" rows represent performance improvement over
+# compiler generated position-independent code, PIC, and non-PIC
+# respectively. PIC results are of greater relevance, as this module
+# is position-independent, i.e. suitable for a shared library or PIE.
+# Position independence "costs" one register, which is why compilers
+# come so close with non-PIC results: they have an extra register to
+# spare. CBC results are better than ECB ones thanks to the "zero-copy"
+# private _x86_* interface; they are ~30-40% better than with compiler-
+# generated cmll_cbc.o, and reach ~80-90% of x86_64 performance on the
+# same CPU (where applicable).
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "x86asm.pl";
+
+$OPENSSL=1;
+
+$output = pop;
+open STDOUT,">$output";
+
+&asm_init($ARGV[0],"cmll-586.pl",$ARGV[$#ARGV] eq "386");
+
+@T=("eax","ebx","ecx","edx");
+$idx="esi";
+$key="edi";
+$Tbl="ebp";
+
+# stack frame layout in _x86_Camellia_* routines; the frame is allocated
+# by the caller
+$__ra=&DWP(0,"esp"); # return address
+$__s0=&DWP(4,"esp"); # s0 backing store
+$__s1=&DWP(8,"esp"); # s1 backing store
+$__s2=&DWP(12,"esp"); # s2 backing store
+$__s3=&DWP(16,"esp"); # s3 backing store
+$__end=&DWP(20,"esp"); # pointer to end/start of key schedule
+
+# stack frame layout in Camellia_[en|de]crypt routines, which differs from
+# the above by 4 and overlaps at the pointer to end/start of key schedule
+$_end=&DWP(16,"esp");
+$_esp=&DWP(20,"esp");
+
+# const unsigned int Camellia_SBOX[4][256];
+# Well, sort of... Camellia_SBOX[0][] is interleaved with [1][],
+# and [2][] - with [3][]. This is done to optimize code size.
+$SBOX1_1110=0; # Camellia_SBOX[0]
+$SBOX4_4404=4; # Camellia_SBOX[1]
+$SBOX2_0222=2048; # Camellia_SBOX[2]
+$SBOX3_3033=2052; # Camellia_SBOX[3]
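+# In other words (an illustrative sketch, not used by the generated
+# code): the logical entry Camellia_SBOX[$t][$i] lives at byte offset
+#	(0,4,2048,2052)[$t] + 8*$i
+# from the table base, which is exactly what the DWP(...,$Tbl,$idx,8)
+# lookups below address.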
+&static_label("Camellia_SIGMA");
+&static_label("Camellia_SBOX");
+
+sub Camellia_Feistel {
+my $i=$_[0];
+my $seed=defined($_[1])?$_[1]:0;
+my $scale=$seed<0?-8:8;
+my $frame=defined($_[2])?$_[2]:0;
+my $j=($i&1)*2;
+my ($t0,$t1,$t2,$t3)=(@T[($j)%4],@T[($j+1)%4],@T[($j+2)%4],@T[($j+3)%4]);
+
+ &xor ($t0,$idx); # t0^=key[0]
+ &xor ($t1,&DWP($seed+$i*$scale+4,$key)); # t1^=key[1]
+ &movz ($idx,&HB($t0)); # (t0>>8)&0xff
+ &mov ($t3,&DWP($SBOX3_3033,$Tbl,$idx,8)); # t3=SBOX3_3033[0]
+ &movz ($idx,&LB($t0)); # (t0>>0)&0xff
+ &xor ($t3,&DWP($SBOX4_4404,$Tbl,$idx,8)); # t3^=SBOX4_4404[0]
+ &shr ($t0,16);
+ &movz ($idx,&LB($t1)); # (t1>>0)&0xff
+ &mov ($t2,&DWP($SBOX1_1110,$Tbl,$idx,8)); # t2=SBOX1_1110[1]
+ &movz ($idx,&HB($t0)); # (t0>>24)&0xff
+ &xor ($t3,&DWP($SBOX1_1110,$Tbl,$idx,8)); # t3^=SBOX1_1110[0]
+ &movz ($idx,&HB($t1)); # (t1>>8)&0xff
+ &xor ($t2,&DWP($SBOX4_4404,$Tbl,$idx,8)); # t2^=SBOX4_4404[1]
+ &shr ($t1,16);
+ &movz ($t0,&LB($t0)); # (t0>>16)&0xff
+ &xor ($t3,&DWP($SBOX2_0222,$Tbl,$t0,8)); # t3^=SBOX2_0222[0]
+ &movz ($idx,&HB($t1)); # (t1>>24)&0xff
+ &mov ($t0,&DWP($frame+4*(($j+3)%4),"esp")); # prefetch "s3"
+ &xor ($t2,$t3); # t2^=t3
+ &rotr ($t3,8); # t3=RightRotate(t3,8)
+ &xor ($t2,&DWP($SBOX2_0222,$Tbl,$idx,8)); # t2^=SBOX2_0222[1]
+ &movz ($idx,&LB($t1)); # (t1>>16)&0xff
+ &mov ($t1,&DWP($frame+4*(($j+2)%4),"esp")); # prefetch "s2"
+ &xor ($t3,$t0); # t3^=s3
+ &xor ($t2,&DWP($SBOX3_3033,$Tbl,$idx,8)); # t2^=SBOX3_3033[1]
+ &mov ($idx,&DWP($seed+($i+1)*$scale,$key)); # prefetch key[i+1]
+ &xor ($t3,$t2); # t3^=t2
+ &mov (&DWP($frame+4*(($j+3)%4),"esp"),$t3); # s3=t3
+ &xor ($t2,$t1); # t2^=s2
+ &mov (&DWP($frame+4*(($j+2)%4),"esp"),$t2); # s2=t2
+}
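+# Net effect of one Camellia_Feistel invocation, in cipher terms (a
+# sketch; s0..s3 are the four state words and F is Camellia's S-boxes
+# followed by the P diffusion, largely folded into the interleaved
+# tables above):
+#	(s2,s3) ^= F((s0,s1) ^ key[i])
+# with the two halves swapping roles on every odd $i via $j, and the
+# passive half kept in the caller-provided stack frame.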
+
+# void Camellia_EncryptBlock_Rounds(
+# int grandRounds,
+# const Byte plaintext[],
+# const KEY_TABLE_TYPE keyTable,
+# Byte ciphertext[])
+&function_begin("Camellia_EncryptBlock_Rounds");
+ &mov ("eax",&wparam(0)); # load grandRounds
+ &mov ($idx,&wparam(1)); # load plaintext pointer
+ &mov ($key,&wparam(2)); # load key schedule pointer
+
+ &mov ("ebx","esp");
+ &sub ("esp",7*4); # place for s[0-3],keyEnd,esp and ra
+ &and ("esp",-64);
+
+ # place stack frame just "above mod 1024" the key schedule
+ # this ensures that cache associativity of 2 suffices
+ &lea ("ecx",&DWP(-64-63,$key));
+ &sub ("ecx","esp");
+ &neg ("ecx");
+ &and ("ecx",0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ("esp","ecx");
+ &add ("esp",4); # 4 is reserved for callee's return address
+
+ &shl ("eax",6);
+ &lea ("eax",&DWP(0,$key,"eax"));
+ &mov ($_esp,"ebx"); # save %esp
+ &mov ($_end,"eax"); # save keyEnd
+
+ &call (&label("pic_point"));
+ &set_label("pic_point");
+ &blindpop($Tbl);
+ &lea ($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
+
+ &mov (@T[0],&DWP(0,$idx)); # load plaintext
+ &mov (@T[1],&DWP(4,$idx));
+ &mov (@T[2],&DWP(8,$idx));
+ &bswap (@T[0]);
+ &mov (@T[3],&DWP(12,$idx));
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+
+ &call ("_x86_Camellia_encrypt");
+
+ &mov ("esp",$_esp);
+ &bswap (@T[0]);
+ &mov ($idx,&wparam(3)); # load ciphertext pointer
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+ &mov (&DWP(0,$idx),@T[0]); # write ciphertext
+ &mov (&DWP(4,$idx),@T[1]);
+ &mov (&DWP(8,$idx),@T[2]);
+ &mov (&DWP(12,$idx),@T[3]);
+&function_end("Camellia_EncryptBlock_Rounds");
+# V1.x API
+&function_begin_B("Camellia_EncryptBlock");
+ &mov ("eax",128);
+ &sub ("eax",&wparam(0)); # load keyBitLength
+ &mov ("eax",3);
+ &adc ("eax",0); # keyBitLength==128?3:4
+ &mov (&wparam(0),"eax");
+ &jmp (&label("Camellia_EncryptBlock_Rounds"));
+&function_end_B("Camellia_EncryptBlock");
+
+if ($OPENSSL) {
+# void Camellia_encrypt(
+# const unsigned char *in,
+# unsigned char *out,
+# const CAMELLIA_KEY *key)
+&function_begin("Camellia_encrypt");
+ &mov ($idx,&wparam(0)); # load plaintext pointer
+ &mov ($key,&wparam(2)); # load key schedule pointer
+
+ &mov ("ebx","esp");
+ &sub ("esp",7*4); # place for s[0-3],keyEnd,esp and ra
+ &and ("esp",-64);
+ &mov ("eax",&DWP(272,$key)); # load grandRounds counter
+
+ # place stack frame just "above mod 1024" the key schedule
+ # this ensures that cache associativity of 2 suffices
+ &lea ("ecx",&DWP(-64-63,$key));
+ &sub ("ecx","esp");
+ &neg ("ecx");
+ &and ("ecx",0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ("esp","ecx");
+ &add ("esp",4); # 4 is reserved for callee's return address
+
+ &shl ("eax",6);
+ &lea ("eax",&DWP(0,$key,"eax"));
+ &mov ($_esp,"ebx"); # save %esp
+ &mov ($_end,"eax"); # save keyEnd
+
+ &call (&label("pic_point"));
+ &set_label("pic_point");
+ &blindpop($Tbl);
+ &lea ($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
+
+ &mov (@T[0],&DWP(0,$idx)); # load plaintext
+ &mov (@T[1],&DWP(4,$idx));
+ &mov (@T[2],&DWP(8,$idx));
+ &bswap (@T[0]);
+ &mov (@T[3],&DWP(12,$idx));
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+
+ &call ("_x86_Camellia_encrypt");
+
+ &mov ("esp",$_esp);
+ &bswap (@T[0]);
+ &mov ($idx,&wparam(1)); # load ciphertext pointer
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+ &mov (&DWP(0,$idx),@T[0]); # write ciphertext
+ &mov (&DWP(4,$idx),@T[1]);
+ &mov (&DWP(8,$idx),@T[2]);
+ &mov (&DWP(12,$idx),@T[3]);
+&function_end("Camellia_encrypt");
+}
+
+&function_begin_B("_x86_Camellia_encrypt");
+ &xor (@T[0],&DWP(0,$key)); # ^=key[0-3]
+ &xor (@T[1],&DWP(4,$key));
+ &xor (@T[2],&DWP(8,$key));
+ &xor (@T[3],&DWP(12,$key));
+ &mov ($idx,&DWP(16,$key)); # prefetch key[4]
+
+ &mov ($__s0,@T[0]); # save s[0-3]
+ &mov ($__s1,@T[1]);
+ &mov ($__s2,@T[2]);
+ &mov ($__s3,@T[3]);
+
+&set_label("loop",16);
+ for ($i=0;$i<6;$i++) { Camellia_Feistel($i,16,4); }
+
+ &add ($key,16*4);
+ &cmp ($key,$__end);
+ &je (&label("done"));
+
+ # @T[0-1] are preloaded, $idx is preloaded with key[0]
+ &and ($idx,@T[0]);
+ &mov (@T[3],$__s3);
+ &rotl ($idx,1);
+ &mov (@T[2],@T[3]);
+ &xor (@T[1],$idx);
+ &or (@T[2],&DWP(12,$key));
+ &mov ($__s1,@T[1]); # s1^=LeftRotate(s0&key[0],1);
+ &xor (@T[2],$__s2);
+
+ &mov ($idx,&DWP(4,$key));
+ &mov ($__s2,@T[2]); # s2^=s3|key[3];
+ &or ($idx,@T[1]);
+ &and (@T[2],&DWP(8,$key));
+ &xor (@T[0],$idx);
+ &rotl (@T[2],1);
+ &mov ($__s0,@T[0]); # s0^=s1|key[1];
+ &xor (@T[3],@T[2]);
+ &mov ($idx,&DWP(16,$key)); # prefetch key[4]
+ &mov ($__s3,@T[3]); # s3^=LeftRotate(s2&key[2],1);
+ &jmp (&label("loop"));
+
+&set_label("done",8);
+ &mov (@T[2],@T[0]); # SwapHalf
+ &mov (@T[3],@T[1]);
+ &mov (@T[0],$__s2);
+ &mov (@T[1],$__s3);
+ &xor (@T[0],$idx); # $idx is preloaded with key[0]
+ &xor (@T[1],&DWP(4,$key));
+ &xor (@T[2],&DWP(8,$key));
+ &xor (@T[3],&DWP(12,$key));
+ &ret ();
+&function_end_B("_x86_Camellia_encrypt");
+
+# void Camellia_DecryptBlock_Rounds(
+# int grandRounds,
+# const Byte ciphertext[],
+# const KEY_TABLE_TYPE keyTable,
+# Byte plaintext[])
+&function_begin("Camellia_DecryptBlock_Rounds");
+ &mov ("eax",&wparam(0)); # load grandRounds
+ &mov ($idx,&wparam(1)); # load ciphertext pointer
+ &mov ($key,&wparam(2)); # load key schedule pointer
+
+ &mov ("ebx","esp");
+ &sub ("esp",7*4); # place for s[0-3],keyEnd,esp and ra
+ &and ("esp",-64);
+
+ # place stack frame just "above mod 1024" the key schedule
+ # this ensures that cache associativity of 2 suffices
+ &lea ("ecx",&DWP(-64-63,$key));
+ &sub ("ecx","esp");
+ &neg ("ecx");
+ &and ("ecx",0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ("esp","ecx");
+ &add ("esp",4); # 4 is reserved for callee's return address
+
+ &shl ("eax",6);
+ &mov (&DWP(4*4,"esp"),$key); # save keyStart
+ &lea ($key,&DWP(0,$key,"eax"));
+ &mov (&DWP(5*4,"esp"),"ebx");# save %esp
+
+ &call (&label("pic_point"));
+ &set_label("pic_point");
+ &blindpop($Tbl);
+ &lea ($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
+
+ &mov (@T[0],&DWP(0,$idx)); # load ciphertext
+ &mov (@T[1],&DWP(4,$idx));
+ &mov (@T[2],&DWP(8,$idx));
+ &bswap (@T[0]);
+ &mov (@T[3],&DWP(12,$idx));
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+
+ &call ("_x86_Camellia_decrypt");
+
+ &mov ("esp",&DWP(5*4,"esp"));
+ &bswap (@T[0]);
+ &mov ($idx,&wparam(3)); # load plaintext pointer
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+ &mov (&DWP(0,$idx),@T[0]); # write plaintext
+ &mov (&DWP(4,$idx),@T[1]);
+ &mov (&DWP(8,$idx),@T[2]);
+ &mov (&DWP(12,$idx),@T[3]);
+&function_end("Camellia_DecryptBlock_Rounds");
+# V1.x API
+&function_begin_B("Camellia_DecryptBlock");
+ &mov ("eax",128);
+ &sub ("eax",&wparam(0)); # load keyBitLength
+ &mov ("eax",3);
+ &adc ("eax",0); # keyBitLength==128?3:4
+ &mov (&wparam(0),"eax");
+ &jmp (&label("Camellia_DecryptBlock_Rounds"));
+&function_end_B("Camellia_DecryptBlock");
+
+if ($OPENSSL) {
+# void Camellia_decrypt(
+# const unsigned char *in,
+# unsigned char *out,
+# const CAMELLIA_KEY *key)
+&function_begin("Camellia_decrypt");
+ &mov ($idx,&wparam(0)); # load ciphertext pointer
+ &mov ($key,&wparam(2)); # load key schedule pointer
+
+ &mov ("ebx","esp");
+ &sub ("esp",7*4); # place for s[0-3],keyEnd,esp and ra
+ &and ("esp",-64);
+ &mov ("eax",&DWP(272,$key)); # load grandRounds counter
+
+ # place stack frame just "above mod 1024" the key schedule
+ # this ensures that cache associativity of 2 suffices
+ &lea ("ecx",&DWP(-64-63,$key));
+ &sub ("ecx","esp");
+ &neg ("ecx");
+ &and ("ecx",0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ("esp","ecx");
+ &add ("esp",4); # 4 is reserved for callee's return address
+
+ &shl ("eax",6);
+ &mov (&DWP(4*4,"esp"),$key); # save keyStart
+ &lea ($key,&DWP(0,$key,"eax"));
+ &mov (&DWP(5*4,"esp"),"ebx");# save %esp
+
+ &call (&label("pic_point"));
+ &set_label("pic_point");
+ &blindpop($Tbl);
+ &lea ($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
+
+ &mov (@T[0],&DWP(0,$idx)); # load ciphertext
+ &mov (@T[1],&DWP(4,$idx));
+ &mov (@T[2],&DWP(8,$idx));
+ &bswap (@T[0]);
+ &mov (@T[3],&DWP(12,$idx));
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+
+ &call ("_x86_Camellia_decrypt");
+
+ &mov ("esp",&DWP(5*4,"esp"));
+ &bswap (@T[0]);
+ &mov ($idx,&wparam(1)); # load plaintext pointer
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+ &mov (&DWP(0,$idx),@T[0]); # write plaintext
+ &mov (&DWP(4,$idx),@T[1]);
+ &mov (&DWP(8,$idx),@T[2]);
+ &mov (&DWP(12,$idx),@T[3]);
+&function_end("Camellia_decrypt");
+}
+
+&function_begin_B("_x86_Camellia_decrypt");
+ &xor (@T[0],&DWP(0,$key)); # ^=key[0-3]
+ &xor (@T[1],&DWP(4,$key));
+ &xor (@T[2],&DWP(8,$key));
+ &xor (@T[3],&DWP(12,$key));
+ &mov ($idx,&DWP(-8,$key)); # prefetch key[-2]
+
+ &mov ($__s0,@T[0]); # save s[0-3]
+ &mov ($__s1,@T[1]);
+ &mov ($__s2,@T[2]);
+ &mov ($__s3,@T[3]);
+
+&set_label("loop",16);
+ for ($i=0;$i<6;$i++) { Camellia_Feistel($i,-8,4); }
+
+ &sub ($key,16*4);
+ &cmp ($key,$__end);
+ &je (&label("done"));
+
+ # @T[0-1] are preloaded, $idx is preloaded with key[2]
+ &and ($idx,@T[0]);
+ &mov (@T[3],$__s3);
+ &rotl ($idx,1);
+ &mov (@T[2],@T[3]);
+ &xor (@T[1],$idx);
+ &or (@T[2],&DWP(4,$key));
+ &mov ($__s1,@T[1]); # s1^=LeftRotate(s0&key[0],1);
+ &xor (@T[2],$__s2);
+
+ &mov ($idx,&DWP(12,$key));
+ &mov ($__s2,@T[2]); # s2^=s3|key[3];
+ &or ($idx,@T[1]);
+ &and (@T[2],&DWP(0,$key));
+ &xor (@T[0],$idx);
+ &rotl (@T[2],1);
+ &mov ($__s0,@T[0]); # s0^=s1|key[1];
+ &xor (@T[3],@T[2]);
+ &mov ($idx,&DWP(-8,$key)); # prefetch key[-2]
+ &mov ($__s3,@T[3]); # s3^=LeftRotate(s2&key[2],1);
+ &jmp (&label("loop"));
+
+&set_label("done",8);
+ &mov (@T[2],@T[0]); # SwapHalf
+ &mov (@T[3],@T[1]);
+ &mov (@T[0],$__s2);
+ &mov (@T[1],$__s3);
+ &xor (@T[2],$idx); # $idx is preloaded with key[2]
+ &xor (@T[3],&DWP(12,$key));
+ &xor (@T[0],&DWP(0,$key));
+ &xor (@T[1],&DWP(4,$key));
+ &ret ();
+&function_end_B("_x86_Camellia_decrypt");
+
+# shld is very slow on Intel P4 family. Even on AMD it limits
+# instruction decode rate [because it's VectorPath] and consequently
+# performance. PIII, PM and Core[2] seem to be the only ones which
+# execute this code ~7% faster...
+sub __rotl128 {
+ my ($i0,$i1,$i2,$i3,$rot,$rnd,@T)=@_;
+
+ $rnd *= 2;
+ if ($rot) {
+ &mov ($idx,$i0);
+ &shld ($i0,$i1,$rot);
+ &shld ($i1,$i2,$rot);
+ &shld ($i2,$i3,$rot);
+ &shld ($i3,$idx,$rot);
+ }
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i0 eq @T[0]);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i1 eq @T[0]);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i2 eq @T[0]);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i3 eq @T[0]);
+}
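+# (For reference: "shld dst,src,cnt" computes dst = dst<<cnt | src>>(32-cnt),
+# so the four-register chain above, seeded from the saved copy of $i0 in
+# $idx, amounts to a full 128-bit left rotate by $rot.)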
+
+# ... Implementing 128-bit rotate without shld gives >3x performance
+# improvement on P4, only ~7% degradation on other Intel CPUs and
+# not worse performance on AMD. This is therefore preferred.
+sub _rotl128 {
+ my ($i0,$i1,$i2,$i3,$rot,$rnd,@T)=@_;
+
+ $rnd *= 2;
+ if ($rot) {
+ &mov ($Tbl,$i0);
+ &shl ($i0,$rot);
+ &mov ($idx,$i1);
+ &shr ($idx,32-$rot);
+ &shl ($i1,$rot);
+ &or ($i0,$idx);
+ &mov ($idx,$i2);
+ &shl ($i2,$rot);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i0 eq @T[0]);
+ &shr ($idx,32-$rot);
+ &or ($i1,$idx);
+ &shr ($Tbl,32-$rot);
+ &mov ($idx,$i3);
+ &shr ($idx,32-$rot);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i1 eq @T[0]);
+ &shl ($i3,$rot);
+ &or ($i2,$idx);
+ &or ($i3,$Tbl);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i2 eq @T[0]);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i3 eq @T[0]);
+ } else {
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i0 eq @T[0]);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i1 eq @T[0]);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i2 eq @T[0]);
+ &mov (&DWP(-128+4*$rnd++,$key),shift(@T)) if ($i3 eq @T[0]);
+ }
+}
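+# Reference model of the shld-free rotate above (a plain-Perl sketch,
+# treating $w[0]||$w[1]||$w[2]||$w[3] as one 128-bit value, 0<$rot<32):
+#	my @r = map { (($w[$_]<<$rot) | ($w[($_+1)%4]>>(32-$rot)))
+#	              & 0xffffffff } 0..3;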
+
+sub _saveround {
+my ($rnd,$key,@T)=@_;
+my $bias=int(@T[0])?shift(@T):0;
+
+ &mov (&DWP($bias+$rnd*8+0,$key),@T[0]);
+ &mov (&DWP($bias+$rnd*8+4,$key),@T[1]) if ($#T>=1);
+ &mov (&DWP($bias+$rnd*8+8,$key),@T[2]) if ($#T>=2);
+ &mov (&DWP($bias+$rnd*8+12,$key),@T[3]) if ($#T>=3);
+}
+
+sub _loadround {
+my ($rnd,$key,@T)=@_;
+my $bias=int(@T[0])?shift(@T):0;
+
+ &mov (@T[0],&DWP($bias+$rnd*8+0,$key));
+ &mov (@T[1],&DWP($bias+$rnd*8+4,$key)) if ($#T>=1);
+ &mov (@T[2],&DWP($bias+$rnd*8+8,$key)) if ($#T>=2);
+ &mov (@T[3],&DWP($bias+$rnd*8+12,$key)) if ($#T>=3);
+}
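+# The optional leading numeric argument to _saveround/_loadround is a
+# byte bias into $key (int() of a register name is 0, so a plain
+# register list leaves the bias at 0), e.g. (sketch):
+#	&_saveround(2,$key,-128,@T);	# store @T at -128+2*8($key) onward
+#	&_loadround(0,$key,@T);		# bias 0, load from 0($key) onward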
+
+# void Camellia_Ekeygen(
+# const int keyBitLength,
+# const Byte *rawKey,
+# KEY_TABLE_TYPE keyTable)
+&function_begin("Camellia_Ekeygen");
+{ my $step=0;
+
+ &stack_push(4); # place for s[0-3]
+
+ &mov ($Tbl,&wparam(0)); # load arguments
+ &mov ($idx,&wparam(1));
+ &mov ($key,&wparam(2));
+
+ &mov (@T[0],&DWP(0,$idx)); # load 0-127 bits
+ &mov (@T[1],&DWP(4,$idx));
+ &mov (@T[2],&DWP(8,$idx));
+ &mov (@T[3],&DWP(12,$idx));
+
+ &bswap (@T[0]);
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+
+ &_saveround (0,$key,@T); # KL<<<0
+
+ &cmp ($Tbl,128);
+ &je (&label("1st128"));
+
+ &mov (@T[0],&DWP(16,$idx)); # load 128-191 bits
+ &mov (@T[1],&DWP(20,$idx));
+ &cmp ($Tbl,192);
+ &je (&label("1st192"));
+ &mov (@T[2],&DWP(24,$idx)); # load 192-255 bits
+ &mov (@T[3],&DWP(28,$idx));
+ &jmp (&label("1st256"));
+&set_label("1st192",4);
+ &mov (@T[2],@T[0]);
+ &mov (@T[3],@T[1]);
+ &not (@T[2]);
+ &not (@T[3]);
+&set_label("1st256",4);
+ &bswap (@T[0]);
+ &bswap (@T[1]);
+ &bswap (@T[2]);
+ &bswap (@T[3]);
+
+ &_saveround (4,$key,@T); # temporary storage for KR!
+
+ &xor (@T[0],&DWP(0*8+0,$key)); # KR^KL
+ &xor (@T[1],&DWP(0*8+4,$key));
+ &xor (@T[2],&DWP(1*8+0,$key));
+ &xor (@T[3],&DWP(1*8+4,$key));
+
+&set_label("1st128",4);
+ &call (&label("pic_point"));
+ &set_label("pic_point");
+ &blindpop($Tbl);
+ &lea ($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
+ &lea ($key,&DWP(&label("Camellia_SIGMA")."-".&label("Camellia_SBOX"),$Tbl));
+
+ &mov ($idx,&DWP($step*8,$key)); # prefetch SIGMA[0]
+ &mov (&swtmp(0),@T[0]); # save s[0-3]
+ &mov (&swtmp(1),@T[1]);
+ &mov (&swtmp(2),@T[2]);
+ &mov (&swtmp(3),@T[3]);
+ &Camellia_Feistel($step++);
+ &Camellia_Feistel($step++);
+ &mov (@T[2],&swtmp(2));
+ &mov (@T[3],&swtmp(3));
+
+ &mov ($idx,&wparam(2));
+ &xor (@T[0],&DWP(0*8+0,$idx)); # ^KL
+ &xor (@T[1],&DWP(0*8+4,$idx));
+ &xor (@T[2],&DWP(1*8+0,$idx));
+ &xor (@T[3],&DWP(1*8+4,$idx));
+
+ &mov ($idx,&DWP($step*8,$key)); # prefetch SIGMA[4]
+ &mov (&swtmp(0),@T[0]); # save s[0-3]
+ &mov (&swtmp(1),@T[1]);
+ &mov (&swtmp(2),@T[2]);
+ &mov (&swtmp(3),@T[3]);
+ &Camellia_Feistel($step++);
+ &Camellia_Feistel($step++);
+ &mov (@T[2],&swtmp(2));
+ &mov (@T[3],&swtmp(3));
+
+ &mov ($idx,&wparam(0));
+ &cmp ($idx,128);
+ &jne (&label("2nd256"));
+
+ &mov ($key,&wparam(2));
+ &lea ($key,&DWP(128,$key)); # size optimization
+
+ ####### process KA
+ &_saveround (2,$key,-128,@T); # KA<<<0
+ &_rotl128 (@T,15,6,@T); # KA<<<15
+ &_rotl128 (@T,15,8,@T); # KA<<<(15+15=30)
+ &_rotl128 (@T,15,12,@T[0],@T[1]); # KA<<<(30+15=45)
+ &_rotl128 (@T,15,14,@T); # KA<<<(45+15=60)
+ push (@T,shift(@T)); # rotl128(@T,32);
+ &_rotl128 (@T,2,20,@T); # KA<<<(60+32+2=94)
+ &_rotl128 (@T,17,24,@T); # KA<<<(94+17=111)
+
+ ####### process KL
+ &_loadround (0,$key,-128,@T); # load KL
+ &_rotl128 (@T,15,4,@T); # KL<<<15
+ &_rotl128 (@T,30,10,@T); # KL<<<(15+30=45)
+ &_rotl128 (@T,15,13,@T[2],@T[3]); # KL<<<(45+15=60)
+ &_rotl128 (@T,17,16,@T); # KL<<<(60+17=77)
+ &_rotl128 (@T,17,18,@T); # KL<<<(77+17=94)
+ &_rotl128 (@T,17,22,@T); # KL<<<(94+17=111)
+
+ while (@T[0] ne "eax") # restore order
+ { unshift (@T,pop(@T)); }
+
+ &mov ("eax",3); # 3 grandRounds
+ &jmp (&label("done"));
+
+&set_label("2nd256",16);
+ &mov ($idx,&wparam(2));
+ &_saveround (6,$idx,@T); # temporary storage for KA!
+
+ &xor (@T[0],&DWP(4*8+0,$idx)); # KA^KR
+ &xor (@T[1],&DWP(4*8+4,$idx));
+ &xor (@T[2],&DWP(5*8+0,$idx));
+ &xor (@T[3],&DWP(5*8+4,$idx));
+
+ &mov ($idx,&DWP($step*8,$key)); # prefetch SIGMA[8]
+ &mov (&swtmp(0),@T[0]); # save s[0-3]
+ &mov (&swtmp(1),@T[1]);
+ &mov (&swtmp(2),@T[2]);
+ &mov (&swtmp(3),@T[3]);
+ &Camellia_Feistel($step++);
+ &Camellia_Feistel($step++);
+ &mov (@T[2],&swtmp(2));
+ &mov (@T[3],&swtmp(3));
+
+ &mov ($key,&wparam(2));
+ &lea ($key,&DWP(128,$key)); # size optimization
+
+ ####### process KB
+ &_saveround (2,$key,-128,@T); # KB<<<0
+ &_rotl128 (@T,30,10,@T); # KB<<<30
+ &_rotl128 (@T,30,20,@T); # KB<<<(30+30=60)
+ push (@T,shift(@T)); # rotl128(@T,32);
+ &_rotl128 (@T,19,32,@T); # KB<<<(60+32+19=111)
+
+ ####### process KR
+ &_loadround (4,$key,-128,@T); # load KR
+ &_rotl128 (@T,15,4,@T); # KR<<<15
+ &_rotl128 (@T,15,8,@T); # KR<<<(15+15=30)
+ &_rotl128 (@T,30,18,@T); # KR<<<(30+30=60)
+ push (@T,shift(@T)); # rotl128(@T,32);
+ &_rotl128 (@T,2,26,@T); # KR<<<(60+32+2=94)
+
+ ####### process KA
+ &_loadround (6,$key,-128,@T); # load KA
+ &_rotl128 (@T,15,6,@T); # KA<<<15
+ &_rotl128 (@T,30,14,@T); # KA<<<(15+30=45)
+ push (@T,shift(@T)); # rotl128(@T,32);
+ &_rotl128 (@T,0,24,@T); # KA<<<(45+32+0=77)
+ &_rotl128 (@T,17,28,@T); # KA<<<(77+17=94)
+
+ ####### process KL
+ &_loadround (0,$key,-128,@T); # load KL
+ push (@T,shift(@T)); # rotl128(@T,32);
+ &_rotl128 (@T,13,12,@T); # KL<<<(32+13=45)
+ &_rotl128 (@T,15,16,@T); # KL<<<(45+15=60)
+ &_rotl128 (@T,17,22,@T); # KL<<<(60+17=77)
+ push (@T,shift(@T)); # rotl128(@T,32);
+ &_rotl128 (@T,2,30,@T); # KL<<<(77+32+2=111)
+
+ while (@T[0] ne "eax") # restore order
+ { unshift (@T,pop(@T)); }
+
+ &mov ("eax",4); # 4 grandRounds
+&set_label("done");
+ &lea ("edx",&DWP(272-128,$key)); # end of key schedule
+ &stack_pop(4);
+}
+&function_end("Camellia_Ekeygen");
+
+if ($OPENSSL) {
+# int Camellia_set_key (
+# const unsigned char *userKey,
+# int bits,
+# CAMELLIA_KEY *key)
+&function_begin_B("Camellia_set_key");
+ &push ("ebx");
+ &mov ("ecx",&wparam(0)); # pull arguments
+ &mov ("ebx",&wparam(1));
+ &mov ("edx",&wparam(2));
+
+ &mov ("eax",-1);
+ &test ("ecx","ecx");
+ &jz (&label("done")); # userKey==NULL?
+ &test ("edx","edx");
+ &jz (&label("done")); # key==NULL?
+
+ &mov ("eax",-2);
+ &cmp ("ebx",256);
+ &je (&label("arg_ok")); # bits==256?
+ &cmp ("ebx",192);
+ &je (&label("arg_ok")); # bits==192?
+ &cmp ("ebx",128);
+ &jne (&label("done")); # bits!=128?
+&set_label("arg_ok",4);
+
+ &push ("edx"); # push arguments
+ &push ("ecx");
+ &push ("ebx");
+ &call ("Camellia_Ekeygen");
+ &stack_pop(3);
+
+ # eax holds grandRounds and edx points at where to put it
+ &mov (&DWP(0,"edx"),"eax");
+ &xor ("eax","eax");
+&set_label("done",4);
+ &pop ("ebx");
+ &ret ();
+&function_end_B("Camellia_set_key");
+}
+
+@SBOX=(
+112,130, 44,236,179, 39,192,229,228,133, 87, 53,234, 12,174, 65,
+ 35,239,107,147, 69, 25,165, 33,237, 14, 79, 78, 29,101,146,189,
+134,184,175,143,124,235, 31,206, 62, 48,220, 95, 94,197, 11, 26,
+166,225, 57,202,213, 71, 93, 61,217, 1, 90,214, 81, 86,108, 77,
+139, 13,154,102,251,204,176, 45,116, 18, 43, 32,240,177,132,153,
+223, 76,203,194, 52,126,118, 5,109,183,169, 49,209, 23, 4,215,
+ 20, 88, 58, 97,222, 27, 17, 28, 50, 15,156, 22, 83, 24,242, 34,
+254, 68,207,178,195,181,122,145, 36, 8,232,168, 96,252,105, 80,
+170,208,160,125,161,137, 98,151, 84, 91, 30,149,224,255,100,210,
+ 16,196, 0, 72,163,247,117,219,138, 3,230,218, 9, 63,221,148,
+135, 92,131, 2,205, 74,144, 51,115,103,246,243,157,127,191,226,
+ 82,155,216, 38,200, 55,198, 59,129,150,111, 75, 19,190, 99, 46,
+233,121,167,140,159,110,188,142, 41,245,249,182, 47,253,180, 89,
+120,152, 6,106,231, 70,113,186,212, 37,171, 66,136,162,141,250,
+114, 7,185, 85,248,238,172, 10, 54, 73, 42,104, 60, 56,241,164,
+ 64, 40,211,123,187,201, 67,193, 21,227,173,244,119,199,128,158);
+
+sub S1110 { my $i=shift; $i=@SBOX[$i]; return $i<<24|$i<<16|$i<<8; }
+sub S4404 { my $i=shift; $i=($i<<1|$i>>7)&0xff; $i=@SBOX[$i]; return $i<<24|$i<<16|$i; }
+sub S0222 { my $i=shift; $i=@SBOX[$i]; $i=($i<<1|$i>>7)&0xff; return $i<<16|$i<<8|$i; }
+sub S3033 { my $i=shift; $i=@SBOX[$i]; $i=($i>>1|$i<<7)&0xff; return $i<<24|$i<<8|$i; }
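+# The digits in the table names give, from most to least significant
+# byte lane, which of Camellia's s-boxes s1..s4 fills that lane (0 = a
+# zero byte); s2/s3 rotate the looked-up value, s4 rotates the index,
+# so much of the P-layer byte spreading is absorbed into the lookups.
+# E.g. (sketch): S1110(0) == 0x70707000, since SBOX[0] == 112 == 0x70.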
+
+&set_label("Camellia_SIGMA",64);
+&data_word(
+ 0xa09e667f, 0x3bcc908b, 0xb67ae858, 0x4caa73b2,
+ 0xc6ef372f, 0xe94f82be, 0x54ff53a5, 0xf1d36f1c,
+ 0x10e527fa, 0xde682d1d, 0xb05688c2, 0xb3e6c1fd,
+ 0, 0, 0, 0);
+&set_label("Camellia_SBOX",64);
+# tables are interleaved, remember?
+for ($i=0;$i<256;$i++) { &data_word(&S1110($i),&S4404($i)); }
+for ($i=0;$i<256;$i++) { &data_word(&S0222($i),&S3033($i)); }
+
+# void Camellia_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+# size_t length, const CAMELLIA_KEY *key,
+# unsigned char *ivp,const int enc);
+{
+# stack frame layout (left column: offsets in this routine; right
+# column: the same slots as seen by _x86_Camellia_* after the call)
+# -4(%esp)    # return address          0(%esp)
+#  0(%esp)    # s0                      4(%esp)
+#  4(%esp)    # s1                      8(%esp)
+#  8(%esp)    # s2                     12(%esp)
+# 12(%esp)    # s3                     16(%esp)
+# 16(%esp)    # end of key schedule    20(%esp)
+# 20(%esp)    # %esp backup
+my $_inp=&DWP(24,"esp"); #copy of wparam(0)
+my $_out=&DWP(28,"esp"); #copy of wparam(1)
+my $_len=&DWP(32,"esp"); #copy of wparam(2)
+my $_key=&DWP(36,"esp"); #copy of wparam(3)
+my $_ivp=&DWP(40,"esp"); #copy of wparam(4)
+my $ivec=&DWP(44,"esp"); #ivec[16]
+my $_tmp=&DWP(44,"esp"); #volatile variable [yes, aliases with ivec]
+my ($s0,$s1,$s2,$s3) = @T;
+
+&function_begin("Camellia_cbc_encrypt");
+ &mov ($s2 eq "ecx"? $s2 : "",&wparam(2)); # load len
+ &cmp ($s2,0);
+ &je (&label("enc_out"));
+
+ &pushf ();
+ &cld ();
+
+ &mov ($s0,&wparam(0)); # load inp
+ &mov ($s1,&wparam(1)); # load out
+ #&mov ($s2,&wparam(2)); # load len
+ &mov ($s3,&wparam(3)); # load key
+ &mov ($Tbl,&wparam(4)); # load ivp
+
+ # allocate aligned stack frame...
+ &lea ($idx,&DWP(-64,"esp"));
+ &and ($idx,-64);
+
+ # place stack frame just "above mod 1024" the key schedule
+ # this ensures that cache associativity of 2 suffices
+ &lea ($key,&DWP(-64-63,$s3));
+ &sub ($key,$idx);
+ &neg ($key);
+ &and ($key,0x3C0); # modulo 1024, but aligned to cache-line
+ &sub ($idx,$key);
+
+ &mov ($key,&wparam(5)); # load enc
+
+ &exch ("esp",$idx);
+ &add ("esp",4); # reserve for return address!
+ &mov ($_esp,$idx); # save %esp
+
+ &mov ($_inp,$s0); # save copy of inp
+ &mov ($_out,$s1); # save copy of out
+ &mov ($_len,$s2); # save copy of len
+ &mov ($_key,$s3); # save copy of key
+ &mov ($_ivp,$Tbl); # save copy of ivp
+
+ &call (&label("pic_point")); # make it PIC!
+ &set_label("pic_point");
+ &blindpop($Tbl);
+ &lea ($Tbl,&DWP(&label("Camellia_SBOX")."-".&label("pic_point"),$Tbl));
+
+ &mov ($idx,32);
+ &set_label("prefetch_sbox",4);
+ &mov ($s0,&DWP(0,$Tbl));
+ &mov ($s1,&DWP(32,$Tbl));
+ &mov ($s2,&DWP(64,$Tbl));
+ &mov ($s3,&DWP(96,$Tbl));
+ &lea ($Tbl,&DWP(128,$Tbl));
+ &dec ($idx);
+ &jnz (&label("prefetch_sbox"));
+ &mov ($s0,$_key);
+ &sub ($Tbl,4096);
+ &mov ($idx,$_inp);
+ &mov ($s3,&DWP(272,$s0)); # load grandRounds
+
+ &cmp ($key,0);
+ &je (&label("DECRYPT"));
+
+ &mov ($s2,$_len);
+ &mov ($key,$_ivp);
+ &shl ($s3,6);
+ &lea ($s3,&DWP(0,$s0,$s3));
+ &mov ($_end,$s3);
+
+ &test ($s2,0xFFFFFFF0);
+ &jz (&label("enc_tail")); # short input...
+
+ &mov ($s0,&DWP(0,$key)); # load iv
+ &mov ($s1,&DWP(4,$key));
+
+ &set_label("enc_loop",4);
+ &mov ($s2,&DWP(8,$key));
+ &mov ($s3,&DWP(12,$key));
+
+ &xor ($s0,&DWP(0,$idx)); # xor input data
+ &xor ($s1,&DWP(4,$idx));
+ &xor ($s2,&DWP(8,$idx));
+ &bswap ($s0);
+ &xor ($s3,&DWP(12,$idx));
+ &bswap ($s1);
+ &mov ($key,$_key); # load key
+ &bswap ($s2);
+ &bswap ($s3);
+
+ &call ("_x86_Camellia_encrypt");
+
+ &mov ($idx,$_inp); # load inp
+ &mov ($key,$_out); # load out
+
+ &bswap ($s0);
+ &bswap ($s1);
+ &bswap ($s2);
+ &mov (&DWP(0,$key),$s0); # save output data
+ &bswap ($s3);
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
+
+ &mov ($s2,$_len); # load len
+
+ &lea ($idx,&DWP(16,$idx));
+ &mov ($_inp,$idx); # save inp
+
+ &lea ($s3,&DWP(16,$key));
+ &mov ($_out,$s3); # save out
+
+ &sub ($s2,16);
+ &test ($s2,0xFFFFFFF0);
+ &mov ($_len,$s2); # save len
+ &jnz (&label("enc_loop"));
+ &test ($s2,15);
+ &jnz (&label("enc_tail"));
+ &mov ($idx,$_ivp); # load ivp
+ &mov ($s2,&DWP(8,$key)); # restore last dwords
+ &mov ($s3,&DWP(12,$key));
+ &mov (&DWP(0,$idx),$s0); # save ivec
+ &mov (&DWP(4,$idx),$s1);
+ &mov (&DWP(8,$idx),$s2);
+ &mov (&DWP(12,$idx),$s3);
+
+ &mov ("esp",$_esp);
+ &popf ();
+ &set_label("enc_out");
+ &function_end_A();
+ &pushf (); # kludge, never executed
+
+ &set_label("enc_tail",4);
+ &mov ($s0,$key eq "edi" ? $key : "");
+ &mov ($key,$_out); # load out
+ &push ($s0); # push ivp
+ &mov ($s1,16);
+ &sub ($s1,$s2);
+ &cmp ($key,$idx); # compare with inp
+ &je (&label("enc_in_place"));
+ &align (4);
+ &data_word(0xA4F3F689); # rep movsb # copy input
+ &jmp (&label("enc_skip_in_place"));
+ &set_label("enc_in_place");
+ &lea ($key,&DWP(0,$key,$s2));
+ &set_label("enc_skip_in_place");
+ &mov ($s2,$s1);
+ &xor ($s0,$s0);
+ &align (4);
+ &data_word(0xAAF3F689); # rep stosb # zero tail
+ &pop ($key); # pop ivp
+
+ &mov ($idx,$_out); # output as input
+ &mov ($s0,&DWP(0,$key));
+ &mov ($s1,&DWP(4,$key));
+ &mov ($_len,16); # len=16
+ &jmp (&label("enc_loop")); # one more spin...
+
+#----------------------------- DECRYPT -----------------------------#
+&set_label("DECRYPT",16);
+ &shl ($s3,6);
+ &lea ($s3,&DWP(0,$s0,$s3));
+ &mov ($_end,$s0);
+ &mov ($_key,$s3);
+
+ &cmp ($idx,$_out);
+ &je (&label("dec_in_place")); # in-place processing...
+
+ &mov ($key,$_ivp); # load ivp
+ &mov ($_tmp,$key);
+
+ &set_label("dec_loop",4);
+ &mov ($s0,&DWP(0,$idx)); # read input
+ &mov ($s1,&DWP(4,$idx));
+ &mov ($s2,&DWP(8,$idx));
+ &bswap ($s0);
+ &mov ($s3,&DWP(12,$idx));
+ &bswap ($s1);
+ &mov ($key,$_key); # load key
+ &bswap ($s2);
+ &bswap ($s3);
+
+ &call ("_x86_Camellia_decrypt");
+
+ &mov ($key,$_tmp); # load ivp
+ &mov ($idx,$_len); # load len
+
+ &bswap ($s0);
+ &bswap ($s1);
+ &bswap ($s2);
+ &xor ($s0,&DWP(0,$key)); # xor iv
+ &bswap ($s3);
+ &xor ($s1,&DWP(4,$key));
+ &xor ($s2,&DWP(8,$key));
+ &xor ($s3,&DWP(12,$key));
+
+ &sub ($idx,16);
+ &jc (&label("dec_partial"));
+ &mov ($_len,$idx); # save len
+ &mov ($idx,$_inp); # load inp
+ &mov ($key,$_out); # load out
+
+ &mov (&DWP(0,$key),$s0); # write output
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
+
+ &mov ($_tmp,$idx); # save ivp
+ &lea ($idx,&DWP(16,$idx));
+ &mov ($_inp,$idx); # save inp
+
+ &lea ($key,&DWP(16,$key));
+ &mov ($_out,$key); # save out
+
+ &jnz (&label("dec_loop"));
+ &mov ($key,$_tmp); # load temp ivp
+ &set_label("dec_end");
+ &mov ($idx,$_ivp); # load user ivp
+ &mov ($s0,&DWP(0,$key)); # load iv
+ &mov ($s1,&DWP(4,$key));
+ &mov ($s2,&DWP(8,$key));
+ &mov ($s3,&DWP(12,$key));
+ &mov (&DWP(0,$idx),$s0); # copy back to user
+ &mov (&DWP(4,$idx),$s1);
+ &mov (&DWP(8,$idx),$s2);
+ &mov (&DWP(12,$idx),$s3);
+ &jmp (&label("dec_out"));
+
+ &set_label("dec_partial",4);
+ &lea ($key,$ivec);
+ &mov (&DWP(0,$key),$s0); # dump output to stack
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
+ &lea ($s2 eq "ecx" ? $s2 : "",&DWP(16,$idx));
+ &mov ($idx eq "esi" ? $idx : "",$key);
+ &mov ($key eq "edi" ? $key : "",$_out); # load out
+ &data_word(0xA4F3F689); # rep movsb # copy output
+ &mov ($key,$_inp); # use inp as temp ivp
+ &jmp (&label("dec_end"));
+
+ &set_label("dec_in_place",4);
+ &set_label("dec_in_place_loop");
+ &lea ($key,$ivec);
+ &mov ($s0,&DWP(0,$idx)); # read input
+ &mov ($s1,&DWP(4,$idx));
+ &mov ($s2,&DWP(8,$idx));
+ &mov ($s3,&DWP(12,$idx));
+
+ &mov (&DWP(0,$key),$s0); # copy to temp
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &bswap ($s0);
+ &mov (&DWP(12,$key),$s3);
+ &bswap ($s1);
+ &mov ($key,$_key); # load key
+ &bswap ($s2);
+ &bswap ($s3);
+
+ &call ("_x86_Camellia_decrypt");
+
+ &mov ($key,$_ivp); # load ivp
+ &mov ($idx,$_out); # load out
+
+ &bswap ($s0);
+ &bswap ($s1);
+ &bswap ($s2);
+ &xor ($s0,&DWP(0,$key)); # xor iv
+ &bswap ($s3);
+ &xor ($s1,&DWP(4,$key));
+ &xor ($s2,&DWP(8,$key));
+ &xor ($s3,&DWP(12,$key));
+
+ &mov (&DWP(0,$idx),$s0); # write output
+ &mov (&DWP(4,$idx),$s1);
+ &mov (&DWP(8,$idx),$s2);
+ &mov (&DWP(12,$idx),$s3);
+
+ &lea ($idx,&DWP(16,$idx));
+ &mov ($_out,$idx); # save out
+
+ &lea ($idx,$ivec);
+ &mov ($s0,&DWP(0,$idx)); # read temp
+ &mov ($s1,&DWP(4,$idx));
+ &mov ($s2,&DWP(8,$idx));
+ &mov ($s3,&DWP(12,$idx));
+
+ &mov (&DWP(0,$key),$s0); # copy iv
+ &mov (&DWP(4,$key),$s1);
+ &mov (&DWP(8,$key),$s2);
+ &mov (&DWP(12,$key),$s3);
+
+ &mov ($idx,$_inp); # load inp
+
+ &lea ($idx,&DWP(16,$idx));
+ &mov ($_inp,$idx); # save inp
+
+ &mov ($s2,$_len); # load len
+ &sub ($s2,16);
+ &jc (&label("dec_in_place_partial"));
+ &mov ($_len,$s2); # save len
+ &jnz (&label("dec_in_place_loop"));
+ &jmp (&label("dec_out"));
+
+ &set_label("dec_in_place_partial",4);
+ # one can argue if this is actually required...
+ &mov ($key eq "edi" ? $key : "",$_out);
+ &lea ($idx eq "esi" ? $idx : "",$ivec);
+ &lea ($key,&DWP(0,$key,$s2));
+ &lea ($idx,&DWP(16,$idx,$s2));
+ &neg ($s2 eq "ecx" ? $s2 : "");
+ &data_word(0xA4F3F689); # rep movsb # restore tail
+
+ &set_label("dec_out",4);
+ &mov ("esp",$_esp);
+ &popf ();
+&function_end("Camellia_cbc_encrypt");
+}
+
+&asciz("Camellia for x86 by <appro\@openssl.org>");
+
+&asm_finish();
+
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/camellia/asm/cmll-x86_64.pl b/openssl-1.1.0h/crypto/camellia/asm/cmll-x86_64.pl
new file mode 100644
index 0000000..da5ad7b
--- /dev/null
+++ b/openssl-1.1.0h/crypto/camellia/asm/cmll-x86_64.pl
@@ -0,0 +1,1088 @@
+#! /usr/bin/env perl
+# Copyright 2008-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Copyright (c) 2008 Andy Polyakov <appro@openssl.org>
+#
+# This module may be used under the terms of either the GNU General
+# Public License version 2 or later, the GNU Lesser General Public
+# License version 2.1 or later, the Mozilla Public License version
+# 1.1 or the BSD License. The exact terms of either license are
+# distributed along with this module. For further details see
+# http://www.openssl.org/~appro/camellia/.
+# ====================================================================
+
+# Performance in cycles per processed byte (less is better) in
+# 'openssl speed ...' benchmark:
+#
+# AMD64 Core2 EM64T
+# -evp camellia-128-ecb 16.7 21.0 22.7
+# + over gcc 3.4.6 +25% +5% 0%
+#
+# camellia-128-cbc 15.7 20.4 21.1
+#
+# 128-bit key setup 128 216 205 cycles/key
+# + over gcc 3.4.6 +54% +39% +15%
+#
+# Numbers in "+" rows represent performance improvement over compiler
+# generated code. Key setup timings are impressive on AMD and Core2
+# thanks to 64-bit operations being covertly deployed. Improvement on
+# EM64T, a pre-Core2 Intel x86_64 CPU, is not as impressive, because it
+# apparently emulates some of the 64-bit operations in [32-bit] microcode.
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+sub hi() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1h/; $r; }
+sub lo() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/;
+ $r =~ s/%[er]([sd]i)/%\1l/;
+ $r =~ s/%(r[0-9]+)[d]?/%\1b/; $r; }
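+# e.g. (sketch): lo("%eax") -> "%al", hi("%ebx") -> "%bh",
+#                lo("%esi") -> "%sil", lo("%r9d") -> "%r9b"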
+
+$t0="%eax";$t1="%ebx";$t2="%ecx";$t3="%edx";
+@S=("%r8d","%r9d","%r10d","%r11d");
+$i0="%esi";
+$i1="%edi";
+$Tbl="%rbp"; # size optimization
+$inp="%r12";
+$out="%r13";
+$key="%r14";
+$keyend="%r15";
+$arg0d=$win64?"%ecx":"%edi";
+
+# const unsigned int Camellia_SBOX[4][256];
+# Well, sort of... Camellia_SBOX[0][] is interleaved with [1][],
+# and [2][] - with [3][]. This is done to minimize code size.
+$SBOX1_1110=0; # Camellia_SBOX[0]
+$SBOX4_4404=4; # Camellia_SBOX[1]
+$SBOX2_0222=2048; # Camellia_SBOX[2]
+$SBOX3_3033=2052; # Camellia_SBOX[3]
+
+sub Camellia_Feistel {
+my $i=$_[0];
+my $seed=defined($_[1])?$_[1]:0;
+my $scale=$seed<0?-8:8;
+my $j=($i&1)*2;
+my ($s0,$s1,$s2,$s3)=(@S[($j)%4],@S[($j+1)%4],@S[($j+2)%4],@S[($j+3)%4]);
+
+$code.=<<___;
+ xor $s0,$t0 # t0^=key[0]
+ xor $s1,$t1 # t1^=key[1]
+ movz `&hi("$t0")`,$i0 # (t0>>8)&0xff
+ movz `&lo("$t1")`,$i1 # (t1>>0)&0xff
+ mov $SBOX3_3033($Tbl,$i0,8),$t3 # t3=SBOX3_3033[0]
+ mov $SBOX1_1110($Tbl,$i1,8),$t2 # t2=SBOX1_1110[1]
+ movz `&lo("$t0")`,$i0 # (t0>>0)&0xff
+ shr \$16,$t0
+ movz `&hi("$t1")`,$i1 # (t1>>8)&0xff
+ xor $SBOX4_4404($Tbl,$i0,8),$t3 # t3^=SBOX4_4404[0]
+ shr \$16,$t1
+ xor $SBOX4_4404($Tbl,$i1,8),$t2 # t2^=SBOX4_4404[1]
+ movz `&hi("$t0")`,$i0 # (t0>>24)&0xff
+ movz `&lo("$t1")`,$i1 # (t1>>16)&0xff
+ xor $SBOX1_1110($Tbl,$i0,8),$t3 # t3^=SBOX1_1110[0]
+ xor $SBOX3_3033($Tbl,$i1,8),$t2 # t2^=SBOX3_3033[1]
+ movz `&lo("$t0")`,$i0 # (t0>>16)&0xff
+ movz `&hi("$t1")`,$i1 # (t1>>24)&0xff
+ xor $SBOX2_0222($Tbl,$i0,8),$t3 # t3^=SBOX2_0222[0]
+ xor $SBOX2_0222($Tbl,$i1,8),$t2 # t2^=SBOX2_0222[1]
+ mov `$seed+($i+1)*$scale`($key),$t1 # prefetch key[i+1]
+ mov `$seed+($i+1)*$scale+4`($key),$t0
+ xor $t3,$t2 # t2^=t3
+ ror \$8,$t3 # t3=RightRotate(t3,8)
+ xor $t2,$s2
+ xor $t2,$s3
+ xor $t3,$s3
+___
+}
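+# Same (s2,s3) ^= F((s0,s1)^key[i]) step as in the 32-bit module, except
+# that the whole state stays in the @S registers and the next round's
+# subkey words are prefetched into $t1/$t0 at the end of each round.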
+
+# void Camellia_EncryptBlock_Rounds(
+# int grandRounds,
+# const Byte plaintext[],
+# const KEY_TABLE_TYPE keyTable,
+# Byte ciphertext[])
+$code=<<___;
+.text
+
+# V1.x API
+.globl Camellia_EncryptBlock
+.type Camellia_EncryptBlock,\@abi-omnipotent
+.align 16
+Camellia_EncryptBlock:
+ movl \$128,%eax
+ subl $arg0d,%eax
+ movl \$3,$arg0d
+ adcl \$0,$arg0d # keyBitLength==128?3:4
+ jmp .Lenc_rounds
+.size Camellia_EncryptBlock,.-Camellia_EncryptBlock
+# V2
+.globl Camellia_EncryptBlock_Rounds
+.type Camellia_EncryptBlock_Rounds,\@function,4
+.align 16
+.Lenc_rounds:
+Camellia_EncryptBlock_Rounds:
+ push %rbx
+ push %rbp
+ push %r13
+ push %r14
+ push %r15
+.Lenc_prologue:
+
+ #mov %rsi,$inp # put away arguments
+ mov %rcx,$out
+ mov %rdx,$key
+
+ shl \$6,%edi # process grandRounds
+ lea .LCamellia_SBOX(%rip),$Tbl
+ lea ($key,%rdi),$keyend
+
+ mov 0(%rsi),@S[0] # load plaintext
+ mov 4(%rsi),@S[1]
+ mov 8(%rsi),@S[2]
+ bswap @S[0]
+ mov 12(%rsi),@S[3]
+ bswap @S[1]
+ bswap @S[2]
+ bswap @S[3]
+
+ call _x86_64_Camellia_encrypt
+
+ bswap @S[0]
+ bswap @S[1]
+ bswap @S[2]
+ mov @S[0],0($out)
+ bswap @S[3]
+ mov @S[1],4($out)
+ mov @S[2],8($out)
+ mov @S[3],12($out)
+
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%rbp
+ mov 32(%rsp),%rbx
+ lea 40(%rsp),%rsp
+.Lenc_epilogue:
+ ret
+.size Camellia_EncryptBlock_Rounds,.-Camellia_EncryptBlock_Rounds
+
+.type _x86_64_Camellia_encrypt,\@abi-omnipotent
+.align 16
+_x86_64_Camellia_encrypt:
+ xor 0($key),@S[1]
+ xor 4($key),@S[0] # ^=key[0-3]
+ xor 8($key),@S[3]
+ xor 12($key),@S[2]
+.align 16
+.Leloop:
+ mov 16($key),$t1 # prefetch key[4-5]
+ mov 20($key),$t0
+
+___
+ for ($i=0;$i<6;$i++) { Camellia_Feistel($i,16); }
+$code.=<<___;
+ lea 16*4($key),$key
+ cmp $keyend,$key
+ mov 8($key),$t3 # prefetch key[2-3]
+ mov 12($key),$t2
+ je .Ledone
+
+ and @S[0],$t0
+ or @S[3],$t3
+ rol \$1,$t0
+ xor $t3,@S[2] # s2^=s3|key[3];
+ xor $t0,@S[1] # s1^=LeftRotate(s0&key[0],1);
+ and @S[2],$t2
+ or @S[1],$t1
+ rol \$1,$t2
+ xor $t1,@S[0] # s0^=s1|key[1];
+ xor $t2,@S[3] # s3^=LeftRotate(s2&key[2],1);
+ jmp .Leloop
+
+.align 16
+.Ledone:
+ xor @S[2],$t0 # SwapHalf
+ xor @S[3],$t1
+ xor @S[0],$t2
+ xor @S[1],$t3
+
+ mov $t0,@S[0]
+ mov $t1,@S[1]
+ mov $t2,@S[2]
+ mov $t3,@S[3]
+
+ .byte 0xf3,0xc3 # rep ret
+.size _x86_64_Camellia_encrypt,.-_x86_64_Camellia_encrypt
+
+# V1.x API
+.globl Camellia_DecryptBlock
+.type Camellia_DecryptBlock,\@abi-omnipotent
+.align 16
+Camellia_DecryptBlock:
+ movl \$128,%eax
+ subl $arg0d,%eax
+ movl \$3,$arg0d
+ adcl \$0,$arg0d # keyBitLength==128?3:4
+ jmp .Ldec_rounds
+.size Camellia_DecryptBlock,.-Camellia_DecryptBlock
+# V2
+.globl Camellia_DecryptBlock_Rounds
+.type Camellia_DecryptBlock_Rounds,\@function,4
+.align 16
+.Ldec_rounds:
+Camellia_DecryptBlock_Rounds:
+ push %rbx
+ push %rbp
+ push %r13
+ push %r14
+ push %r15
+.Ldec_prologue:
+
+ #mov %rsi,$inp # put away arguments
+ mov %rcx,$out
+ mov %rdx,$keyend
+
+ shl \$6,%edi # process grandRounds
+ lea .LCamellia_SBOX(%rip),$Tbl
+ lea ($keyend,%rdi),$key
+
+ mov 0(%rsi),@S[0] # load plaintext
+ mov 4(%rsi),@S[1]
+ mov 8(%rsi),@S[2]
+ bswap @S[0]
+ mov 12(%rsi),@S[3]
+ bswap @S[1]
+ bswap @S[2]
+ bswap @S[3]
+
+ call _x86_64_Camellia_decrypt
+
+ bswap @S[0]
+ bswap @S[1]
+ bswap @S[2]
+ mov @S[0],0($out)
+ bswap @S[3]
+ mov @S[1],4($out)
+ mov @S[2],8($out)
+ mov @S[3],12($out)
+
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%rbp
+ mov 32(%rsp),%rbx
+ lea 40(%rsp),%rsp
+.Ldec_epilogue:
+ ret
+.size Camellia_DecryptBlock_Rounds,.-Camellia_DecryptBlock_Rounds
+
+.type _x86_64_Camellia_decrypt,\@abi-omnipotent
+.align 16
+_x86_64_Camellia_decrypt:
+ xor 0($key),@S[1]
+ xor 4($key),@S[0] # ^=key[0-3]
+ xor 8($key),@S[3]
+ xor 12($key),@S[2]
+.align 16
+.Ldloop:
+ mov -8($key),$t1 # prefetch key[4-5]
+ mov -4($key),$t0
+
+___
+ for ($i=0;$i<6;$i++) { Camellia_Feistel($i,-8); }
+$code.=<<___;
+ lea -16*4($key),$key
+ cmp $keyend,$key
+ mov 0($key),$t3 # prefetch key[2-3]
+ mov 4($key),$t2
+ je .Lddone
+
+ and @S[0],$t0
+ or @S[3],$t3
+ rol \$1,$t0
+ xor $t3,@S[2] # s2^=s3|key[3];
+ xor $t0,@S[1] # s1^=LeftRotate(s0&key[0],1);
+ and @S[2],$t2
+ or @S[1],$t1
+ rol \$1,$t2
+ xor $t1,@S[0] # s0^=s1|key[1];
+ xor $t2,@S[3] # s3^=LeftRotate(s2&key[2],1);
+
+ jmp .Ldloop
+
+.align 16
+.Lddone:
+ xor @S[2],$t2
+ xor @S[3],$t3
+ xor @S[0],$t0
+ xor @S[1],$t1
+
+ mov $t2,@S[0] # SwapHalf
+ mov $t3,@S[1]
+ mov $t0,@S[2]
+ mov $t1,@S[3]
+
+ .byte 0xf3,0xc3 # rep ret
+.size _x86_64_Camellia_decrypt,.-_x86_64_Camellia_decrypt
+___
+
+sub _saveround {
+my ($rnd,$key,@T)=@_;
+my $bias=int(@T[0])?shift(@T):0;
+
+ if ($#T==3) {
+ $code.=<<___;
+ mov @T[1],`$bias+$rnd*8+0`($key)
+ mov @T[0],`$bias+$rnd*8+4`($key)
+ mov @T[3],`$bias+$rnd*8+8`($key)
+ mov @T[2],`$bias+$rnd*8+12`($key)
+___
+ } else {
+ $code.=" mov @T[0],`$bias+$rnd*8+0`($key)\n";
+ $code.=" mov @T[1],`$bias+$rnd*8+8`($key)\n" if ($#T>=1);
+ }
+}
+
+sub _loadround {
+my ($rnd,$key,@T)=@_;
+my $bias=int(@T[0])?shift(@T):0;
+
+$code.=" mov `$bias+$rnd*8+0`($key),@T[0]\n";
+$code.=" mov `$bias+$rnd*8+8`($key),@T[1]\n" if ($#T>=1);
+}
+
+# shld is very slow on Intel EM64T family. Even on AMD it limits
+# instruction decode rate [because it's VectorPath] and consequently
+# performance...
+sub __rotl128 {
+my ($i0,$i1,$rot)=@_;
+
+ if ($rot) {
+ $code.=<<___;
+ mov $i0,%r11
+ shld \$$rot,$i1,$i0
+ shld \$$rot,%r11,$i1
+___
+ }
+}
+
+# ... Implementing 128-bit rotate without shld gives 80% better
+# performance on EM64T, +15% on AMD64 and only ~7% degradation on
+# Core2. This is therefore preferred.
+sub _rotl128 {
+my ($i0,$i1,$rot)=@_;
+
+ if ($rot) {
+ $code.=<<___;
+ mov $i0,%r11
+ shl \$$rot,$i0
+ mov $i1,%r9
+ shr \$`64-$rot`,%r9
+ shr \$`64-$rot`,%r11
+ or %r9,$i0
+ shl \$$rot,$i1
+ or %r11,$i1
+___
+ }
+}
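+# Reference model of the rotate above (a sketch, all arithmetic mod 2^64,
+# 0 < $rot < 64, treating $i0||$i1 as one 128-bit value):
+#	i0' = i0<<$rot | i1>>(64-$rot)
+#	i1' = i1<<$rot | i0>>(64-$rot)	# i0 = its pre-rotate value (%r11)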
+
+{ my $step=0;
+
+$code.=<<___;
+.globl Camellia_Ekeygen
+.type Camellia_Ekeygen,\@function,3
+.align 16
+Camellia_Ekeygen:
+ push %rbx
+ push %rbp
+ push %r13
+ push %r14
+ push %r15
+.Lkey_prologue:
+
+ mov %edi,${keyend}d # put away arguments, keyBitLength
+ mov %rdx,$out # keyTable
+
+ mov 0(%rsi),@S[0] # load 0-127 bits
+ mov 4(%rsi),@S[1]
+ mov 8(%rsi),@S[2]
+ mov 12(%rsi),@S[3]
+
+ bswap @S[0]
+ bswap @S[1]
+ bswap @S[2]
+ bswap @S[3]
+___
+ &_saveround (0,$out,@S); # KL<<<0
+$code.=<<___;
+ cmp \$128,$keyend # check keyBitLength
+ je .L1st128
+
+ mov 16(%rsi),@S[0] # load 128-191 bits
+ mov 20(%rsi),@S[1]
+ cmp \$192,$keyend
+ je .L1st192
+ mov 24(%rsi),@S[2] # load 192-255 bits
+ mov 28(%rsi),@S[3]
+ jmp .L1st256
+.L1st192:
+ mov @S[0],@S[2]
+ mov @S[1],@S[3]
+ not @S[2]
+ not @S[3]
+.L1st256:
+ bswap @S[0]
+ bswap @S[1]
+ bswap @S[2]
+ bswap @S[3]
+___
+ &_saveround (4,$out,@S); # temp storage for KR!
+$code.=<<___;
+ xor 0($out),@S[1] # KR^KL
+ xor 4($out),@S[0]
+ xor 8($out),@S[3]
+ xor 12($out),@S[2]
+
+.L1st128:
+ lea .LCamellia_SIGMA(%rip),$key
+ lea .LCamellia_SBOX(%rip),$Tbl
+
+ mov 0($key),$t1
+ mov 4($key),$t0
+___
+ &Camellia_Feistel($step++);
+ &Camellia_Feistel($step++);
+$code.=<<___;
+ xor 0($out),@S[1] # ^KL
+ xor 4($out),@S[0]
+ xor 8($out),@S[3]
+ xor 12($out),@S[2]
+___
+ &Camellia_Feistel($step++);
+ &Camellia_Feistel($step++);
+$code.=<<___;
+ cmp \$128,$keyend
+ jne .L2nd256
+
+ lea 128($out),$out # size optimization
+ shl \$32,%r8 # @S[0]||
+ shl \$32,%r10 # @S[2]||
+ or %r9,%r8 # ||@S[1]
+ or %r11,%r10 # ||@S[3]
+___
+ &_loadround (0,$out,-128,"%rax","%rbx"); # KL
+ &_saveround (2,$out,-128,"%r8","%r10"); # KA<<<0
+ &_rotl128 ("%rax","%rbx",15);
+ &_saveround (4,$out,-128,"%rax","%rbx"); # KL<<<15
+ &_rotl128 ("%r8","%r10",15);
+ &_saveround (6,$out,-128,"%r8","%r10"); # KA<<<15
+ &_rotl128 ("%r8","%r10",15); # 15+15=30
+ &_saveround (8,$out,-128,"%r8","%r10"); # KA<<<30
+ &_rotl128 ("%rax","%rbx",30); # 15+30=45
+ &_saveround (10,$out,-128,"%rax","%rbx"); # KL<<<45
+ &_rotl128 ("%r8","%r10",15); # 30+15=45
+ &_saveround (12,$out,-128,"%r8"); # KA<<<45
+ &_rotl128 ("%rax","%rbx",15); # 45+15=60
+ &_saveround (13,$out,-128,"%rbx"); # KL<<<60
+ &_rotl128 ("%r8","%r10",15); # 45+15=60
+ &_saveround (14,$out,-128,"%r8","%r10"); # KA<<<60
+ &_rotl128 ("%rax","%rbx",17); # 60+17=77
+ &_saveround (16,$out,-128,"%rax","%rbx"); # KL<<<77
+ &_rotl128 ("%rax","%rbx",17); # 77+17=94
+ &_saveround (18,$out,-128,"%rax","%rbx"); # KL<<<94
+ &_rotl128 ("%r8","%r10",34); # 60+34=94
+ &_saveround (20,$out,-128,"%r8","%r10"); # KA<<<94
+ &_rotl128 ("%rax","%rbx",17); # 94+17=111
+ &_saveround (22,$out,-128,"%rax","%rbx"); # KL<<<111
+ &_rotl128 ("%r8","%r10",17); # 94+17=111
+ &_saveround (24,$out,-128,"%r8","%r10"); # KA<<<111
+$code.=<<___;
+ mov \$3,%eax
+ jmp .Ldone
+.align 16
+.L2nd256:
+___
+ &_saveround (6,$out,@S); # temp storage for KA!
+$code.=<<___;
+ xor `4*8+0`($out),@S[1] # KA^KR
+ xor `4*8+4`($out),@S[0]
+ xor `5*8+0`($out),@S[3]
+ xor `5*8+4`($out),@S[2]
+___
+ &Camellia_Feistel($step++);
+ &Camellia_Feistel($step++);
+
+ &_loadround (0,$out,"%rax","%rbx"); # KL
+ &_loadround (4,$out,"%rcx","%rdx"); # KR
+ &_loadround (6,$out,"%r14","%r15"); # KA
+$code.=<<___;
+ lea 128($out),$out # size optimization
+ shl \$32,%r8 # @S[0]||
+ shl \$32,%r10 # @S[2]||
+ or %r9,%r8 # ||@S[1]
+ or %r11,%r10 # ||@S[3]
+___
+ &_saveround (2,$out,-128,"%r8","%r10"); # KB<<<0
+ &_rotl128 ("%rcx","%rdx",15);
+ &_saveround (4,$out,-128,"%rcx","%rdx"); # KR<<<15
+ &_rotl128 ("%r14","%r15",15);
+ &_saveround (6,$out,-128,"%r14","%r15"); # KA<<<15
+ &_rotl128 ("%rcx","%rdx",15); # 15+15=30
+ &_saveround (8,$out,-128,"%rcx","%rdx"); # KR<<<30
+ &_rotl128 ("%r8","%r10",30);
+ &_saveround (10,$out,-128,"%r8","%r10"); # KB<<<30
+ &_rotl128 ("%rax","%rbx",45);
+ &_saveround (12,$out,-128,"%rax","%rbx"); # KL<<<45
+ &_rotl128 ("%r14","%r15",30); # 15+30=45
+ &_saveround (14,$out,-128,"%r14","%r15"); # KA<<<45
+ &_rotl128 ("%rax","%rbx",15); # 45+15=60
+ &_saveround (16,$out,-128,"%rax","%rbx"); # KL<<<60
+ &_rotl128 ("%rcx","%rdx",30); # 30+30=60
+ &_saveround (18,$out,-128,"%rcx","%rdx"); # KR<<<60
+ &_rotl128 ("%r8","%r10",30); # 30+30=60
+ &_saveround (20,$out,-128,"%r8","%r10"); # KB<<<60
+ &_rotl128 ("%rax","%rbx",17); # 60+17=77
+ &_saveround (22,$out,-128,"%rax","%rbx"); # KL<<<77
+ &_rotl128 ("%r14","%r15",32); # 45+32=77
+ &_saveround (24,$out,-128,"%r14","%r15"); # KA<<<77
+ &_rotl128 ("%rcx","%rdx",34); # 60+34=94
+ &_saveround (26,$out,-128,"%rcx","%rdx"); # KR<<<94
+ &_rotl128 ("%r14","%r15",17); # 77+17=94
+ &_saveround (28,$out,-128,"%r14","%r15"); # KA<<<94
+ &_rotl128 ("%rax","%rbx",34); # 77+34=111
+ &_saveround (30,$out,-128,"%rax","%rbx"); # KL<<<111
+ &_rotl128 ("%r8","%r10",51); # 60+51=111
+ &_saveround (32,$out,-128,"%r8","%r10"); # KB<<<111
+$code.=<<___;
+ mov \$4,%eax
+.Ldone:
+ mov 0(%rsp),%r15
+ mov 8(%rsp),%r14
+ mov 16(%rsp),%r13
+ mov 24(%rsp),%rbp
+ mov 32(%rsp),%rbx
+ lea 40(%rsp),%rsp
+.Lkey_epilogue:
+ ret
+.size Camellia_Ekeygen,.-Camellia_Ekeygen
+___
+}
+
+@SBOX=(
+112,130, 44,236,179, 39,192,229,228,133, 87, 53,234, 12,174, 65,
+ 35,239,107,147, 69, 25,165, 33,237, 14, 79, 78, 29,101,146,189,
+134,184,175,143,124,235, 31,206, 62, 48,220, 95, 94,197, 11, 26,
+166,225, 57,202,213, 71, 93, 61,217, 1, 90,214, 81, 86,108, 77,
+139, 13,154,102,251,204,176, 45,116, 18, 43, 32,240,177,132,153,
+223, 76,203,194, 52,126,118, 5,109,183,169, 49,209, 23, 4,215,
+ 20, 88, 58, 97,222, 27, 17, 28, 50, 15,156, 22, 83, 24,242, 34,
+254, 68,207,178,195,181,122,145, 36, 8,232,168, 96,252,105, 80,
+170,208,160,125,161,137, 98,151, 84, 91, 30,149,224,255,100,210,
+ 16,196, 0, 72,163,247,117,219,138, 3,230,218, 9, 63,221,148,
+135, 92,131, 2,205, 74,144, 51,115,103,246,243,157,127,191,226,
+ 82,155,216, 38,200, 55,198, 59,129,150,111, 75, 19,190, 99, 46,
+233,121,167,140,159,110,188,142, 41,245,249,182, 47,253,180, 89,
+120,152, 6,106,231, 70,113,186,212, 37,171, 66,136,162,141,250,
+114, 7,185, 85,248,238,172, 10, 54, 73, 42,104, 60, 56,241,164,
+ 64, 40,211,123,187,201, 67,193, 21,227,173,244,119,199,128,158);
+
+sub S1110 { my $i=shift; $i=@SBOX[$i]; $i=$i<<24|$i<<16|$i<<8; sprintf("0x%08x",$i); }
+sub S4404 { my $i=shift; $i=($i<<1|$i>>7)&0xff; $i=@SBOX[$i]; $i=$i<<24|$i<<16|$i; sprintf("0x%08x",$i); }
+sub S0222 { my $i=shift; $i=@SBOX[$i]; $i=($i<<1|$i>>7)&0xff; $i=$i<<16|$i<<8|$i; sprintf("0x%08x",$i); }
+sub S3033 { my $i=shift; $i=@SBOX[$i]; $i=($i>>1|$i<<7)&0xff; $i=$i<<24|$i<<8|$i; sprintf("0x%08x",$i); }
+
+$code.=<<___;
+.align 64
+.LCamellia_SIGMA:
+.long 0x3bcc908b, 0xa09e667f, 0x4caa73b2, 0xb67ae858
+.long 0xe94f82be, 0xc6ef372f, 0xf1d36f1c, 0x54ff53a5
+.long 0xde682d1d, 0x10e527fa, 0xb3e6c1fd, 0xb05688c2
+.long 0, 0, 0, 0
+.LCamellia_SBOX:
+___
+# tables are interleaved, remember?
+sub data_word { $code.=".long\t".join(',',@_)."\n"; }
+for ($i=0;$i<256;$i++) { &data_word(&S1110($i),&S4404($i)); }
+for ($i=0;$i<256;$i++) { &data_word(&S0222($i),&S3033($i)); }
+
+# void Camellia_cbc_encrypt (const unsigned char *inp, unsigned char *out,
+# size_t length, const CAMELLIA_KEY *key,
+# unsigned char *ivp,const int enc);
+{
+$_key="0(%rsp)";
+$_end="8(%rsp)"; # inp+len&~15
+$_res="16(%rsp)"; # len&15
+$ivec="24(%rsp)";
+$_ivp="40(%rsp)";
+$_rsp="48(%rsp)";
+
+$code.=<<___;
+.globl Camellia_cbc_encrypt
+.type Camellia_cbc_encrypt,\@function,6
+.align 16
+Camellia_cbc_encrypt:
+ cmp \$0,%rdx
+ je .Lcbc_abort
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+.Lcbc_prologue:
+
+ mov %rsp,%rbp
+ sub \$64,%rsp
+ and \$-64,%rsp
+
+ # place stack frame just "above mod 1024" the key schedule,
+ # this ensures that cache associativity suffices
+ lea -64-63(%rcx),%r10
+ sub %rsp,%r10
+ neg %r10
+ and \$0x3C0,%r10
+ sub %r10,%rsp
+ #add \$8,%rsp # 8 is reserved for callee's ra
+
+ mov %rdi,$inp # inp argument
+ mov %rsi,$out # out argument
+ mov %r8,%rbx # ivp argument
+ mov %rcx,$key # key argument
+ mov 272(%rcx),${keyend}d # grandRounds
+
+ mov %r8,$_ivp
+ mov %rbp,$_rsp
+
+.Lcbc_body:
+ lea .LCamellia_SBOX(%rip),$Tbl
+
+ mov \$32,%ecx
+.align 4
+.Lcbc_prefetch_sbox:
+ mov 0($Tbl),%rax
+ mov 32($Tbl),%rsi
+ mov 64($Tbl),%rdi
+ mov 96($Tbl),%r11
+ lea 128($Tbl),$Tbl
+ loop .Lcbc_prefetch_sbox
+ sub \$4096,$Tbl
+ shl \$6,$keyend
+ mov %rdx,%rcx # len argument
+ lea ($key,$keyend),$keyend
+
+ cmp \$0,%r9d # enc argument
+ je .LCBC_DECRYPT
+
+ and \$-16,%rdx
+ and \$15,%rcx # length residue
+ lea ($inp,%rdx),%rdx
+ mov $key,$_key
+ mov %rdx,$_end
+ mov %rcx,$_res
+
+ cmp $inp,%rdx
+ mov 0(%rbx),@S[0] # load IV
+ mov 4(%rbx),@S[1]
+ mov 8(%rbx),@S[2]
+ mov 12(%rbx),@S[3]
+ je .Lcbc_enc_tail
+ jmp .Lcbc_eloop
+
+.align 16
+.Lcbc_eloop:
+ xor 0($inp),@S[0]
+ xor 4($inp),@S[1]
+ xor 8($inp),@S[2]
+ bswap @S[0]
+ xor 12($inp),@S[3]
+ bswap @S[1]
+ bswap @S[2]
+ bswap @S[3]
+
+ call _x86_64_Camellia_encrypt
+
+ mov $_key,$key # "rewind" the key
+ bswap @S[0]
+ mov $_end,%rdx
+ bswap @S[1]
+ mov $_res,%rcx
+ bswap @S[2]
+ mov @S[0],0($out)
+ bswap @S[3]
+ mov @S[1],4($out)
+ mov @S[2],8($out)
+ lea 16($inp),$inp
+ mov @S[3],12($out)
+ cmp %rdx,$inp
+ lea 16($out),$out
+ jne .Lcbc_eloop
+
+ cmp \$0,%rcx
+ jne .Lcbc_enc_tail
+
+ mov $_ivp,$out
+ mov @S[0],0($out) # write out IV residue
+ mov @S[1],4($out)
+ mov @S[2],8($out)
+ mov @S[3],12($out)
+ jmp .Lcbc_done
+
+.align 16
+.Lcbc_enc_tail:
+ xor %rax,%rax
+ mov %rax,0+$ivec
+ mov %rax,8+$ivec
+ mov %rax,$_res
+
+.Lcbc_enc_pushf:
+ pushfq
+ cld
+ mov $inp,%rsi
+ lea 8+$ivec,%rdi
+ .long 0x9066A4F3 # rep movsb
+ popfq
+.Lcbc_enc_popf:
+
+ lea $ivec,$inp
+ lea 16+$ivec,%rax
+ mov %rax,$_end
+ jmp .Lcbc_eloop # one more time
+
+.align 16
+.LCBC_DECRYPT:
+ xchg $key,$keyend
+ add \$15,%rdx
+ and \$15,%rcx # length residue
+ and \$-16,%rdx
+ mov $key,$_key
+ lea ($inp,%rdx),%rdx
+ mov %rdx,$_end
+ mov %rcx,$_res
+
+ mov (%rbx),%rax # load IV
+ mov 8(%rbx),%rbx
+ jmp .Lcbc_dloop
+.align 16
+.Lcbc_dloop:
+ mov 0($inp),@S[0]
+ mov 4($inp),@S[1]
+ mov 8($inp),@S[2]
+ bswap @S[0]
+ mov 12($inp),@S[3]
+ bswap @S[1]
+ mov %rax,0+$ivec # save IV to temporary storage
+ bswap @S[2]
+ mov %rbx,8+$ivec
+ bswap @S[3]
+
+ call _x86_64_Camellia_decrypt
+
+ mov $_key,$key # "rewind" the key
+ mov $_end,%rdx
+ mov $_res,%rcx
+
+ bswap @S[0]
+ mov ($inp),%rax # load IV for next iteration
+ bswap @S[1]
+ mov 8($inp),%rbx
+ bswap @S[2]
+ xor 0+$ivec,@S[0]
+ bswap @S[3]
+ xor 4+$ivec,@S[1]
+ xor 8+$ivec,@S[2]
+ lea 16($inp),$inp
+ xor 12+$ivec,@S[3]
+ cmp %rdx,$inp
+ je .Lcbc_ddone
+
+ mov @S[0],0($out)
+ mov @S[1],4($out)
+ mov @S[2],8($out)
+ mov @S[3],12($out)
+
+ lea 16($out),$out
+ jmp .Lcbc_dloop
+
+.align 16
+.Lcbc_ddone:
+ mov $_ivp,%rdx
+ cmp \$0,%rcx
+ jne .Lcbc_dec_tail
+
+ mov @S[0],0($out)
+ mov @S[1],4($out)
+ mov @S[2],8($out)
+ mov @S[3],12($out)
+
+ mov %rax,(%rdx) # write out IV residue
+ mov %rbx,8(%rdx)
+ jmp .Lcbc_done
+.align 16
+.Lcbc_dec_tail:
+ mov @S[0],0+$ivec
+ mov @S[1],4+$ivec
+ mov @S[2],8+$ivec
+ mov @S[3],12+$ivec
+
+.Lcbc_dec_pushf:
+ pushfq
+ cld
+ lea 8+$ivec,%rsi
+ lea ($out),%rdi
+ .long 0x9066A4F3 # rep movsb
+ popfq
+.Lcbc_dec_popf:
+
+ mov %rax,(%rdx) # write out IV residue
+ mov %rbx,8(%rdx)
+ jmp .Lcbc_done
+
+.align 16
+.Lcbc_done:
+ mov $_rsp,%rcx
+ mov 0(%rcx),%r15
+ mov 8(%rcx),%r14
+ mov 16(%rcx),%r13
+ mov 24(%rcx),%r12
+ mov 32(%rcx),%rbp
+ mov 40(%rcx),%rbx
+ lea 48(%rcx),%rsp
+.Lcbc_abort:
+ ret
+.size Camellia_cbc_encrypt,.-Camellia_cbc_encrypt
+
+.asciz "Camellia for x86_64 by <appro\@openssl.org>"
+___
+}
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+.type common_se_handler,\@abi-omnipotent
+.align 16
+common_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ lea -64(%rsp),%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # prologue label
+ cmp %r10,%rbx # context->Rip<prologue label
+ jb .Lin_prologue
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lin_prologue
+
+ lea 40(%rax),%rax
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r13
+ mov -32(%rax),%r14
+ mov -40(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ jmp .Lcommon_seh_exit
+.size common_se_handler,.-common_se_handler
+
+.type cbc_se_handler,\@abi-omnipotent
+.align 16
+cbc_se_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ lea -64(%rsp),%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ lea .Lcbc_prologue(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_prologue
+ jb .Lin_cbc_prologue
+
+ lea .Lcbc_body(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_body
+ jb .Lin_cbc_frame_setup
+
+ mov 152($context),%rax # pull context->Rsp
+
+ lea .Lcbc_abort(%rip),%r10
+ cmp %r10,%rbx # context->Rip>=.Lcbc_abort
+ jae .Lin_cbc_prologue
+
+ # handle pushf/popf in Camellia_cbc_encrypt
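+	# (%rsp is 8 bytes lower between pushfq and popfq, so bias the
+	# captured stack pointer accordingly)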
+ lea .Lcbc_enc_pushf(%rip),%r10
+ cmp %r10,%rbx # context->Rip<=.Lcbc_enc_pushf
+ jbe .Lin_cbc_no_flag
+ lea 8(%rax),%rax
+ lea .Lcbc_enc_popf(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_enc_popf
+ jb .Lin_cbc_no_flag
+ lea -8(%rax),%rax
+ lea .Lcbc_dec_pushf(%rip),%r10
+ cmp %r10,%rbx # context->Rip<=.Lcbc_dec_pushf
+ jbe .Lin_cbc_no_flag
+ lea 8(%rax),%rax
+ lea .Lcbc_dec_popf(%rip),%r10
+ cmp %r10,%rbx # context->Rip<.Lcbc_dec_popf
+ jb .Lin_cbc_no_flag
+ lea -8(%rax),%rax
+
+.Lin_cbc_no_flag:
+ mov 48(%rax),%rax # $_rsp
+ lea 48(%rax),%rax
+
+.Lin_cbc_frame_setup:
+ mov -8(%rax),%rbx
+ mov -16(%rax),%rbp
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lin_cbc_prologue:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+.align 4
+.Lcommon_seh_exit:
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$`1232/8`,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ lea 64(%rsp),%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size cbc_se_handler,.-cbc_se_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_Camellia_EncryptBlock_Rounds
+ .rva .LSEH_end_Camellia_EncryptBlock_Rounds
+ .rva .LSEH_info_Camellia_EncryptBlock_Rounds
+
+ .rva .LSEH_begin_Camellia_DecryptBlock_Rounds
+ .rva .LSEH_end_Camellia_DecryptBlock_Rounds
+ .rva .LSEH_info_Camellia_DecryptBlock_Rounds
+
+ .rva .LSEH_begin_Camellia_Ekeygen
+ .rva .LSEH_end_Camellia_Ekeygen
+ .rva .LSEH_info_Camellia_Ekeygen
+
+ .rva .LSEH_begin_Camellia_cbc_encrypt
+ .rva .LSEH_end_Camellia_cbc_encrypt
+ .rva .LSEH_info_Camellia_cbc_encrypt
+
+.section .xdata
+.align 8
+.LSEH_info_Camellia_EncryptBlock_Rounds:
+ .byte 9,0,0,0
+ .rva common_se_handler
+ .rva .Lenc_prologue,.Lenc_epilogue # HandlerData[]
+.LSEH_info_Camellia_DecryptBlock_Rounds:
+ .byte 9,0,0,0
+ .rva common_se_handler
+ .rva .Ldec_prologue,.Ldec_epilogue # HandlerData[]
+.LSEH_info_Camellia_Ekeygen:
+ .byte 9,0,0,0
+ .rva common_se_handler
+ .rva .Lkey_prologue,.Lkey_epilogue # HandlerData[]
+.LSEH_info_Camellia_cbc_encrypt:
+ .byte 9,0,0,0
+ .rva cbc_se_handler
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/openssl-1.1.0h/crypto/camellia/asm/cmllt4-sparcv9.pl b/openssl-1.1.0h/crypto/camellia/asm/cmllt4-sparcv9.pl
new file mode 100644
index 0000000..ffe4a7d
--- /dev/null
+++ b/openssl-1.1.0h/crypto/camellia/asm/cmllt4-sparcv9.pl
@@ -0,0 +1,939 @@
+#! /usr/bin/env perl
+# Copyright 2012-2016 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+
+# ====================================================================
+# Written by David S. Miller <davem@devemloft.net> and Andy Polyakov
+# <appro@openssl.org>. The module is licensed under 2-clause BSD
+# license. October 2012. All rights reserved.
+# ====================================================================
+
+######################################################################
+# Camellia for SPARC T4.
+#
+# As with AES, the results below [for aligned data] are virtually
+# identical to the critical path lengths for 3-cycle instruction latency:
+#
+#			128-bit key	192/256-bit key
+# CBC encrypt 4.14/4.21(*) 5.46/5.52
+# (*) numbers after slash are for
+# misaligned data.
+#
+# As with Intel AES-NI, the question is whether it's possible to improve
+# performance of parallelizable modes by interleaving round
+# instructions. In Camellia every instruction depends on the previous
+# one, which means that there is room for 2 additional instructions
+# between two dependent ones. Can we expect a 3x performance improvement?
+# At least one can argue that it should be possible to break the 2x
+# barrier... Yet for some reason not even 2x appears to be possible:
+#
+#			128-bit key	192/256-bit key
+# CBC decrypt 2.21/2.74 2.99/3.40
+# CTR 2.15/2.68(*) 2.93/3.34
+# (*) numbers after slash are for
+# misaligned data.
+#
+# This is for 2x interleave. But compared to 1x interleave, CBC decrypt
+# improved by ... 0% for a 128-bit key, and 11% for a 192/256-bit one.
+# So the out-of-order execution logic can take non-interleaved code to
+# 1.87x, but can't take the 2x interleaved code any further. There
+# surely is some explanation... As a result, 3x interleave was not even
+# attempted. Instead an effort was made to share the mode-specific
+# implementations with the AES module (hence sparcv9_modes.pl).
+#
+# To anchor to something else, the software C implementation processes
+# one byte in 38 cycles with a 128-bit key on the same processor.
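+#
+# The 2x interleave referred to above corresponds to the _cmll*_*_2x
+# subroutines below, which process two blocks with their camellia_f
+# instructions interleaved.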
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+push(@INC,"${dir}","${dir}../../perlasm");
+require "sparcv9_modes.pl";
+
+$output = pop;
+open STDOUT,">$output";
+
+$::evp=1;	# if $evp is set to 0, the script generates a module with
+# Camellia_[en|de]crypt, Camellia_set_key and Camellia_cbc_encrypt
+# entry points. These are fully compatible with openssl/camellia.h.
+
+######################################################################
+# single-round subroutines
+#
+{
+my ($inp,$out,$key,$rounds,$tmp,$mask)=map("%o$_",(0..5));
+
+$code=<<___;
+#include "sparc_arch.h"
+
+.text
+
+.globl cmll_t4_encrypt
+.align 32
+cmll_t4_encrypt:
+ andcc $inp, 7, %g1 ! is input aligned?
+ andn $inp, 7, $inp
+
+ ldx [$key + 0], %g4
+ ldx [$key + 8], %g5
+
+ ldx [$inp + 0], %o4
+ bz,pt %icc, 1f
+ ldx [$inp + 8], %o5
+ ldx [$inp + 16], $inp
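+	! input is misaligned: shift the three doublewords loaded from
+	! the rounded-down addresses and merge them into the 16-byte block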
+ sll %g1, 3, %g1
+ sub %g0, %g1, %o3
+ sllx %o4, %g1, %o4
+ sllx %o5, %g1, %g1
+ srlx %o5, %o3, %o5
+ srlx $inp, %o3, %o3
+ or %o5, %o4, %o4
+ or %o3, %g1, %o5
+1:
+ ld [$key + 272], $rounds ! grandRounds, 3 or 4
+ ldd [$key + 16], %f12
+ ldd [$key + 24], %f14
+ xor %g4, %o4, %o4
+ xor %g5, %o5, %o5
+ ldd [$key + 32], %f16
+ ldd [$key + 40], %f18
+ movxtod %o4, %f0
+ movxtod %o5, %f2
+ ldd [$key + 48], %f20
+ ldd [$key + 56], %f22
+ sub $rounds, 1, $rounds
+ ldd [$key + 64], %f24
+ ldd [$key + 72], %f26
+ add $key, 80, $key
+
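+	! each iteration performs 6 Feistel rounds followed by the FL/FLI
+	! layer, preloading the next 8 subkeys along the way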
+.Lenc:
+ camellia_f %f12, %f2, %f0, %f2
+ ldd [$key + 0], %f12
+ sub $rounds,1,$rounds
+ camellia_f %f14, %f0, %f2, %f0
+ ldd [$key + 8], %f14
+ camellia_f %f16, %f2, %f0, %f2
+ ldd [$key + 16], %f16
+ camellia_f %f18, %f0, %f2, %f0
+ ldd [$key + 24], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ ldd [$key + 32], %f20
+ camellia_f %f22, %f0, %f2, %f0
+ ldd [$key + 40], %f22
+ camellia_fl %f24, %f0, %f0
+ ldd [$key + 48], %f24
+ camellia_fli %f26, %f2, %f2
+ ldd [$key + 56], %f26
+ brnz,pt $rounds, .Lenc
+ add $key, 64, $key
+
+ andcc $out, 7, $tmp ! is output aligned?
+ camellia_f %f12, %f2, %f0, %f2
+ camellia_f %f14, %f0, %f2, %f0
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f18, %f0, %f2, %f0
+ camellia_f %f20, %f2, %f0, %f4
+ camellia_f %f22, %f0, %f4, %f2
+ fxor %f24, %f4, %f0
+ fxor %f26, %f2, %f2
+
+ bnz,pn %icc, 2f
+ nop
+
+ std %f0, [$out + 0]
+ retl
+ std %f2, [$out + 8]
+
+2: alignaddrl $out, %g0, $out
+ mov 0xff, $mask
+ srl $mask, $tmp, $mask
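+	! misaligned output: shift the result with faligndata and emit it
+	! with byte-masked partial stores at the head and tail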
+
+ faligndata %f0, %f0, %f4
+ faligndata %f0, %f2, %f6
+ faligndata %f2, %f2, %f8
+
+ stda %f4, [$out + $mask]0xc0 ! partial store
+ std %f6, [$out + 8]
+ add $out, 16, $out
+ orn %g0, $mask, $mask
+ retl
+ stda %f8, [$out + $mask]0xc0 ! partial store
+.type cmll_t4_encrypt,#function
+.size cmll_t4_encrypt,.-cmll_t4_encrypt
+
+.globl cmll_t4_decrypt
+.align 32
+cmll_t4_decrypt:
+ ld [$key + 272], $rounds ! grandRounds, 3 or 4
+ andcc $inp, 7, %g1 ! is input aligned?
+ andn $inp, 7, $inp
+
+ sll $rounds, 6, $rounds
+ add $rounds, $key, $key
+
+ ldx [$inp + 0], %o4
+ bz,pt %icc, 1f
+ ldx [$inp + 8], %o5
+ ldx [$inp + 16], $inp
+ sll %g1, 3, %g1
+ sub %g0, %g1, %g4
+ sllx %o4, %g1, %o4
+ sllx %o5, %g1, %g1
+ srlx %o5, %g4, %o5
+ srlx $inp, %g4, %g4
+ or %o5, %o4, %o4
+ or %g4, %g1, %o5
+1:
+ ldx [$key + 0], %g4
+ ldx [$key + 8], %g5
+ ldd [$key - 8], %f12
+ ldd [$key - 16], %f14
+ xor %g4, %o4, %o4
+ xor %g5, %o5, %o5
+ ldd [$key - 24], %f16
+ ldd [$key - 32], %f18
+ movxtod %o4, %f0
+ movxtod %o5, %f2
+ ldd [$key - 40], %f20
+ ldd [$key - 48], %f22
+ sub $rounds, 64, $rounds
+ ldd [$key - 56], %f24
+ ldd [$key - 64], %f26
+ sub $key, 64, $key
+
+.Ldec:
+ camellia_f %f12, %f2, %f0, %f2
+ ldd [$key - 8], %f12
+ sub $rounds, 64, $rounds
+ camellia_f %f14, %f0, %f2, %f0
+ ldd [$key - 16], %f14
+ camellia_f %f16, %f2, %f0, %f2
+ ldd [$key - 24], %f16
+ camellia_f %f18, %f0, %f2, %f0
+ ldd [$key - 32], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ ldd [$key - 40], %f20
+ camellia_f %f22, %f0, %f2, %f0
+ ldd [$key - 48], %f22
+ camellia_fl %f24, %f0, %f0
+ ldd [$key - 56], %f24
+ camellia_fli %f26, %f2, %f2
+ ldd [$key - 64], %f26
+ brnz,pt $rounds, .Ldec
+ sub $key, 64, $key
+
+ andcc $out, 7, $tmp ! is output aligned?
+ camellia_f %f12, %f2, %f0, %f2
+ camellia_f %f14, %f0, %f2, %f0
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f18, %f0, %f2, %f0
+ camellia_f %f20, %f2, %f0, %f4
+ camellia_f %f22, %f0, %f4, %f2
+ fxor %f26, %f4, %f0
+ fxor %f24, %f2, %f2
+
+ bnz,pn %icc, 2f
+ nop
+
+ std %f0, [$out + 0]
+ retl
+ std %f2, [$out + 8]
+
+2: alignaddrl $out, %g0, $out
+ mov 0xff, $mask
+ srl $mask, $tmp, $mask
+
+ faligndata %f0, %f0, %f4
+ faligndata %f0, %f2, %f6
+ faligndata %f2, %f2, %f8
+
+ stda %f4, [$out + $mask]0xc0 ! partial store
+ std %f6, [$out + 8]
+ add $out, 16, $out
+ orn %g0, $mask, $mask
+ retl
+ stda %f8, [$out + $mask]0xc0 ! partial store
+.type cmll_t4_decrypt,#function
+.size cmll_t4_decrypt,.-cmll_t4_decrypt
+___
+}
+
+######################################################################
+# key setup subroutines
+#
+{
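+# ROTL128($rot) returns assembly text that rotates the 128-bit value held
+# in %o4:%o5 left by $rot bits (0 < $rot < 64), clobbering %g4 and %g5;
+# the key-schedule code below uses it to derive the rotated subkeys.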
+sub ROTL128 {
+ my $rot = shift;
+
+ "srlx %o4, 64-$rot, %g4\n\t".
+ "sllx %o4, $rot, %o4\n\t".
+ "srlx %o5, 64-$rot, %g5\n\t".
+ "sllx %o5, $rot, %o5\n\t".
+ "or %o4, %g5, %o4\n\t".
+ "or %o5, %g4, %o5";
+}
+
+my ($inp,$bits,$out,$tmp)=map("%o$_",(0..5));
+$code.=<<___;
+.globl cmll_t4_set_key
+.align 32
+cmll_t4_set_key:
+ and $inp, 7, $tmp
+ alignaddr $inp, %g0, $inp
+ cmp $bits, 192
+ ldd [$inp + 0], %f0
+ bl,pt %icc,.L128
+ ldd [$inp + 8], %f2
+
+ be,pt %icc,.L192
+ ldd [$inp + 16], %f4
+
+ brz,pt $tmp, .L256aligned
+ ldd [$inp + 24], %f6
+
+ ldd [$inp + 32], %f8
+ faligndata %f0, %f2, %f0
+ faligndata %f2, %f4, %f2
+ faligndata %f4, %f6, %f4
+ b .L256aligned
+ faligndata %f6, %f8, %f6
+
+.align 16
+.L192:
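+	! a 192-bit key supplies only the left half of KR; its right half
+	! is the bitwise complement of that left half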
+ brz,a,pt $tmp, .L256aligned
+ fnot2 %f4, %f6
+
+ ldd [$inp + 24], %f6
+ nop
+ faligndata %f0, %f2, %f0
+ faligndata %f2, %f4, %f2
+ faligndata %f4, %f6, %f4
+ fnot2 %f4, %f6
+
+.L256aligned:
+ std %f0, [$out + 0] ! k[0, 1]
+ fsrc2 %f0, %f28
+ std %f2, [$out + 8] ! k[2, 3]
+ fsrc2 %f2, %f30
+ fxor %f4, %f0, %f0
+ b .L128key
+ fxor %f6, %f2, %f2
+
+.align 16
+.L128:
+ brz,pt $tmp, .L128aligned
+ nop
+
+ ldd [$inp + 16], %f4
+ nop
+ faligndata %f0, %f2, %f0
+ faligndata %f2, %f4, %f2
+
+.L128aligned:
+ std %f0, [$out + 0] ! k[0, 1]
+ fsrc2 %f0, %f28
+ std %f2, [$out + 8] ! k[2, 3]
+ fsrc2 %f2, %f30
+
+.L128key:
+ mov %o7, %o5
+1: call .+8
+ add %o7, SIGMA-1b, %o4
+ mov %o5, %o7
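+	! the call/add pair above computes the address of SIGMA
+	! position-independently via %o7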
+
+ ldd [%o4 + 0], %f16
+ ldd [%o4 + 8], %f18
+ ldd [%o4 + 16], %f20
+ ldd [%o4 + 24], %f22
+
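+	! derive KA: run KL^KR (just KL for 128-bit keys) through four
+	! Feistel rounds keyed with SIGMA, mixing KL back in after the
+	! second round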
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f18, %f0, %f2, %f0
+ fxor %f28, %f0, %f0
+ fxor %f30, %f2, %f2
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f22, %f0, %f2, %f0
+
+ bge,pn %icc, .L256key
+ nop
+ std %f0, [$out + 0x10] ! k[ 4, 5]
+ std %f2, [$out + 0x18] ! k[ 6, 7]
+
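+	! the remaining subkeys are rotations of KA and KL by 15, 30, 45,
+	! 60, 77, 94 and 111 bits, stored at their slots in the schedule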
+ movdtox %f0, %o4
+ movdtox %f2, %o5
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x30] ! k[12, 13]
+ stx %o5, [$out + 0x38] ! k[14, 15]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x40] ! k[16, 17]
+ stx %o5, [$out + 0x48] ! k[18, 19]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x60] ! k[24, 25]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x70] ! k[28, 29]
+ stx %o5, [$out + 0x78] ! k[30, 31]
+ `&ROTL128(34)`
+ stx %o4, [$out + 0xa0] ! k[40, 41]
+ stx %o5, [$out + 0xa8] ! k[42, 43]
+ `&ROTL128(17)`
+ stx %o4, [$out + 0xc0] ! k[48, 49]
+ stx %o5, [$out + 0xc8] ! k[50, 51]
+
+ movdtox %f28, %o4 ! k[ 0, 1]
+ movdtox %f30, %o5 ! k[ 2, 3]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x20] ! k[ 8, 9]
+ stx %o5, [$out + 0x28] ! k[10, 11]
+ `&ROTL128(30)`
+ stx %o4, [$out + 0x50] ! k[20, 21]
+ stx %o5, [$out + 0x58] ! k[22, 23]
+ `&ROTL128(15)`
+ stx %o5, [$out + 0x68] ! k[26, 27]
+ `&ROTL128(17)`
+ stx %o4, [$out + 0x80] ! k[32, 33]
+ stx %o5, [$out + 0x88] ! k[34, 35]
+ `&ROTL128(17)`
+ stx %o4, [$out + 0x90] ! k[36, 37]
+ stx %o5, [$out + 0x98] ! k[38, 39]
+ `&ROTL128(17)`
+ stx %o4, [$out + 0xb0] ! k[44, 45]
+ stx %o5, [$out + 0xb8] ! k[46, 47]
+
+ mov 3, $tmp
+ st $tmp, [$out + 0x110]
+ retl
+ xor %o0, %o0, %o0
+
+.align 16
+.L256key:
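+	! 192/256-bit keys: additionally derive KB by running KA^KR through
+	! two more Feistel rounds keyed with SIGMA5/SIGMA6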
+ ldd [%o4 + 32], %f24
+ ldd [%o4 + 40], %f26
+
+ std %f0, [$out + 0x30] ! k[12, 13]
+ std %f2, [$out + 0x38] ! k[14, 15]
+
+ fxor %f4, %f0, %f0
+ fxor %f6, %f2, %f2
+ camellia_f %f24, %f2, %f0, %f2
+ camellia_f %f26, %f0, %f2, %f0
+
+ std %f0, [$out + 0x10] ! k[ 4, 5]
+ std %f2, [$out + 0x18] ! k[ 6, 7]
+
+ movdtox %f0, %o4
+ movdtox %f2, %o5
+ `&ROTL128(30)`
+ stx %o4, [$out + 0x50] ! k[20, 21]
+ stx %o5, [$out + 0x58] ! k[22, 23]
+ `&ROTL128(30)`
+ stx %o4, [$out + 0xa0] ! k[40, 41]
+ stx %o5, [$out + 0xa8] ! k[42, 43]
+ `&ROTL128(51)`
+ stx %o4, [$out + 0x100] ! k[64, 65]
+ stx %o5, [$out + 0x108] ! k[66, 67]
+
+ movdtox %f4, %o4 ! k[ 8, 9]
+ movdtox %f6, %o5 ! k[10, 11]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x20] ! k[ 8, 9]
+ stx %o5, [$out + 0x28] ! k[10, 11]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x40] ! k[16, 17]
+ stx %o5, [$out + 0x48] ! k[18, 19]
+ `&ROTL128(30)`
+ stx %o4, [$out + 0x90] ! k[36, 37]
+ stx %o5, [$out + 0x98] ! k[38, 39]
+ `&ROTL128(34)`
+ stx %o4, [$out + 0xd0] ! k[52, 53]
+ stx %o5, [$out + 0xd8] ! k[54, 55]
+ ldx [$out + 0x30], %o4 ! k[12, 13]
+ ldx [$out + 0x38], %o5 ! k[14, 15]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x30] ! k[12, 13]
+ stx %o5, [$out + 0x38] ! k[14, 15]
+ `&ROTL128(30)`
+ stx %o4, [$out + 0x70] ! k[28, 29]
+ stx %o5, [$out + 0x78] ! k[30, 31]
+ srlx %o4, 32, %g4
+ srlx %o5, 32, %g5
+ st %o4, [$out + 0xc0] ! k[48]
+ st %g5, [$out + 0xc4] ! k[49]
+ st %o5, [$out + 0xc8] ! k[50]
+ st %g4, [$out + 0xcc] ! k[51]
+ `&ROTL128(49)`
+ stx %o4, [$out + 0xe0] ! k[56, 57]
+ stx %o5, [$out + 0xe8] ! k[58, 59]
+
+ movdtox %f28, %o4 ! k[ 0, 1]
+ movdtox %f30, %o5 ! k[ 2, 3]
+ `&ROTL128(45)`
+ stx %o4, [$out + 0x60] ! k[24, 25]
+ stx %o5, [$out + 0x68] ! k[26, 27]
+ `&ROTL128(15)`
+ stx %o4, [$out + 0x80] ! k[32, 33]
+ stx %o5, [$out + 0x88] ! k[34, 35]
+ `&ROTL128(17)`
+ stx %o4, [$out + 0xb0] ! k[44, 45]
+ stx %o5, [$out + 0xb8] ! k[46, 47]
+ `&ROTL128(34)`
+ stx %o4, [$out + 0xf0] ! k[60, 61]
+ stx %o5, [$out + 0xf8] ! k[62, 63]
+
+ mov 4, $tmp
+ st $tmp, [$out + 0x110]
+ retl
+ xor %o0, %o0, %o0
+.type cmll_t4_set_key,#function
+.size cmll_t4_set_key,.-cmll_t4_set_key
+.align 32
+SIGMA:
+ .long 0xa09e667f, 0x3bcc908b, 0xb67ae858, 0x4caa73b2
+ .long 0xc6ef372f, 0xe94f82be, 0x54ff53a5, 0xf1d36f1c
+ .long 0x10e527fa, 0xde682d1d, 0xb05688c2, 0xb3e6c1fd
+.type SIGMA,#object
+.size SIGMA,.-SIGMA
+.asciz "Camellia for SPARC T4, David S. Miller, Andy Polyakov"
+___
+}
+
+{{{
+my ($inp,$out,$len,$key,$ivec,$enc)=map("%i$_",(0..5));
+my ($ileft,$iright,$ooff,$omask,$ivoff)=map("%l$_",(1..7));
+
+$code.=<<___;
+.align 32
+_cmll128_load_enckey:
+ ldx [$key + 0], %g4
+ ldx [$key + 8], %g5
+___
+for ($i=2; $i<26;$i++) { # load key schedule
+ $code.=<<___;
+ ldd [$key + `8*$i`], %f`12+2*$i`
+___
+}
+$code.=<<___;
+ retl
+ nop
+.type _cmll128_load_enckey,#function
+.size _cmll128_load_enckey,.-_cmll128_load_enckey
+_cmll256_load_enckey=_cmll128_load_enckey
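+! the 256-bit path reuses this loader; _cmll256_encrypt_* fetch their
+! additional subkeys from [$key + 208] onward themselves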
+
+.align 32
+_cmll256_load_deckey:
+ ldd [$key + 64], %f62
+ ldd [$key + 72], %f60
+ b .Load_deckey
+ add $key, 64, $key
+_cmll128_load_deckey:
+ ldd [$key + 0], %f60
+ ldd [$key + 8], %f62
+.Load_deckey:
+___
+for ($i=2; $i<24;$i++) { # load key schedule
+ $code.=<<___;
+ ldd [$key + `8*$i`], %f`62-2*$i`
+___
+}
+$code.=<<___;
+ ldx [$key + 192], %g4
+ retl
+ ldx [$key + 200], %g5
+.type _cmll256_load_deckey,#function
+.size _cmll256_load_deckey,.-_cmll256_load_deckey
+
+.align 32
+_cmll128_encrypt_1x:
+___
+for ($i=0; $i<3; $i++) {
+ $code.=<<___;
+ camellia_f %f`16+16*$i+0`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+2`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+4`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+6`, %f0, %f2, %f0
+___
+$code.=<<___ if ($i<2);
+ camellia_f %f`16+16*$i+8`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+10`, %f0, %f2, %f0
+ camellia_fl %f`16+16*$i+12`, %f0, %f0
+ camellia_fli %f`16+16*$i+14`, %f2, %f2
+___
+}
+$code.=<<___;
+ camellia_f %f56, %f2, %f0, %f4
+ camellia_f %f58, %f0, %f4, %f2
+ fxor %f60, %f4, %f0
+ retl
+ fxor %f62, %f2, %f2
+.type _cmll128_encrypt_1x,#function
+.size _cmll128_encrypt_1x,.-_cmll128_encrypt_1x
+_cmll128_decrypt_1x=_cmll128_encrypt_1x
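+! decryption reuses the encryption body; _cmll128_load_deckey simply
+! supplies the subkeys in reverse order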
+
+.align 32
+_cmll128_encrypt_2x:
+___
+for ($i=0; $i<3; $i++) {
+ $code.=<<___;
+ camellia_f %f`16+16*$i+0`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+0`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+2`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+2`, %f4, %f6, %f4
+ camellia_f %f`16+16*$i+4`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+4`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+6`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+6`, %f4, %f6, %f4
+___
+$code.=<<___ if ($i<2);
+ camellia_f %f`16+16*$i+8`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+8`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+10`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+10`, %f4, %f6, %f4
+ camellia_fl %f`16+16*$i+12`, %f0, %f0
+ camellia_fl %f`16+16*$i+12`, %f4, %f4
+ camellia_fli %f`16+16*$i+14`, %f2, %f2
+ camellia_fli %f`16+16*$i+14`, %f6, %f6
+___
+}
+$code.=<<___;
+ camellia_f %f56, %f2, %f0, %f8
+ camellia_f %f56, %f6, %f4, %f10
+ camellia_f %f58, %f0, %f8, %f2
+ camellia_f %f58, %f4, %f10, %f6
+ fxor %f60, %f8, %f0
+ fxor %f60, %f10, %f4
+ fxor %f62, %f2, %f2
+ retl
+ fxor %f62, %f6, %f6
+.type _cmll128_encrypt_2x,#function
+.size _cmll128_encrypt_2x,.-_cmll128_encrypt_2x
+_cmll128_decrypt_2x=_cmll128_encrypt_2x
+
+.align 32
+_cmll256_encrypt_1x:
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f18, %f0, %f2, %f0
+ ldd [$key + 208], %f16
+ ldd [$key + 216], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f22, %f0, %f2, %f0
+ ldd [$key + 224], %f20
+ ldd [$key + 232], %f22
+ camellia_f %f24, %f2, %f0, %f2
+ camellia_f %f26, %f0, %f2, %f0
+ ldd [$key + 240], %f24
+ ldd [$key + 248], %f26
+ camellia_fl %f28, %f0, %f0
+ camellia_fli %f30, %f2, %f2
+ ldd [$key + 256], %f28
+ ldd [$key + 264], %f30
+___
+for ($i=1; $i<3; $i++) {
+ $code.=<<___;
+ camellia_f %f`16+16*$i+0`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+2`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+4`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+6`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+8`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+10`, %f0, %f2, %f0
+ camellia_fl %f`16+16*$i+12`, %f0, %f0
+ camellia_fli %f`16+16*$i+14`, %f2, %f2
+___
+}
+$code.=<<___;
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f18, %f0, %f2, %f0
+ ldd [$key + 16], %f16
+ ldd [$key + 24], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f22, %f0, %f2, %f0
+ ldd [$key + 32], %f20
+ ldd [$key + 40], %f22
+ camellia_f %f24, %f2, %f0, %f4
+ camellia_f %f26, %f0, %f4, %f2
+ ldd [$key + 48], %f24
+ ldd [$key + 56], %f26
+ fxor %f28, %f4, %f0
+ fxor %f30, %f2, %f2
+ ldd [$key + 64], %f28
+ retl
+ ldd [$key + 72], %f30
+.type _cmll256_encrypt_1x,#function
+.size _cmll256_encrypt_1x,.-_cmll256_encrypt_1x
+
+.align 32
+_cmll256_encrypt_2x:
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f16, %f6, %f4, %f6
+ camellia_f %f18, %f0, %f2, %f0
+ camellia_f %f18, %f4, %f6, %f4
+ ldd [$key + 208], %f16
+ ldd [$key + 216], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f20, %f6, %f4, %f6
+ camellia_f %f22, %f0, %f2, %f0
+ camellia_f %f22, %f4, %f6, %f4
+ ldd [$key + 224], %f20
+ ldd [$key + 232], %f22
+ camellia_f %f24, %f2, %f0, %f2
+ camellia_f %f24, %f6, %f4, %f6
+ camellia_f %f26, %f0, %f2, %f0
+ camellia_f %f26, %f4, %f6, %f4
+ ldd [$key + 240], %f24
+ ldd [$key + 248], %f26
+ camellia_fl %f28, %f0, %f0
+ camellia_fl %f28, %f4, %f4
+ camellia_fli %f30, %f2, %f2
+ camellia_fli %f30, %f6, %f6
+ ldd [$key + 256], %f28
+ ldd [$key + 264], %f30
+___
+for ($i=1; $i<3; $i++) {
+ $code.=<<___;
+ camellia_f %f`16+16*$i+0`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+0`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+2`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+2`, %f4, %f6, %f4
+ camellia_f %f`16+16*$i+4`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+4`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+6`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+6`, %f4, %f6, %f4
+ camellia_f %f`16+16*$i+8`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+8`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+10`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+10`, %f4, %f6, %f4
+ camellia_fl %f`16+16*$i+12`, %f0, %f0
+ camellia_fl %f`16+16*$i+12`, %f4, %f4
+ camellia_fli %f`16+16*$i+14`, %f2, %f2
+ camellia_fli %f`16+16*$i+14`, %f6, %f6
+___
+}
+$code.=<<___;
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f16, %f6, %f4, %f6
+ camellia_f %f18, %f0, %f2, %f0
+ camellia_f %f18, %f4, %f6, %f4
+ ldd [$key + 16], %f16
+ ldd [$key + 24], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f20, %f6, %f4, %f6
+ camellia_f %f22, %f0, %f2, %f0
+ camellia_f %f22, %f4, %f6, %f4
+ ldd [$key + 32], %f20
+ ldd [$key + 40], %f22
+ camellia_f %f24, %f2, %f0, %f8
+ camellia_f %f24, %f6, %f4, %f10
+ camellia_f %f26, %f0, %f8, %f2
+ camellia_f %f26, %f4, %f10, %f6
+ ldd [$key + 48], %f24
+ ldd [$key + 56], %f26
+ fxor %f28, %f8, %f0
+ fxor %f28, %f10, %f4
+ fxor %f30, %f2, %f2
+ fxor %f30, %f6, %f6
+ ldd [$key + 64], %f28
+ retl
+ ldd [$key + 72], %f30
+.type _cmll256_encrypt_2x,#function
+.size _cmll256_encrypt_2x,.-_cmll256_encrypt_2x
+
+.align 32
+_cmll256_decrypt_1x:
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f18, %f0, %f2, %f0
+ ldd [$key - 8], %f16
+ ldd [$key - 16], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f22, %f0, %f2, %f0
+ ldd [$key - 24], %f20
+ ldd [$key - 32], %f22
+ camellia_f %f24, %f2, %f0, %f2
+ camellia_f %f26, %f0, %f2, %f0
+ ldd [$key - 40], %f24
+ ldd [$key - 48], %f26
+ camellia_fl %f28, %f0, %f0
+ camellia_fli %f30, %f2, %f2
+ ldd [$key - 56], %f28
+ ldd [$key - 64], %f30
+___
+for ($i=1; $i<3; $i++) {
+ $code.=<<___;
+ camellia_f %f`16+16*$i+0`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+2`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+4`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+6`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+8`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+10`, %f0, %f2, %f0
+ camellia_fl %f`16+16*$i+12`, %f0, %f0
+ camellia_fli %f`16+16*$i+14`, %f2, %f2
+___
+}
+$code.=<<___;
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f18, %f0, %f2, %f0
+ ldd [$key + 184], %f16
+ ldd [$key + 176], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f22, %f0, %f2, %f0
+ ldd [$key + 168], %f20
+ ldd [$key + 160], %f22
+ camellia_f %f24, %f2, %f0, %f4
+ camellia_f %f26, %f0, %f4, %f2
+ ldd [$key + 152], %f24
+ ldd [$key + 144], %f26
+ fxor %f30, %f4, %f0
+ fxor %f28, %f2, %f2
+ ldd [$key + 136], %f28
+ retl
+ ldd [$key + 128], %f30
+.type _cmll256_decrypt_1x,#function
+.size _cmll256_decrypt_1x,.-_cmll256_decrypt_1x
+
+.align 32
+_cmll256_decrypt_2x:
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f16, %f6, %f4, %f6
+ camellia_f %f18, %f0, %f2, %f0
+ camellia_f %f18, %f4, %f6, %f4
+ ldd [$key - 8], %f16
+ ldd [$key - 16], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f20, %f6, %f4, %f6
+ camellia_f %f22, %f0, %f2, %f0
+ camellia_f %f22, %f4, %f6, %f4
+ ldd [$key - 24], %f20
+ ldd [$key - 32], %f22
+ camellia_f %f24, %f2, %f0, %f2
+ camellia_f %f24, %f6, %f4, %f6
+ camellia_f %f26, %f0, %f2, %f0
+ camellia_f %f26, %f4, %f6, %f4
+ ldd [$key - 40], %f24
+ ldd [$key - 48], %f26
+ camellia_fl %f28, %f0, %f0
+ camellia_fl %f28, %f4, %f4
+ camellia_fli %f30, %f2, %f2
+ camellia_fli %f30, %f6, %f6
+ ldd [$key - 56], %f28
+ ldd [$key - 64], %f30
+___
+for ($i=1; $i<3; $i++) {
+ $code.=<<___;
+ camellia_f %f`16+16*$i+0`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+0`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+2`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+2`, %f4, %f6, %f4
+ camellia_f %f`16+16*$i+4`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+4`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+6`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+6`, %f4, %f6, %f4
+ camellia_f %f`16+16*$i+8`, %f2, %f0, %f2
+ camellia_f %f`16+16*$i+8`, %f6, %f4, %f6
+ camellia_f %f`16+16*$i+10`, %f0, %f2, %f0
+ camellia_f %f`16+16*$i+10`, %f4, %f6, %f4
+ camellia_fl %f`16+16*$i+12`, %f0, %f0
+ camellia_fl %f`16+16*$i+12`, %f4, %f4
+ camellia_fli %f`16+16*$i+14`, %f2, %f2
+ camellia_fli %f`16+16*$i+14`, %f6, %f6
+___
+}
+$code.=<<___;
+ camellia_f %f16, %f2, %f0, %f2
+ camellia_f %f16, %f6, %f4, %f6
+ camellia_f %f18, %f0, %f2, %f0
+ camellia_f %f18, %f4, %f6, %f4
+ ldd [$key + 184], %f16
+ ldd [$key + 176], %f18
+ camellia_f %f20, %f2, %f0, %f2
+ camellia_f %f20, %f6, %f4, %f6
+ camellia_f %f22, %f0, %f2, %f0
+ camellia_f %f22, %f4, %f6, %f4
+ ldd [$key + 168], %f20
+ ldd [$key + 160], %f22
+ camellia_f %f24, %f2, %f0, %f8
+ camellia_f %f24, %f6, %f4, %f10
+ camellia_f %f26, %f0, %f8, %f2
+ camellia_f %f26, %f4, %f10, %f6
+ ldd [$key + 152], %f24
+ ldd [$key + 144], %f26
+ fxor %f30, %f8, %f0
+ fxor %f30, %f10, %f4
+ fxor %f28, %f2, %f2
+ fxor %f28, %f6, %f6
+ ldd [$key + 136], %f28
+ retl
+ ldd [$key + 128], %f30
+.type _cmll256_decrypt_2x,#function
+.size _cmll256_decrypt_2x,.-_cmll256_decrypt_2x
+___
+
+&alg_cbc_encrypt_implement("cmll",128);
+&alg_cbc_encrypt_implement("cmll",256);
+
+&alg_cbc_decrypt_implement("cmll",128);
+&alg_cbc_decrypt_implement("cmll",256);
+
+if ($::evp) {
+ &alg_ctr32_implement("cmll",128);
+ &alg_ctr32_implement("cmll",256);
+}
+}}}
+
+if (!$::evp) {
+$code.=<<___;
+.global Camellia_encrypt
+Camellia_encrypt=cmll_t4_encrypt
+.global Camellia_decrypt
+Camellia_decrypt=cmll_t4_decrypt
+.global Camellia_set_key
+.align 32
+Camellia_set_key:
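+	! validate the arguments: NULL or misaligned pointers yield -1,
+	! an unsupported key length yields -2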
+ andcc %o2, 7, %g0 ! double-check alignment
+ bnz,a,pn %icc, 1f
+ mov -1, %o0
+ brz,a,pn %o0, 1f
+ mov -1, %o0
+ brz,a,pn %o2, 1f
+ mov -1, %o0
+ andncc %o1, 0x1c0, %g0
+ bnz,a,pn %icc, 1f
+ mov -2, %o0
+ cmp %o1, 128
+ bl,a,pn %icc, 1f
+ mov -2, %o0
+ b cmll_t4_set_key
+ nop
+1: retl
+ nop
+.type Camellia_set_key,#function
+.size Camellia_set_key,.-Camellia_set_key
+___
+
+my ($inp,$out,$len,$key,$ivec,$enc)=map("%o$_",(0..5));
+
+$code.=<<___;
+.globl Camellia_cbc_encrypt
+.align 32
+Camellia_cbc_encrypt:
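+	! grandRounds (at [$key + 272]) is 3 for 128-bit keys; 192- and
+	! 256-bit keys share the 256-bit code path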
+ ld [$key + 272], %g1
+ nop
+ brz $enc, .Lcbc_decrypt
+ cmp %g1, 3
+
+ be,pt %icc, cmll128_t4_cbc_encrypt
+ nop
+ ba cmll256_t4_cbc_encrypt
+ nop
+
+.Lcbc_decrypt:
+ be,pt %icc, cmll128_t4_cbc_decrypt
+ nop
+ ba cmll256_t4_cbc_decrypt
+ nop
+.type Camellia_cbc_encrypt,#function
+.size Camellia_cbc_encrypt,.-Camellia_cbc_encrypt
+___
+}
+
+&emit_assembler();
+
+close STDOUT;