author     Wojtek Kosior <wk@koszkonutek-tmp.pl.eu.org>    2021-04-30 00:33:56 +0200
committer  Wojtek Kosior <wk@koszkonutek-tmp.pl.eu.org>    2021-04-30 00:33:56 +0200
commit     aa4d426b4d3527d7e166df1a05058c9a4a0f6683 (patch)
tree       4ff17ce8b89a2321b9d0ed4bcfc37c447bcb6820 /openssl-1.1.0h/crypto/ripemd
Diffstat (limited to 'openssl-1.1.0h/crypto/ripemd')
-rw-r--r--  openssl-1.1.0h/crypto/ripemd/asm/rmd-586.pl   603
-rw-r--r--  openssl-1.1.0h/crypto/ripemd/build.info         6
-rw-r--r--  openssl-1.1.0h/crypto/ripemd/rmd_dgst.c       282
-rw-r--r--  openssl-1.1.0h/crypto/ripemd/rmd_locl.h        88
-rw-r--r--  openssl-1.1.0h/crypto/ripemd/rmd_one.c         28
-rw-r--r--  openssl-1.1.0h/crypto/ripemd/rmdconst.h       350

6 files changed, 1357 insertions(+), 0 deletions(-)
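For orientation: this commit vendors OpenSSL 1.1.0h's RIPEMD-160 code — rmd_dgst.c is the portable compression function, rmd_locl.h and rmdconst.h hold its round macros and word/shift tables, rmd_one.c is the one-shot wrapper, asm/rmd-586.pl generates the x86 assembly variant, and build.info is the build glue. Below is a minimal usage sketch of the public API these files back, as declared in <openssl/ripemd.h>; the main() scaffolding and sample message are illustrative only and not part of the commit.

```c
/* Minimal usage sketch for the API provided by the files in this diff
 * (declared in <openssl/ripemd.h>); main() and the sample data are
 * illustrative, not part of the commit. */
#include <stdio.h>
#include <string.h>
#include <openssl/ripemd.h>

int main(void)
{
    const char *msg = "abc";
    unsigned char md[RIPEMD160_DIGEST_LENGTH];   /* 20 bytes */
    RIPEMD160_CTX ctx;
    int i;

    /* One-shot interface implemented in rmd_one.c */
    RIPEMD160((const unsigned char *)msg, strlen(msg), md);

    /* Equivalent streaming interface built on rmd_dgst.c / md32_common.h */
    RIPEMD160_Init(&ctx);
    RIPEMD160_Update(&ctx, msg, strlen(msg));
    RIPEMD160_Final(md, &ctx);

    for (i = 0; i < RIPEMD160_DIGEST_LENGTH; i++)
        printf("%02x", md[i]);
    /* per the published RIPEMD-160 test vectors, "abc" should print
     * 8eb208f7e05d987a9b044a8e98c6b087f15a0bfc */
    printf("\n");
    return 0;
}
```

Linking is the usual `-lcrypto`. Note that rmd_one.c falls back to a static internal buffer when md is NULL, so passing your own output buffer, as above, is the thread-safe choice.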
diff --git a/openssl-1.1.0h/crypto/ripemd/asm/rmd-586.pl b/openssl-1.1.0h/crypto/ripemd/asm/rmd-586.pl new file mode 100644 index 0000000..544c496 --- /dev/null +++ b/openssl-1.1.0h/crypto/ripemd/asm/rmd-586.pl @@ -0,0 +1,603 @@ +#! /usr/bin/env perl +# Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. +# +# Licensed under the OpenSSL license (the "License"). You may not use +# this file except in compliance with the License. You can obtain a copy +# in the file LICENSE in the source distribution or at +# https://www.openssl.org/source/license.html + + +# Normal is the +# ripemd160_block_asm_data_order(RIPEMD160_CTX *c, ULONG *X,int blocks); + +$normal=0; + +$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; +push(@INC,"${dir}","${dir}../../perlasm"); +require "x86asm.pl"; + +$output=pop; +open STDOUT,">$output"; + +&asm_init($ARGV[0],$0); + +$A="ecx"; +$B="esi"; +$C="edi"; +$D="ebx"; +$E="ebp"; +$tmp1="eax"; +$tmp2="edx"; + +$KL1=0x5A827999; +$KL2=0x6ED9EBA1; +$KL3=0x8F1BBCDC; +$KL4=0xA953FD4E; +$KR0=0x50A28BE6; +$KR1=0x5C4DD124; +$KR2=0x6D703EF3; +$KR3=0x7A6D76E9; + + +@wl=( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15, + 7, 4,13, 1,10, 6,15, 3,12, 0, 9, 5, 2,14,11, 8, + 3,10,14, 4, 9,15, 8, 1, 2, 7, 0, 6,13,11, 5,12, + 1, 9,11,10, 0, 8,12, 4,13, 3, 7,15,14, 5, 6, 2, + 4, 0, 5, 9, 7,12, 2,10,14, 1, 3, 8,11, 6,15,13, + ); + +@wr=( 5,14, 7, 0, 9, 2,11, 4,13, 6,15, 8, 1,10, 3,12, + 6,11, 3, 7, 0,13, 5,10,14,15, 8,12, 4, 9, 1, 2, + 15, 5, 1, 3, 7,14, 6, 9,11, 8,12, 2,10, 0, 4,13, + 8, 6, 4, 1, 3,11,15, 0, 5,12, 2,13, 9, 7,10,14, + 12,15,10, 4, 1, 5, 8, 7, 6, 2,13,14, 0, 3, 9,11, + ); + +@sl=( 11,14,15,12, 5, 8, 7, 9,11,13,14,15, 6, 7, 9, 8, + 7, 6, 8,13,11, 9, 7,15, 7,12,15, 9,11, 7,13,12, + 11,13, 6, 7,14, 9,13,15,14, 8,13, 6, 5,12, 7, 5, + 11,12,14,15,14,15, 9, 8, 9,14, 5, 6, 8, 6, 5,12, + 9,15, 5,11, 6, 8,13,12, 5,12,13,14,11, 8, 5, 6, + ); + +@sr=( 8, 9, 9,11,13,15,15, 5, 7, 7, 8,11,14,14,12, 6, + 9,13,15, 7,12, 8, 9,11, 7, 7,12, 7, 6,15,13,11, + 9, 7,15,11, 8, 6, 6,14,12,13, 5,14,13,13, 7, 5, + 15, 5, 8,11,14,14, 6,14, 6, 9,12, 9,12, 5,15, 8, + 8, 5,12, 9,12, 5,14, 6, 8,13, 6, 5,15,13,11,11, + ); + +&ripemd160_block("ripemd160_block_asm_data_order"); +&asm_finish(); + +close STDOUT; + +sub Xv + { + local($n)=@_; + return(&swtmp($n)); + # tmp on stack + } + +sub Np + { + local($p)=@_; + local(%n)=($A,$E,$B,$A,$C,$B,$D,$C,$E,$D); + return($n{$p}); + } + +sub RIP1 + { + local($a,$b,$c,$d,$e,$pos,$s,$o,$pos2)=@_; + + &comment($p++); + if ($p & 1) + { + #&mov($tmp1, $c) if $o == -1; + &xor($tmp1, $d) if $o == -1; + &mov($tmp2, &Xv($pos)); + &xor($tmp1, $b); + &add($a, $tmp2); + &rotl($c, 10); + &add($a, $tmp1); + &mov($tmp1, &Np($c)); # NEXT + # XXX + &rotl($a, $s); + &add($a, $e); + } + else + { + &xor($tmp1, $d); + &mov($tmp2, &Xv($pos)); + &xor($tmp1, $b); + &add($a, $tmp1); + &mov($tmp1, &Np($c)) if $o <= 0; + &mov($tmp1, -1) if $o == 1; + # XXX if $o == 2; + &rotl($c, 10); + &add($a, $tmp2); + &xor($tmp1, &Np($d)) if $o <= 0; + &mov($tmp2, &Xv($pos2)) if $o == 1; + &mov($tmp2, &wparam(0)) if $o == 2; + &rotl($a, $s); + &add($a, $e); + } + } + +sub RIP2 + { + local($a,$b,$c,$d,$e,$pos,$pos2,$s,$K,$o)=@_; + +# XXXXXX + &comment($p++); + if ($p & 1) + { +# &mov($tmp2, &Xv($pos)) if $o < -1; +# &mov($tmp1, -1) if $o < -1; + + &add($a, $tmp2); + &mov($tmp2, $c); + &sub($tmp1, $b); + &and($tmp2, $b); + &and($tmp1, $d); + &or($tmp2, $tmp1); + &mov($tmp1, &Xv($pos2)) if $o <= 0; # XXXXXXXXXXXXXX + # XXX + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp2,1)); + &mov($tmp2, -1) if $o <= 0; + 
# XXX + &rotl($a, $s); + &add($a, $e); + } + else + { + # XXX + &add($a, $tmp1); + &mov($tmp1, $c); + &sub($tmp2, $b); + &and($tmp1, $b); + &and($tmp2, $d); + if ($o != 2) + { + &or($tmp1, $tmp2); + &mov($tmp2, &Xv($pos2)) if $o <= 0; + &mov($tmp2, -1) if $o == 1; + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp1,1)); + &mov($tmp1, -1) if $o <= 0; + &sub($tmp2, &Np($c)) if $o == 1; + } else { + &or($tmp2, $tmp1); + &mov($tmp1, &Np($c)); + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp2,1)); + &xor($tmp1, &Np($d)); + } + &rotl($a, $s); + &add($a, $e); + } + } + +sub RIP3 + { + local($a,$b,$c,$d,$e,$pos,$s,$K,$o,$pos2)=@_; + + &comment($p++); + if ($p & 1) + { +# &mov($tmp2, -1) if $o < -1; +# &sub($tmp2, $c) if $o < -1; + &mov($tmp1, &Xv($pos)); + &or($tmp2, $b); + &add($a, $tmp1); + &xor($tmp2, $d); + &mov($tmp1, -1) if $o <= 0; # NEXT + # XXX + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp2,1)); + &sub($tmp1, &Np($c)) if $o <= 0; # NEXT + # XXX + &rotl($a, $s); + &add($a, $e); + } + else + { + &mov($tmp2, &Xv($pos)); + &or($tmp1, $b); + &add($a, $tmp2); + &xor($tmp1, $d); + &mov($tmp2, -1) if $o <= 0; # NEXT + &mov($tmp2, -1) if $o == 1; + &mov($tmp2, &Xv($pos2)) if $o == 2; + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp1,1)); + &sub($tmp2, &Np($c)) if $o <= 0; # NEXT + &mov($tmp1, &Np($d)) if $o == 1; + &mov($tmp1, -1) if $o == 2; + &rotl($a, $s); + &add($a, $e); + } + } + +sub RIP4 + { + local($a,$b,$c,$d,$e,$pos,$s,$K,$o)=@_; + + &comment($p++); + if ($p & 1) + { +# &mov($tmp2, -1) if $o == -2; +# &mov($tmp1, $d) if $o == -2; + &sub($tmp2, $d); + &and($tmp1, $b); + &and($tmp2, $c); + &or($tmp2, $tmp1); + &mov($tmp1, &Xv($pos)); + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp2)); + &mov($tmp2, -1) unless $o > 0; # NEXT + # XXX + &add($a, $tmp1); + &mov($tmp1, &Np($d)) unless $o > 0; # NEXT + # XXX + &rotl($a, $s); + &add($a, $e); + } + else + { + &sub($tmp2, $d); + &and($tmp1, $b); + &and($tmp2, $c); + &or($tmp2, $tmp1); + &mov($tmp1, &Xv($pos)); + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp2)); + &mov($tmp2, -1) if $o == 0; # NEXT + &mov($tmp2, -1) if $o == 1; + &mov($tmp2, -1) if $o == 2; + # XXX + &add($a, $tmp1); + &mov($tmp1, &Np($d)) if $o == 0; # NEXT + &sub($tmp2, &Np($d)) if $o == 1; + &sub($tmp2, &Np($c)) if $o == 2; + # XXX + &rotl($a, $s); + &add($a, $e); + } + } + +sub RIP5 + { + local($a,$b,$c,$d,$e,$pos,$s,$K,$o)=@_; + + &comment($p++); + if ($p & 1) + { + &mov($tmp2, -1) if $o == -2; + &sub($tmp2, $d) if $o == -2; + &mov($tmp1, &Xv($pos)); + &or($tmp2, $c); + &add($a, $tmp1); + &xor($tmp2, $b); + &mov($tmp1, -1) if $o <= 0; + # XXX + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp2,1)); + &sub($tmp1, &Np($d)) if $o <= 0; + # XXX + &rotl($a, $s); + &add($a, $e); + } + else + { + &mov($tmp2, &Xv($pos)); + &or($tmp1, $c); + &add($a, $tmp2); + &xor($tmp1, $b); + &mov($tmp2, -1) if $o <= 0; + &mov($tmp2, &wparam(0)) if $o == 1; # Middle code + &mov($tmp2, -1) if $o == 2; + &rotl($c, 10); + &lea($a, &DWP($K,$a,$tmp1,1)); + &sub($tmp2, &Np($d)) if $o <= 0; + &mov(&swtmp(16), $A) if $o == 1; + &mov($tmp1, &Np($d)) if $o == 2; + &rotl($a, $s); + &add($a, $e); + } + } + +sub ripemd160_block + { + local($name)=@_; + + &function_begin_B($name,"",3); + + # parameter 1 is the RIPEMD160_CTX structure. 
+ # A 0 + # B 4 + # C 8 + # D 12 + # E 16 + + &mov($tmp2, &wparam(0)); + &mov($tmp1, &wparam(1)); + &push("esi"); + &mov($A, &DWP( 0,$tmp2,"",0)); + &push("edi"); + &mov($B, &DWP( 4,$tmp2,"",0)); + &push("ebp"); + &mov($C, &DWP( 8,$tmp2,"",0)); + &push("ebx"); + &stack_push(16+5+6); + # Special comment about the figure of 6. + # Idea is to pad the current frame so + # that the top of the stack gets fairly + # aligned. Well, as you realize it would + # always depend on how the frame below is + # aligned. The good news are that gcc-2.95 + # and later does keep first argument at + # least double-wise aligned. + # <appro@fy.chalmers.se> + + &set_label("start") unless $normal; + &comment(""); + + # &mov($tmp1, &wparam(1)); # Done at end of loop + # &mov($tmp2, &wparam(0)); # Done at end of loop + + for ($z=0; $z<16; $z+=2) + { + &mov($D, &DWP( $z*4,$tmp1,"",0)); + &mov($E, &DWP( ($z+1)*4,$tmp1,"",0)); + &mov(&swtmp($z), $D); + &mov(&swtmp($z+1), $E); + } + &mov($tmp1, $C); + &mov($D, &DWP(12,$tmp2,"",0)); + &mov($E, &DWP(16,$tmp2,"",0)); + + &RIP1($A,$B,$C,$D,$E,$wl[ 0],$sl[ 0],-1); + &RIP1($E,$A,$B,$C,$D,$wl[ 1],$sl[ 1],0); + &RIP1($D,$E,$A,$B,$C,$wl[ 2],$sl[ 2],0); + &RIP1($C,$D,$E,$A,$B,$wl[ 3],$sl[ 3],0); + &RIP1($B,$C,$D,$E,$A,$wl[ 4],$sl[ 4],0); + &RIP1($A,$B,$C,$D,$E,$wl[ 5],$sl[ 5],0); + &RIP1($E,$A,$B,$C,$D,$wl[ 6],$sl[ 6],0); + &RIP1($D,$E,$A,$B,$C,$wl[ 7],$sl[ 7],0); + &RIP1($C,$D,$E,$A,$B,$wl[ 8],$sl[ 8],0); + &RIP1($B,$C,$D,$E,$A,$wl[ 9],$sl[ 9],0); + &RIP1($A,$B,$C,$D,$E,$wl[10],$sl[10],0); + &RIP1($E,$A,$B,$C,$D,$wl[11],$sl[11],0); + &RIP1($D,$E,$A,$B,$C,$wl[12],$sl[12],0); + &RIP1($C,$D,$E,$A,$B,$wl[13],$sl[13],0); + &RIP1($B,$C,$D,$E,$A,$wl[14],$sl[14],0); + &RIP1($A,$B,$C,$D,$E,$wl[15],$sl[15],1,$wl[16]); + + &RIP2($E,$A,$B,$C,$D,$wl[16],$wl[17],$sl[16],$KL1,-1); + &RIP2($D,$E,$A,$B,$C,$wl[17],$wl[18],$sl[17],$KL1,0); + &RIP2($C,$D,$E,$A,$B,$wl[18],$wl[19],$sl[18],$KL1,0); + &RIP2($B,$C,$D,$E,$A,$wl[19],$wl[20],$sl[19],$KL1,0); + &RIP2($A,$B,$C,$D,$E,$wl[20],$wl[21],$sl[20],$KL1,0); + &RIP2($E,$A,$B,$C,$D,$wl[21],$wl[22],$sl[21],$KL1,0); + &RIP2($D,$E,$A,$B,$C,$wl[22],$wl[23],$sl[22],$KL1,0); + &RIP2($C,$D,$E,$A,$B,$wl[23],$wl[24],$sl[23],$KL1,0); + &RIP2($B,$C,$D,$E,$A,$wl[24],$wl[25],$sl[24],$KL1,0); + &RIP2($A,$B,$C,$D,$E,$wl[25],$wl[26],$sl[25],$KL1,0); + &RIP2($E,$A,$B,$C,$D,$wl[26],$wl[27],$sl[26],$KL1,0); + &RIP2($D,$E,$A,$B,$C,$wl[27],$wl[28],$sl[27],$KL1,0); + &RIP2($C,$D,$E,$A,$B,$wl[28],$wl[29],$sl[28],$KL1,0); + &RIP2($B,$C,$D,$E,$A,$wl[29],$wl[30],$sl[29],$KL1,0); + &RIP2($A,$B,$C,$D,$E,$wl[30],$wl[31],$sl[30],$KL1,0); + &RIP2($E,$A,$B,$C,$D,$wl[31],$wl[32],$sl[31],$KL1,1); + + &RIP3($D,$E,$A,$B,$C,$wl[32],$sl[32],$KL2,-1); + &RIP3($C,$D,$E,$A,$B,$wl[33],$sl[33],$KL2,0); + &RIP3($B,$C,$D,$E,$A,$wl[34],$sl[34],$KL2,0); + &RIP3($A,$B,$C,$D,$E,$wl[35],$sl[35],$KL2,0); + &RIP3($E,$A,$B,$C,$D,$wl[36],$sl[36],$KL2,0); + &RIP3($D,$E,$A,$B,$C,$wl[37],$sl[37],$KL2,0); + &RIP3($C,$D,$E,$A,$B,$wl[38],$sl[38],$KL2,0); + &RIP3($B,$C,$D,$E,$A,$wl[39],$sl[39],$KL2,0); + &RIP3($A,$B,$C,$D,$E,$wl[40],$sl[40],$KL2,0); + &RIP3($E,$A,$B,$C,$D,$wl[41],$sl[41],$KL2,0); + &RIP3($D,$E,$A,$B,$C,$wl[42],$sl[42],$KL2,0); + &RIP3($C,$D,$E,$A,$B,$wl[43],$sl[43],$KL2,0); + &RIP3($B,$C,$D,$E,$A,$wl[44],$sl[44],$KL2,0); + &RIP3($A,$B,$C,$D,$E,$wl[45],$sl[45],$KL2,0); + &RIP3($E,$A,$B,$C,$D,$wl[46],$sl[46],$KL2,0); + &RIP3($D,$E,$A,$B,$C,$wl[47],$sl[47],$KL2,1); + + &RIP4($C,$D,$E,$A,$B,$wl[48],$sl[48],$KL3,-1); + &RIP4($B,$C,$D,$E,$A,$wl[49],$sl[49],$KL3,0); + 
&RIP4($A,$B,$C,$D,$E,$wl[50],$sl[50],$KL3,0); + &RIP4($E,$A,$B,$C,$D,$wl[51],$sl[51],$KL3,0); + &RIP4($D,$E,$A,$B,$C,$wl[52],$sl[52],$KL3,0); + &RIP4($C,$D,$E,$A,$B,$wl[53],$sl[53],$KL3,0); + &RIP4($B,$C,$D,$E,$A,$wl[54],$sl[54],$KL3,0); + &RIP4($A,$B,$C,$D,$E,$wl[55],$sl[55],$KL3,0); + &RIP4($E,$A,$B,$C,$D,$wl[56],$sl[56],$KL3,0); + &RIP4($D,$E,$A,$B,$C,$wl[57],$sl[57],$KL3,0); + &RIP4($C,$D,$E,$A,$B,$wl[58],$sl[58],$KL3,0); + &RIP4($B,$C,$D,$E,$A,$wl[59],$sl[59],$KL3,0); + &RIP4($A,$B,$C,$D,$E,$wl[60],$sl[60],$KL3,0); + &RIP4($E,$A,$B,$C,$D,$wl[61],$sl[61],$KL3,0); + &RIP4($D,$E,$A,$B,$C,$wl[62],$sl[62],$KL3,0); + &RIP4($C,$D,$E,$A,$B,$wl[63],$sl[63],$KL3,1); + + &RIP5($B,$C,$D,$E,$A,$wl[64],$sl[64],$KL4,-1); + &RIP5($A,$B,$C,$D,$E,$wl[65],$sl[65],$KL4,0); + &RIP5($E,$A,$B,$C,$D,$wl[66],$sl[66],$KL4,0); + &RIP5($D,$E,$A,$B,$C,$wl[67],$sl[67],$KL4,0); + &RIP5($C,$D,$E,$A,$B,$wl[68],$sl[68],$KL4,0); + &RIP5($B,$C,$D,$E,$A,$wl[69],$sl[69],$KL4,0); + &RIP5($A,$B,$C,$D,$E,$wl[70],$sl[70],$KL4,0); + &RIP5($E,$A,$B,$C,$D,$wl[71],$sl[71],$KL4,0); + &RIP5($D,$E,$A,$B,$C,$wl[72],$sl[72],$KL4,0); + &RIP5($C,$D,$E,$A,$B,$wl[73],$sl[73],$KL4,0); + &RIP5($B,$C,$D,$E,$A,$wl[74],$sl[74],$KL4,0); + &RIP5($A,$B,$C,$D,$E,$wl[75],$sl[75],$KL4,0); + &RIP5($E,$A,$B,$C,$D,$wl[76],$sl[76],$KL4,0); + &RIP5($D,$E,$A,$B,$C,$wl[77],$sl[77],$KL4,0); + &RIP5($C,$D,$E,$A,$B,$wl[78],$sl[78],$KL4,0); + &RIP5($B,$C,$D,$E,$A,$wl[79],$sl[79],$KL4,1); + + # &mov($tmp2, &wparam(0)); # moved into last RIP5 + # &mov(&swtmp(16), $A); + &mov($A, &DWP( 0,$tmp2,"",0)); + &mov(&swtmp(16+1), $B); + &mov(&swtmp(16+2), $C); + &mov($B, &DWP( 4,$tmp2,"",0)); + &mov(&swtmp(16+3), $D); + &mov($C, &DWP( 8,$tmp2,"",0)); + &mov(&swtmp(16+4), $E); + &mov($D, &DWP(12,$tmp2,"",0)); + &mov($E, &DWP(16,$tmp2,"",0)); + + &RIP5($A,$B,$C,$D,$E,$wr[ 0],$sr[ 0],$KR0,-2); + &RIP5($E,$A,$B,$C,$D,$wr[ 1],$sr[ 1],$KR0,0); + &RIP5($D,$E,$A,$B,$C,$wr[ 2],$sr[ 2],$KR0,0); + &RIP5($C,$D,$E,$A,$B,$wr[ 3],$sr[ 3],$KR0,0); + &RIP5($B,$C,$D,$E,$A,$wr[ 4],$sr[ 4],$KR0,0); + &RIP5($A,$B,$C,$D,$E,$wr[ 5],$sr[ 5],$KR0,0); + &RIP5($E,$A,$B,$C,$D,$wr[ 6],$sr[ 6],$KR0,0); + &RIP5($D,$E,$A,$B,$C,$wr[ 7],$sr[ 7],$KR0,0); + &RIP5($C,$D,$E,$A,$B,$wr[ 8],$sr[ 8],$KR0,0); + &RIP5($B,$C,$D,$E,$A,$wr[ 9],$sr[ 9],$KR0,0); + &RIP5($A,$B,$C,$D,$E,$wr[10],$sr[10],$KR0,0); + &RIP5($E,$A,$B,$C,$D,$wr[11],$sr[11],$KR0,0); + &RIP5($D,$E,$A,$B,$C,$wr[12],$sr[12],$KR0,0); + &RIP5($C,$D,$E,$A,$B,$wr[13],$sr[13],$KR0,0); + &RIP5($B,$C,$D,$E,$A,$wr[14],$sr[14],$KR0,0); + &RIP5($A,$B,$C,$D,$E,$wr[15],$sr[15],$KR0,2); + + &RIP4($E,$A,$B,$C,$D,$wr[16],$sr[16],$KR1,-2); + &RIP4($D,$E,$A,$B,$C,$wr[17],$sr[17],$KR1,0); + &RIP4($C,$D,$E,$A,$B,$wr[18],$sr[18],$KR1,0); + &RIP4($B,$C,$D,$E,$A,$wr[19],$sr[19],$KR1,0); + &RIP4($A,$B,$C,$D,$E,$wr[20],$sr[20],$KR1,0); + &RIP4($E,$A,$B,$C,$D,$wr[21],$sr[21],$KR1,0); + &RIP4($D,$E,$A,$B,$C,$wr[22],$sr[22],$KR1,0); + &RIP4($C,$D,$E,$A,$B,$wr[23],$sr[23],$KR1,0); + &RIP4($B,$C,$D,$E,$A,$wr[24],$sr[24],$KR1,0); + &RIP4($A,$B,$C,$D,$E,$wr[25],$sr[25],$KR1,0); + &RIP4($E,$A,$B,$C,$D,$wr[26],$sr[26],$KR1,0); + &RIP4($D,$E,$A,$B,$C,$wr[27],$sr[27],$KR1,0); + &RIP4($C,$D,$E,$A,$B,$wr[28],$sr[28],$KR1,0); + &RIP4($B,$C,$D,$E,$A,$wr[29],$sr[29],$KR1,0); + &RIP4($A,$B,$C,$D,$E,$wr[30],$sr[30],$KR1,0); + &RIP4($E,$A,$B,$C,$D,$wr[31],$sr[31],$KR1,2); + + &RIP3($D,$E,$A,$B,$C,$wr[32],$sr[32],$KR2,-2); + &RIP3($C,$D,$E,$A,$B,$wr[33],$sr[33],$KR2,0); + &RIP3($B,$C,$D,$E,$A,$wr[34],$sr[34],$KR2,0); + &RIP3($A,$B,$C,$D,$E,$wr[35],$sr[35],$KR2,0); + 
&RIP3($E,$A,$B,$C,$D,$wr[36],$sr[36],$KR2,0); + &RIP3($D,$E,$A,$B,$C,$wr[37],$sr[37],$KR2,0); + &RIP3($C,$D,$E,$A,$B,$wr[38],$sr[38],$KR2,0); + &RIP3($B,$C,$D,$E,$A,$wr[39],$sr[39],$KR2,0); + &RIP3($A,$B,$C,$D,$E,$wr[40],$sr[40],$KR2,0); + &RIP3($E,$A,$B,$C,$D,$wr[41],$sr[41],$KR2,0); + &RIP3($D,$E,$A,$B,$C,$wr[42],$sr[42],$KR2,0); + &RIP3($C,$D,$E,$A,$B,$wr[43],$sr[43],$KR2,0); + &RIP3($B,$C,$D,$E,$A,$wr[44],$sr[44],$KR2,0); + &RIP3($A,$B,$C,$D,$E,$wr[45],$sr[45],$KR2,0); + &RIP3($E,$A,$B,$C,$D,$wr[46],$sr[46],$KR2,0); + &RIP3($D,$E,$A,$B,$C,$wr[47],$sr[47],$KR2,2,$wr[48]); + + &RIP2($C,$D,$E,$A,$B,$wr[48],$wr[49],$sr[48],$KR3,-2); + &RIP2($B,$C,$D,$E,$A,$wr[49],$wr[50],$sr[49],$KR3,0); + &RIP2($A,$B,$C,$D,$E,$wr[50],$wr[51],$sr[50],$KR3,0); + &RIP2($E,$A,$B,$C,$D,$wr[51],$wr[52],$sr[51],$KR3,0); + &RIP2($D,$E,$A,$B,$C,$wr[52],$wr[53],$sr[52],$KR3,0); + &RIP2($C,$D,$E,$A,$B,$wr[53],$wr[54],$sr[53],$KR3,0); + &RIP2($B,$C,$D,$E,$A,$wr[54],$wr[55],$sr[54],$KR3,0); + &RIP2($A,$B,$C,$D,$E,$wr[55],$wr[56],$sr[55],$KR3,0); + &RIP2($E,$A,$B,$C,$D,$wr[56],$wr[57],$sr[56],$KR3,0); + &RIP2($D,$E,$A,$B,$C,$wr[57],$wr[58],$sr[57],$KR3,0); + &RIP2($C,$D,$E,$A,$B,$wr[58],$wr[59],$sr[58],$KR3,0); + &RIP2($B,$C,$D,$E,$A,$wr[59],$wr[60],$sr[59],$KR3,0); + &RIP2($A,$B,$C,$D,$E,$wr[60],$wr[61],$sr[60],$KR3,0); + &RIP2($E,$A,$B,$C,$D,$wr[61],$wr[62],$sr[61],$KR3,0); + &RIP2($D,$E,$A,$B,$C,$wr[62],$wr[63],$sr[62],$KR3,0); + &RIP2($C,$D,$E,$A,$B,$wr[63],$wr[64],$sr[63],$KR3,2); + + &RIP1($B,$C,$D,$E,$A,$wr[64],$sr[64],-2); + &RIP1($A,$B,$C,$D,$E,$wr[65],$sr[65],0); + &RIP1($E,$A,$B,$C,$D,$wr[66],$sr[66],0); + &RIP1($D,$E,$A,$B,$C,$wr[67],$sr[67],0); + &RIP1($C,$D,$E,$A,$B,$wr[68],$sr[68],0); + &RIP1($B,$C,$D,$E,$A,$wr[69],$sr[69],0); + &RIP1($A,$B,$C,$D,$E,$wr[70],$sr[70],0); + &RIP1($E,$A,$B,$C,$D,$wr[71],$sr[71],0); + &RIP1($D,$E,$A,$B,$C,$wr[72],$sr[72],0); + &RIP1($C,$D,$E,$A,$B,$wr[73],$sr[73],0); + &RIP1($B,$C,$D,$E,$A,$wr[74],$sr[74],0); + &RIP1($A,$B,$C,$D,$E,$wr[75],$sr[75],0); + &RIP1($E,$A,$B,$C,$D,$wr[76],$sr[76],0); + &RIP1($D,$E,$A,$B,$C,$wr[77],$sr[77],0); + &RIP1($C,$D,$E,$A,$B,$wr[78],$sr[78],0); + &RIP1($B,$C,$D,$E,$A,$wr[79],$sr[79],2); + + # &mov($tmp2, &wparam(0)); # Moved into last round + + &mov($tmp1, &DWP( 4,$tmp2,"",0)); # ctx->B + &add($D, $tmp1); + &mov($tmp1, &swtmp(16+2)); # $c + &add($D, $tmp1); + + &mov($tmp1, &DWP( 8,$tmp2,"",0)); # ctx->C + &add($E, $tmp1); + &mov($tmp1, &swtmp(16+3)); # $d + &add($E, $tmp1); + + &mov($tmp1, &DWP(12,$tmp2,"",0)); # ctx->D + &add($A, $tmp1); + &mov($tmp1, &swtmp(16+4)); # $e + &add($A, $tmp1); + + + &mov($tmp1, &DWP(16,$tmp2,"",0)); # ctx->E + &add($B, $tmp1); + &mov($tmp1, &swtmp(16+0)); # $a + &add($B, $tmp1); + + &mov($tmp1, &DWP( 0,$tmp2,"",0)); # ctx->A + &add($C, $tmp1); + &mov($tmp1, &swtmp(16+1)); # $b + &add($C, $tmp1); + + &mov($tmp1, &wparam(2)); + + &mov(&DWP( 0,$tmp2,"",0), $D); + &mov(&DWP( 4,$tmp2,"",0), $E); + &mov(&DWP( 8,$tmp2,"",0), $A); + &sub($tmp1,1); + &mov(&DWP(12,$tmp2,"",0), $B); + &mov(&DWP(16,$tmp2,"",0), $C); + + &jle(&label("get_out")); + + &mov(&wparam(2),$tmp1); + &mov($C, $A); + &mov($tmp1, &wparam(1)); + &mov($A, $D); + &add($tmp1, 64); + &mov($B, $E); + &mov(&wparam(1),$tmp1); + + &jmp(&label("start")); + + &set_label("get_out"); + + &stack_pop(16+5+6); + + &pop("ebx"); + &pop("ebp"); + &pop("edi"); + &pop("esi"); + &ret(); + &function_end_B($name); + } + diff --git a/openssl-1.1.0h/crypto/ripemd/build.info b/openssl-1.1.0h/crypto/ripemd/build.info new file mode 100644 index 0000000..c45050c --- /dev/null +++ 
b/openssl-1.1.0h/crypto/ripemd/build.info @@ -0,0 +1,6 @@ +LIBS=../../libcrypto +SOURCE[../../libcrypto]=\ + rmd_dgst.c rmd_one.c {- $target{rmd160_asm_src} -} + +GENERATE[rmd-586.s]=asm/rmd-586.pl $(PERLASM_SCHEME) $(CFLAGS) $(LIB_CFLAGS) +DEPEND[rmd-586.s]=../perlasm/x86asm.pl diff --git a/openssl-1.1.0h/crypto/ripemd/rmd_dgst.c b/openssl-1.1.0h/crypto/ripemd/rmd_dgst.c new file mode 100644 index 0000000..a1670c7 --- /dev/null +++ b/openssl-1.1.0h/crypto/ripemd/rmd_dgst.c @@ -0,0 +1,282 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include <stdio.h> +#include "rmd_locl.h" +#include <openssl/opensslv.h> + +#ifdef RMD160_ASM +void ripemd160_block_x86(RIPEMD160_CTX *c, unsigned long *p, size_t num); +# define ripemd160_block ripemd160_block_x86 +#else +void ripemd160_block(RIPEMD160_CTX *c, unsigned long *p, size_t num); +#endif + +int RIPEMD160_Init(RIPEMD160_CTX *c) +{ + memset(c, 0, sizeof(*c)); + c->A = RIPEMD160_A; + c->B = RIPEMD160_B; + c->C = RIPEMD160_C; + c->D = RIPEMD160_D; + c->E = RIPEMD160_E; + return 1; +} + +#ifndef ripemd160_block_data_order +# ifdef X +# undef X +# endif +void ripemd160_block_data_order(RIPEMD160_CTX *ctx, const void *p, size_t num) +{ + const unsigned char *data = p; + register unsigned MD32_REG_T A, B, C, D, E; + unsigned MD32_REG_T a, b, c, d, e, l; +# ifndef MD32_XARRAY + /* See comment in crypto/sha/sha_locl.h for details. */ + unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, + XX8, XX9, XX10, XX11, XX12, XX13, XX14, XX15; +# define X(i) XX##i +# else + RIPEMD160_LONG XX[16]; +# define X(i) XX[i] +# endif + + for (; num--;) { + + A = ctx->A; + B = ctx->B; + C = ctx->C; + D = ctx->D; + E = ctx->E; + + (void)HOST_c2l(data, l); + X(0) = l; + (void)HOST_c2l(data, l); + X(1) = l; + RIP1(A, B, C, D, E, WL00, SL00); + (void)HOST_c2l(data, l); + X(2) = l; + RIP1(E, A, B, C, D, WL01, SL01); + (void)HOST_c2l(data, l); + X(3) = l; + RIP1(D, E, A, B, C, WL02, SL02); + (void)HOST_c2l(data, l); + X(4) = l; + RIP1(C, D, E, A, B, WL03, SL03); + (void)HOST_c2l(data, l); + X(5) = l; + RIP1(B, C, D, E, A, WL04, SL04); + (void)HOST_c2l(data, l); + X(6) = l; + RIP1(A, B, C, D, E, WL05, SL05); + (void)HOST_c2l(data, l); + X(7) = l; + RIP1(E, A, B, C, D, WL06, SL06); + (void)HOST_c2l(data, l); + X(8) = l; + RIP1(D, E, A, B, C, WL07, SL07); + (void)HOST_c2l(data, l); + X(9) = l; + RIP1(C, D, E, A, B, WL08, SL08); + (void)HOST_c2l(data, l); + X(10) = l; + RIP1(B, C, D, E, A, WL09, SL09); + (void)HOST_c2l(data, l); + X(11) = l; + RIP1(A, B, C, D, E, WL10, SL10); + (void)HOST_c2l(data, l); + X(12) = l; + RIP1(E, A, B, C, D, WL11, SL11); + (void)HOST_c2l(data, l); + X(13) = l; + RIP1(D, E, A, B, C, WL12, SL12); + (void)HOST_c2l(data, l); + X(14) = l; + RIP1(C, D, E, A, B, WL13, SL13); + (void)HOST_c2l(data, l); + X(15) = l; + RIP1(B, C, D, E, A, WL14, SL14); + RIP1(A, B, C, D, E, WL15, SL15); + + RIP2(E, A, B, C, D, WL16, SL16, KL1); + RIP2(D, E, A, B, C, WL17, SL17, KL1); + RIP2(C, D, E, A, B, WL18, SL18, KL1); + RIP2(B, C, D, E, A, WL19, SL19, KL1); + RIP2(A, B, C, D, E, WL20, SL20, KL1); + RIP2(E, A, B, C, D, WL21, SL21, KL1); + RIP2(D, E, A, B, C, WL22, SL22, KL1); + RIP2(C, D, E, A, B, WL23, SL23, KL1); + RIP2(B, C, D, E, A, WL24, SL24, KL1); + RIP2(A, B, C, D, E, 
WL25, SL25, KL1); + RIP2(E, A, B, C, D, WL26, SL26, KL1); + RIP2(D, E, A, B, C, WL27, SL27, KL1); + RIP2(C, D, E, A, B, WL28, SL28, KL1); + RIP2(B, C, D, E, A, WL29, SL29, KL1); + RIP2(A, B, C, D, E, WL30, SL30, KL1); + RIP2(E, A, B, C, D, WL31, SL31, KL1); + + RIP3(D, E, A, B, C, WL32, SL32, KL2); + RIP3(C, D, E, A, B, WL33, SL33, KL2); + RIP3(B, C, D, E, A, WL34, SL34, KL2); + RIP3(A, B, C, D, E, WL35, SL35, KL2); + RIP3(E, A, B, C, D, WL36, SL36, KL2); + RIP3(D, E, A, B, C, WL37, SL37, KL2); + RIP3(C, D, E, A, B, WL38, SL38, KL2); + RIP3(B, C, D, E, A, WL39, SL39, KL2); + RIP3(A, B, C, D, E, WL40, SL40, KL2); + RIP3(E, A, B, C, D, WL41, SL41, KL2); + RIP3(D, E, A, B, C, WL42, SL42, KL2); + RIP3(C, D, E, A, B, WL43, SL43, KL2); + RIP3(B, C, D, E, A, WL44, SL44, KL2); + RIP3(A, B, C, D, E, WL45, SL45, KL2); + RIP3(E, A, B, C, D, WL46, SL46, KL2); + RIP3(D, E, A, B, C, WL47, SL47, KL2); + + RIP4(C, D, E, A, B, WL48, SL48, KL3); + RIP4(B, C, D, E, A, WL49, SL49, KL3); + RIP4(A, B, C, D, E, WL50, SL50, KL3); + RIP4(E, A, B, C, D, WL51, SL51, KL3); + RIP4(D, E, A, B, C, WL52, SL52, KL3); + RIP4(C, D, E, A, B, WL53, SL53, KL3); + RIP4(B, C, D, E, A, WL54, SL54, KL3); + RIP4(A, B, C, D, E, WL55, SL55, KL3); + RIP4(E, A, B, C, D, WL56, SL56, KL3); + RIP4(D, E, A, B, C, WL57, SL57, KL3); + RIP4(C, D, E, A, B, WL58, SL58, KL3); + RIP4(B, C, D, E, A, WL59, SL59, KL3); + RIP4(A, B, C, D, E, WL60, SL60, KL3); + RIP4(E, A, B, C, D, WL61, SL61, KL3); + RIP4(D, E, A, B, C, WL62, SL62, KL3); + RIP4(C, D, E, A, B, WL63, SL63, KL3); + + RIP5(B, C, D, E, A, WL64, SL64, KL4); + RIP5(A, B, C, D, E, WL65, SL65, KL4); + RIP5(E, A, B, C, D, WL66, SL66, KL4); + RIP5(D, E, A, B, C, WL67, SL67, KL4); + RIP5(C, D, E, A, B, WL68, SL68, KL4); + RIP5(B, C, D, E, A, WL69, SL69, KL4); + RIP5(A, B, C, D, E, WL70, SL70, KL4); + RIP5(E, A, B, C, D, WL71, SL71, KL4); + RIP5(D, E, A, B, C, WL72, SL72, KL4); + RIP5(C, D, E, A, B, WL73, SL73, KL4); + RIP5(B, C, D, E, A, WL74, SL74, KL4); + RIP5(A, B, C, D, E, WL75, SL75, KL4); + RIP5(E, A, B, C, D, WL76, SL76, KL4); + RIP5(D, E, A, B, C, WL77, SL77, KL4); + RIP5(C, D, E, A, B, WL78, SL78, KL4); + RIP5(B, C, D, E, A, WL79, SL79, KL4); + + a = A; + b = B; + c = C; + d = D; + e = E; + /* Do other half */ + A = ctx->A; + B = ctx->B; + C = ctx->C; + D = ctx->D; + E = ctx->E; + + RIP5(A, B, C, D, E, WR00, SR00, KR0); + RIP5(E, A, B, C, D, WR01, SR01, KR0); + RIP5(D, E, A, B, C, WR02, SR02, KR0); + RIP5(C, D, E, A, B, WR03, SR03, KR0); + RIP5(B, C, D, E, A, WR04, SR04, KR0); + RIP5(A, B, C, D, E, WR05, SR05, KR0); + RIP5(E, A, B, C, D, WR06, SR06, KR0); + RIP5(D, E, A, B, C, WR07, SR07, KR0); + RIP5(C, D, E, A, B, WR08, SR08, KR0); + RIP5(B, C, D, E, A, WR09, SR09, KR0); + RIP5(A, B, C, D, E, WR10, SR10, KR0); + RIP5(E, A, B, C, D, WR11, SR11, KR0); + RIP5(D, E, A, B, C, WR12, SR12, KR0); + RIP5(C, D, E, A, B, WR13, SR13, KR0); + RIP5(B, C, D, E, A, WR14, SR14, KR0); + RIP5(A, B, C, D, E, WR15, SR15, KR0); + + RIP4(E, A, B, C, D, WR16, SR16, KR1); + RIP4(D, E, A, B, C, WR17, SR17, KR1); + RIP4(C, D, E, A, B, WR18, SR18, KR1); + RIP4(B, C, D, E, A, WR19, SR19, KR1); + RIP4(A, B, C, D, E, WR20, SR20, KR1); + RIP4(E, A, B, C, D, WR21, SR21, KR1); + RIP4(D, E, A, B, C, WR22, SR22, KR1); + RIP4(C, D, E, A, B, WR23, SR23, KR1); + RIP4(B, C, D, E, A, WR24, SR24, KR1); + RIP4(A, B, C, D, E, WR25, SR25, KR1); + RIP4(E, A, B, C, D, WR26, SR26, KR1); + RIP4(D, E, A, B, C, WR27, SR27, KR1); + RIP4(C, D, E, A, B, WR28, SR28, KR1); + RIP4(B, C, D, E, A, WR29, SR29, KR1); + RIP4(A, B, C, D, E, WR30, 
SR30, KR1); + RIP4(E, A, B, C, D, WR31, SR31, KR1); + + RIP3(D, E, A, B, C, WR32, SR32, KR2); + RIP3(C, D, E, A, B, WR33, SR33, KR2); + RIP3(B, C, D, E, A, WR34, SR34, KR2); + RIP3(A, B, C, D, E, WR35, SR35, KR2); + RIP3(E, A, B, C, D, WR36, SR36, KR2); + RIP3(D, E, A, B, C, WR37, SR37, KR2); + RIP3(C, D, E, A, B, WR38, SR38, KR2); + RIP3(B, C, D, E, A, WR39, SR39, KR2); + RIP3(A, B, C, D, E, WR40, SR40, KR2); + RIP3(E, A, B, C, D, WR41, SR41, KR2); + RIP3(D, E, A, B, C, WR42, SR42, KR2); + RIP3(C, D, E, A, B, WR43, SR43, KR2); + RIP3(B, C, D, E, A, WR44, SR44, KR2); + RIP3(A, B, C, D, E, WR45, SR45, KR2); + RIP3(E, A, B, C, D, WR46, SR46, KR2); + RIP3(D, E, A, B, C, WR47, SR47, KR2); + + RIP2(C, D, E, A, B, WR48, SR48, KR3); + RIP2(B, C, D, E, A, WR49, SR49, KR3); + RIP2(A, B, C, D, E, WR50, SR50, KR3); + RIP2(E, A, B, C, D, WR51, SR51, KR3); + RIP2(D, E, A, B, C, WR52, SR52, KR3); + RIP2(C, D, E, A, B, WR53, SR53, KR3); + RIP2(B, C, D, E, A, WR54, SR54, KR3); + RIP2(A, B, C, D, E, WR55, SR55, KR3); + RIP2(E, A, B, C, D, WR56, SR56, KR3); + RIP2(D, E, A, B, C, WR57, SR57, KR3); + RIP2(C, D, E, A, B, WR58, SR58, KR3); + RIP2(B, C, D, E, A, WR59, SR59, KR3); + RIP2(A, B, C, D, E, WR60, SR60, KR3); + RIP2(E, A, B, C, D, WR61, SR61, KR3); + RIP2(D, E, A, B, C, WR62, SR62, KR3); + RIP2(C, D, E, A, B, WR63, SR63, KR3); + + RIP1(B, C, D, E, A, WR64, SR64); + RIP1(A, B, C, D, E, WR65, SR65); + RIP1(E, A, B, C, D, WR66, SR66); + RIP1(D, E, A, B, C, WR67, SR67); + RIP1(C, D, E, A, B, WR68, SR68); + RIP1(B, C, D, E, A, WR69, SR69); + RIP1(A, B, C, D, E, WR70, SR70); + RIP1(E, A, B, C, D, WR71, SR71); + RIP1(D, E, A, B, C, WR72, SR72); + RIP1(C, D, E, A, B, WR73, SR73); + RIP1(B, C, D, E, A, WR74, SR74); + RIP1(A, B, C, D, E, WR75, SR75); + RIP1(E, A, B, C, D, WR76, SR76); + RIP1(D, E, A, B, C, WR77, SR77); + RIP1(C, D, E, A, B, WR78, SR78); + RIP1(B, C, D, E, A, WR79, SR79); + + D = ctx->B + c + D; + ctx->B = ctx->C + d + E; + ctx->C = ctx->D + e + A; + ctx->D = ctx->E + a + B; + ctx->E = ctx->A + b + C; + ctx->A = D; + + } +} +#endif diff --git a/openssl-1.1.0h/crypto/ripemd/rmd_locl.h b/openssl-1.1.0h/crypto/ripemd/rmd_locl.h new file mode 100644 index 0000000..9c5ba15 --- /dev/null +++ b/openssl-1.1.0h/crypto/ripemd/rmd_locl.h @@ -0,0 +1,88 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include <stdlib.h> +#include <string.h> +#include <openssl/opensslconf.h> +#include <openssl/ripemd.h> + +/* + * DO EXAMINE COMMENTS IN crypto/md5/md5_locl.h & crypto/md5/md5_dgst.c + * FOR EXPLANATIONS ON FOLLOWING "CODE." 
+ * <appro@fy.chalmers.se> + */ +#ifdef RMD160_ASM +# if defined(__i386) || defined(__i386__) || defined(_M_IX86) +# define ripemd160_block_data_order ripemd160_block_asm_data_order +# endif +#endif + +void ripemd160_block_data_order(RIPEMD160_CTX *c, const void *p, size_t num); + +#define DATA_ORDER_IS_LITTLE_ENDIAN + +#define HASH_LONG RIPEMD160_LONG +#define HASH_CTX RIPEMD160_CTX +#define HASH_CBLOCK RIPEMD160_CBLOCK +#define HASH_UPDATE RIPEMD160_Update +#define HASH_TRANSFORM RIPEMD160_Transform +#define HASH_FINAL RIPEMD160_Final +#define HASH_MAKE_STRING(c,s) do { \ + unsigned long ll; \ + ll=(c)->A; (void)HOST_l2c(ll,(s)); \ + ll=(c)->B; (void)HOST_l2c(ll,(s)); \ + ll=(c)->C; (void)HOST_l2c(ll,(s)); \ + ll=(c)->D; (void)HOST_l2c(ll,(s)); \ + ll=(c)->E; (void)HOST_l2c(ll,(s)); \ + } while (0) +#define HASH_BLOCK_DATA_ORDER ripemd160_block_data_order + +#include "internal/md32_common.h" + +/* + * Transformed F2 and F4 are courtesy of Wei Dai <weidai@eskimo.com> + */ +#define F1(x,y,z) ((x) ^ (y) ^ (z)) +#define F2(x,y,z) ((((y) ^ (z)) & (x)) ^ (z)) +#define F3(x,y,z) (((~(y)) | (x)) ^ (z)) +#define F4(x,y,z) ((((x) ^ (y)) & (z)) ^ (y)) +#define F5(x,y,z) (((~(z)) | (y)) ^ (x)) + +#define RIPEMD160_A 0x67452301L +#define RIPEMD160_B 0xEFCDAB89L +#define RIPEMD160_C 0x98BADCFEL +#define RIPEMD160_D 0x10325476L +#define RIPEMD160_E 0xC3D2E1F0L + +#include "rmdconst.h" + +#define RIP1(a,b,c,d,e,w,s) { \ + a+=F1(b,c,d)+X(w); \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP2(a,b,c,d,e,w,s,K) { \ + a+=F2(b,c,d)+X(w)+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP3(a,b,c,d,e,w,s,K) { \ + a+=F3(b,c,d)+X(w)+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP4(a,b,c,d,e,w,s,K) { \ + a+=F4(b,c,d)+X(w)+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } + +#define RIP5(a,b,c,d,e,w,s,K) { \ + a+=F5(b,c,d)+X(w)+K; \ + a=ROTATE(a,s)+e; \ + c=ROTATE(c,10); } diff --git a/openssl-1.1.0h/crypto/ripemd/rmd_one.c b/openssl-1.1.0h/crypto/ripemd/rmd_one.c new file mode 100644 index 0000000..c3193bd --- /dev/null +++ b/openssl-1.1.0h/crypto/ripemd/rmd_one.c @@ -0,0 +1,28 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#include <stdio.h> +#include <string.h> +#include <openssl/ripemd.h> +#include <openssl/crypto.h> + +unsigned char *RIPEMD160(const unsigned char *d, size_t n, unsigned char *md) +{ + RIPEMD160_CTX c; + static unsigned char m[RIPEMD160_DIGEST_LENGTH]; + + if (md == NULL) + md = m; + if (!RIPEMD160_Init(&c)) + return NULL; + RIPEMD160_Update(&c, d, n); + RIPEMD160_Final(md, &c); + OPENSSL_cleanse(&c, sizeof(c)); /* security consideration */ + return (md); +} diff --git a/openssl-1.1.0h/crypto/ripemd/rmdconst.h b/openssl-1.1.0h/crypto/ripemd/rmdconst.h new file mode 100644 index 0000000..b810132 --- /dev/null +++ b/openssl-1.1.0h/crypto/ripemd/rmdconst.h @@ -0,0 +1,350 @@ +/* + * Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved. + * + * Licensed under the OpenSSL license (the "License"). You may not use + * this file except in compliance with the License. 
You can obtain a copy + * in the file LICENSE in the source distribution or at + * https://www.openssl.org/source/license.html + */ + +#define KL0 0x00000000L +#define KL1 0x5A827999L +#define KL2 0x6ED9EBA1L +#define KL3 0x8F1BBCDCL +#define KL4 0xA953FD4EL + +#define KR0 0x50A28BE6L +#define KR1 0x5C4DD124L +#define KR2 0x6D703EF3L +#define KR3 0x7A6D76E9L +#define KR4 0x00000000L + +#define WL00 0 +#define SL00 11 +#define WL01 1 +#define SL01 14 +#define WL02 2 +#define SL02 15 +#define WL03 3 +#define SL03 12 +#define WL04 4 +#define SL04 5 +#define WL05 5 +#define SL05 8 +#define WL06 6 +#define SL06 7 +#define WL07 7 +#define SL07 9 +#define WL08 8 +#define SL08 11 +#define WL09 9 +#define SL09 13 +#define WL10 10 +#define SL10 14 +#define WL11 11 +#define SL11 15 +#define WL12 12 +#define SL12 6 +#define WL13 13 +#define SL13 7 +#define WL14 14 +#define SL14 9 +#define WL15 15 +#define SL15 8 + +#define WL16 7 +#define SL16 7 +#define WL17 4 +#define SL17 6 +#define WL18 13 +#define SL18 8 +#define WL19 1 +#define SL19 13 +#define WL20 10 +#define SL20 11 +#define WL21 6 +#define SL21 9 +#define WL22 15 +#define SL22 7 +#define WL23 3 +#define SL23 15 +#define WL24 12 +#define SL24 7 +#define WL25 0 +#define SL25 12 +#define WL26 9 +#define SL26 15 +#define WL27 5 +#define SL27 9 +#define WL28 2 +#define SL28 11 +#define WL29 14 +#define SL29 7 +#define WL30 11 +#define SL30 13 +#define WL31 8 +#define SL31 12 + +#define WL32 3 +#define SL32 11 +#define WL33 10 +#define SL33 13 +#define WL34 14 +#define SL34 6 +#define WL35 4 +#define SL35 7 +#define WL36 9 +#define SL36 14 +#define WL37 15 +#define SL37 9 +#define WL38 8 +#define SL38 13 +#define WL39 1 +#define SL39 15 +#define WL40 2 +#define SL40 14 +#define WL41 7 +#define SL41 8 +#define WL42 0 +#define SL42 13 +#define WL43 6 +#define SL43 6 +#define WL44 13 +#define SL44 5 +#define WL45 11 +#define SL45 12 +#define WL46 5 +#define SL46 7 +#define WL47 12 +#define SL47 5 + +#define WL48 1 +#define SL48 11 +#define WL49 9 +#define SL49 12 +#define WL50 11 +#define SL50 14 +#define WL51 10 +#define SL51 15 +#define WL52 0 +#define SL52 14 +#define WL53 8 +#define SL53 15 +#define WL54 12 +#define SL54 9 +#define WL55 4 +#define SL55 8 +#define WL56 13 +#define SL56 9 +#define WL57 3 +#define SL57 14 +#define WL58 7 +#define SL58 5 +#define WL59 15 +#define SL59 6 +#define WL60 14 +#define SL60 8 +#define WL61 5 +#define SL61 6 +#define WL62 6 +#define SL62 5 +#define WL63 2 +#define SL63 12 + +#define WL64 4 +#define SL64 9 +#define WL65 0 +#define SL65 15 +#define WL66 5 +#define SL66 5 +#define WL67 9 +#define SL67 11 +#define WL68 7 +#define SL68 6 +#define WL69 12 +#define SL69 8 +#define WL70 2 +#define SL70 13 +#define WL71 10 +#define SL71 12 +#define WL72 14 +#define SL72 5 +#define WL73 1 +#define SL73 12 +#define WL74 3 +#define SL74 13 +#define WL75 8 +#define SL75 14 +#define WL76 11 +#define SL76 11 +#define WL77 6 +#define SL77 8 +#define WL78 15 +#define SL78 5 +#define WL79 13 +#define SL79 6 + +#define WR00 5 +#define SR00 8 +#define WR01 14 +#define SR01 9 +#define WR02 7 +#define SR02 9 +#define WR03 0 +#define SR03 11 +#define WR04 9 +#define SR04 13 +#define WR05 2 +#define SR05 15 +#define WR06 11 +#define SR06 15 +#define WR07 4 +#define SR07 5 +#define WR08 13 +#define SR08 7 +#define WR09 6 +#define SR09 7 +#define WR10 15 +#define SR10 8 +#define WR11 8 +#define SR11 11 +#define WR12 1 +#define SR12 14 +#define WR13 10 +#define SR13 14 +#define WR14 3 +#define SR14 12 +#define WR15 12 +#define SR15 6 
+ +#define WR16 6 +#define SR16 9 +#define WR17 11 +#define SR17 13 +#define WR18 3 +#define SR18 15 +#define WR19 7 +#define SR19 7 +#define WR20 0 +#define SR20 12 +#define WR21 13 +#define SR21 8 +#define WR22 5 +#define SR22 9 +#define WR23 10 +#define SR23 11 +#define WR24 14 +#define SR24 7 +#define WR25 15 +#define SR25 7 +#define WR26 8 +#define SR26 12 +#define WR27 12 +#define SR27 7 +#define WR28 4 +#define SR28 6 +#define WR29 9 +#define SR29 15 +#define WR30 1 +#define SR30 13 +#define WR31 2 +#define SR31 11 + +#define WR32 15 +#define SR32 9 +#define WR33 5 +#define SR33 7 +#define WR34 1 +#define SR34 15 +#define WR35 3 +#define SR35 11 +#define WR36 7 +#define SR36 8 +#define WR37 14 +#define SR37 6 +#define WR38 6 +#define SR38 6 +#define WR39 9 +#define SR39 14 +#define WR40 11 +#define SR40 12 +#define WR41 8 +#define SR41 13 +#define WR42 12 +#define SR42 5 +#define WR43 2 +#define SR43 14 +#define WR44 10 +#define SR44 13 +#define WR45 0 +#define SR45 13 +#define WR46 4 +#define SR46 7 +#define WR47 13 +#define SR47 5 + +#define WR48 8 +#define SR48 15 +#define WR49 6 +#define SR49 5 +#define WR50 4 +#define SR50 8 +#define WR51 1 +#define SR51 11 +#define WR52 3 +#define SR52 14 +#define WR53 11 +#define SR53 14 +#define WR54 15 +#define SR54 6 +#define WR55 0 +#define SR55 14 +#define WR56 5 +#define SR56 6 +#define WR57 12 +#define SR57 9 +#define WR58 2 +#define SR58 12 +#define WR59 13 +#define SR59 9 +#define WR60 9 +#define SR60 12 +#define WR61 7 +#define SR61 5 +#define WR62 10 +#define SR62 15 +#define WR63 14 +#define SR63 8 + +#define WR64 12 +#define SR64 8 +#define WR65 15 +#define SR65 5 +#define WR66 10 +#define SR66 12 +#define WR67 4 +#define SR67 9 +#define WR68 1 +#define SR68 12 +#define WR69 5 +#define SR69 5 +#define WR70 8 +#define SR70 14 +#define WR71 7 +#define SR71 6 +#define WR72 6 +#define SR72 8 +#define WR73 2 +#define SR73 13 +#define WR74 13 +#define SR74 6 +#define WR75 14 +#define SR75 5 +#define WR76 0 +#define SR76 15 +#define WR77 3 +#define SR77 13 +#define WR78 9 +#define SR78 11 +#define WR79 11 +#define SR79 11 |
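The C implementation above is driven entirely by the RIP1..RIP5 macros in rmd_locl.h together with the word/shift tables in rmdconst.h: each step adds a boolean function of three state words, the scheduled message word X(w) and a round constant, rotates the result and adds the fifth word, then rotates c by 10. The sketch below restates that step outside the OpenSSL macro machinery, purely to illustrate the structure; rotl32 and rmd_step are hypothetical names introduced here, not identifiers from the diff.

```c
/*
 * Illustrative restatement of the step structure behind the RIP1..RIP5
 * macros in rmd_locl.h.  rotl32() and rmd_step() are hypothetical names
 * used only in this sketch; they are not identifiers from the diff above.
 */
#include <stdint.h>

/* Left rotate; the shifts used by RIPEMD-160 are all in 5..15 (plus the
 * fixed rotate by 10), so 1 <= s <= 31 always holds here. */
static uint32_t rotl32(uint32_t x, int s)
{
    return (x << s) | (x >> (32 - s));
}

/* The five boolean functions exactly as rmd_locl.h defines them
 * (F2 and F4 are the "transformed" forms credited to Wei Dai). */
#define F1(x,y,z) ((x) ^ (y) ^ (z))
#define F2(x,y,z) ((((y) ^ (z)) & (x)) ^ (z))
#define F3(x,y,z) (((~(y)) | (x)) ^ (z))
#define F4(x,y,z) ((((x) ^ (y)) & (z)) ^ (y))
#define F5(x,y,z) (((~(z)) | (y)) ^ (x))

/*
 * One round step, mirroring the macro body
 *     a += Fn(b,c,d) + X(w) + K;  a = ROTATE(a,s) + e;  c = ROTATE(c,10);
 * The caller supplies f = Fn(b,c,d) already evaluated, so one helper covers
 * all five round groups; for the RIP1 rounds K is zero (rmdconst.h defines
 * KL0 and KR4 as 0).  rmd_dgst.c gets the same effect by permuting the
 * register names through its long sequences of macro calls.
 */
static void rmd_step(uint32_t *a, uint32_t *c, uint32_t e,
                     uint32_t f, uint32_t xw, uint32_t K, int s)
{
    *a += f + xw + K;
    *a = rotl32(*a, s) + e;
    *c = rotl32(*c, 10);
}
```

For example, the call RIP2(E, A, B, C, D, WL16, SL16, KL1) in rmd_dgst.c corresponds to rmd_step(&E, &B, D, F2(A, B, C), X[7], 0x5A827999UL, 7), since rmdconst.h sets WL16 to message word 7, SL16 to a rotation of 7, and KL1 to 0x5A827999. After the 80 left-lane and 80 right-lane steps, the two half-results are folded back into the context by the rotated additions at the end of ripemd160_block_data_order.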