VirtualBox

source: vbox/trunk/src/libs/openssl-1.1.0g/crypto/sha/asm/sha256-586.pl@69881

Last change on this file since 69881 was 69881, checked in by vboxsync, 7 years ago

Update OpenSSL to 1.1.0g.
bugref:8070: src/libs maintenance

  • Property svn:eol-style set to LF
  • Property svn:executable set to *
File size: 35.9 KB
1#! /usr/bin/env perl
2# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License"). You may not use
5# this file except in compliance with the License. You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9#
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15# ====================================================================
16#
17# SHA256 block transform for x86. September 2007.
18#
19# Performance improvement over compiler generated code varies from
20# 10% to 40% [see below]. Not very impressive on some µ-archs, but
21# it's 5 times smaller and optimizes the number of writes.
22#
23# May 2012.
24#
25# Optimization including two of Pavel Semjanov's ideas, alternative
26# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
27# ~7% on Pentium, ~40% on Atom. As fully unrolled loop body is almost
28# 15x larger, 8KB vs. 560B, it's fired only for longer inputs. But not
29# on P4, where it kills performance, nor Sandy Bridge, where folded
30# loop is approximately as fast...
31#
32# June 2012.
33#
34# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
35# May version, >60% over original. Add AVX+shrd code path, >25%
36# improvement on Sandy Bridge over May version, 60% over original.
37#
38# May 2013.
39#
40# Replace AMD XOP code path with SSSE3 to cover more processors.
41# (Biggest improvement coefficient is on upcoming Atom Silvermont,
42# not shown.) Add AVX+BMI code path.
43#
44# March 2014.
45#
46# Add support for Intel SHA Extensions.
47#
48# Performance in clock cycles per processed byte (less is better):
49#
50#                  gcc     icc     x86 asm(*)   SIMD    x86_64 asm(**)
51# Pentium          46      57      40/38        -       -
52# PIII             36      33      27/24        -       -
53# P4               41      38      28           -       17.3
54# AMD K8           27      25      19/15.5      -       14.9
55# Core2            26      23      18/15.6      14.3    13.8
56# Westmere         27      -       19/15.7      13.4    12.3
57# Sandy Bridge     25      -       15.9         12.4    11.6
58# Ivy Bridge       24      -       15.0         11.4    10.3
59# Haswell          22      -       13.9         9.46    7.80
60# Bulldozer        36      -       27/22        17.0    13.6
61# VIA Nano         36      -       25/22        16.8    16.5
62# Atom             50      -       30/25        21.9    18.9
63# Silvermont       40      -       34/31        22.9    20.6
64#
65# (*) numbers after slash are for unrolled loop, where applicable;
66# (**) x86_64 assembly performance is presented for reference
67# purposes, results are best-available;
68
69$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
70push(@INC,"${dir}","${dir}../../perlasm");
71require "x86asm.pl";
72
73$output=pop;
74open STDOUT,">$output";
75
76&asm_init($ARGV[0],"sha256-586.pl",$ARGV[$#ARGV] eq "386");
77
78$xmm=$avx=0;
79for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
80
81if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
82 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
83 $avx = ($1>=2.19) + ($1>=2.22);
84}
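# ($avx levels, as derived from the probes above and their use below: 0 = no
# AVX code, 1 = AVX path only, 2 = AVX+BMI path as well; each assembler probe
# maps its minimum capable version onto these levels.)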
85
86if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
87 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
88 $avx = ($1>=2.03) + ($1>=2.10);
89}
90
91if ($xmm && !$avx && $ARGV[0] eq "win32" &&
92 `ml 2>&1` =~ /Version ([0-9]+)\./) {
93 $avx = ($1>=10) + ($1>=11);
94}
95
96if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
97 $avx = ($2>=3.0) + ($2>3.0);
98}
99
100$shaext=$xmm; ### set to zero if compiling for 1.0.1
101
102$unroll_after = 64*4; # If pre-evicted from L1P cache first spin of
103 # fully unrolled loop was measured to run about
104 # 3-4x slower. If slowdown coefficient is N and
105 # unrolled loop is m times faster, then you break
106 # even at (N-1)/(m-1) blocks. Then it needs to be
107 # adjusted for probability of code being evicted,
108 # code size/cache size=1/4. Typical m is 1.15...
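# (One way to read these numbers, for illustration only: with N around 3.5 and
# m = 1.15, (N-1)/(m-1) is roughly 17 blocks; weighting by the ~1/4 eviction
# probability brings the break-even down to ~4 blocks, i.e. 4*64 = 256 bytes,
# which is the 64*4 threshold chosen above.)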
109
110$A="eax";
111$E="edx";
112$T="ebx";
113$Aoff=&DWP(4,"esp");
114$Boff=&DWP(8,"esp");
115$Coff=&DWP(12,"esp");
116$Doff=&DWP(16,"esp");
117$Eoff=&DWP(20,"esp");
118$Foff=&DWP(24,"esp");
119$Goff=&DWP(28,"esp");
120$Hoff=&DWP(32,"esp");
121$Xoff=&DWP(36,"esp");
122$K256="ebp";
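# Scratch-frame layout used by the compact loop below (offsets from esp):
# slot 0 caches b^c ("magic"), slots 4..32 hold the working variables A..H
# ($Aoff..$Hoff above), and the 16-word message schedule starts at 36 ($Xoff),
# hence the 4*(9+...) indexing throughout BODY_00_15/BODY_16_63.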
123
124sub BODY_16_63() {
125 &mov ($T,"ecx"); # "ecx" is preloaded
126 &mov ("esi",&DWP(4*(9+15+16-14),"esp"));
127 &ror ("ecx",18-7);
128 &mov ("edi","esi");
129 &ror ("esi",19-17);
130 &xor ("ecx",$T);
131 &shr ($T,3);
132 &ror ("ecx",7);
133 &xor ("esi","edi");
134 &xor ($T,"ecx"); # T = sigma0(X[-15])
135 &ror ("esi",17);
136 &add ($T,&DWP(4*(9+15+16),"esp")); # T += X[-16]
137 &shr ("edi",10);
138 &add ($T,&DWP(4*(9+15+16-9),"esp")); # T += X[-7]
139 #&xor ("edi","esi") # sigma1(X[-2])
140 # &add ($T,"edi"); # T += sigma1(X[-2])
141 # &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
142
143 &BODY_00_15(1);
144}
145sub BODY_00_15() {
146 my $in_16_63=shift;
147
148 &mov ("ecx",$E);
149 &xor ("edi","esi") if ($in_16_63); # sigma1(X[-2])
150 &mov ("esi",$Foff);
151 &ror ("ecx",25-11);
152 &add ($T,"edi") if ($in_16_63); # T += sigma1(X[-2])
153 &mov ("edi",$Goff);
154 &xor ("ecx",$E);
155 &xor ("esi","edi");
156 &mov ($T,&DWP(4*(9+15),"esp")) if (!$in_16_63);
157 &mov (&DWP(4*(9+15),"esp"),$T) if ($in_16_63); # save X[0]
158 &ror ("ecx",11-6);
159 &and ("esi",$E);
160 &mov ($Eoff,$E); # modulo-scheduled
161 &xor ($E,"ecx");
162 &add ($T,$Hoff); # T += h
163 &xor ("esi","edi"); # Ch(e,f,g)
164 &ror ($E,6); # Sigma1(e)
165 &mov ("ecx",$A);
166 &add ($T,"esi"); # T += Ch(e,f,g)
167
168 &ror ("ecx",22-13);
169 &add ($T,$E); # T += Sigma1(e)
170 &mov ("edi",$Boff);
171 &xor ("ecx",$A);
172 &mov ($Aoff,$A); # modulo-scheduled
173 &lea ("esp",&DWP(-4,"esp"));
174 &ror ("ecx",13-2);
175 &mov ("esi",&DWP(0,$K256));
176 &xor ("ecx",$A);
177 &mov ($E,$Eoff); # e in next iteration, d in this one
178 &xor ($A,"edi"); # a ^= b
179 &ror ("ecx",2); # Sigma0(a)
180
181 &add ($T,"esi"); # T+= K[i]
182 &mov (&DWP(0,"esp"),$A); # (b^c) in next round
183 &add ($E,$T); # d += T
184 &and ($A,&DWP(4,"esp")); # a &= (b^c)
185 &add ($T,"ecx"); # T += Sigma0(a)
186 &xor ($A,"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
187 &mov ("ecx",&DWP(4*(9+15+16-1),"esp")) if ($in_16_63); # preload T
188 &add ($K256,4);
189 &add ($A,$T); # h += T
190}
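# Illustration only (a quick self-check, nothing is emitted into the output):
# the rounds above compute Maj(a,b,c) as Ch(a^b,c,b), which lets b^c be cached
# on the stack between rounds instead of recomputing Maj from scratch. The
# identity can be spot-checked in plain Perl with arbitrary sample words:
{
my ($x,$y,$z)=(0xdeadbeef,0x01234567,0x89abcdef);	# arbitrary 32-bit samples
my $maj=($x&$y)^($x&$z)^($y&$z);			# Maj(a,b,c)
my $ch =(($x^$y)&$z)^((($x^$y)^0xffffffff)&$y);		# Ch(a^b,c,b), Ch(e,f,g)=(e&f)^(~e&g)
die "Maj(a,b,c) != Ch(a^b,c,b)" if ($maj!=$ch);
}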
191
192&external_label("OPENSSL_ia32cap_P") if (!$i386);
193
194&function_begin("sha256_block_data_order");
195 &mov ("esi",wparam(0)); # ctx
196 &mov ("edi",wparam(1)); # inp
197 &mov ("eax",wparam(2)); # num
198 &mov ("ebx","esp"); # saved sp
199
200 &call (&label("pic_point")); # make it PIC!
201&set_label("pic_point");
202 &blindpop($K256);
203 &lea ($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));
204
205 &sub ("esp",16);
206 &and ("esp",-64);
207
208 &shl ("eax",6);
209 &add ("eax","edi");
210 &mov (&DWP(0,"esp"),"esi"); # ctx
211 &mov (&DWP(4,"esp"),"edi"); # inp
212 &mov (&DWP(8,"esp"),"eax"); # inp+num*64
213 &mov (&DWP(12,"esp"),"ebx"); # saved sp
214 if (!$i386 && $xmm) {
215 &picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
216 &mov ("ecx",&DWP(0,"edx"));
217 &mov ("ebx",&DWP(4,"edx"));
218 &test ("ecx",1<<20); # check for P4
219 &jnz (&label("loop"));
220 &mov ("edx",&DWP(8,"edx")) if ($xmm);
221 &test ("ecx",1<<24); # check for FXSR
222 &jz ($unroll_after?&label("no_xmm"):&label("loop"));
223 &and ("ecx",1<<30); # mask "Intel CPU" bit
224 &and ("ebx",1<<28|1<<9); # mask AVX and SSSE3 bits
225 &test ("edx",1<<29) if ($shaext); # check for SHA
226 &jnz (&label("shaext")) if ($shaext);
227 &or ("ecx","ebx");
228 &and ("ecx",1<<28|1<<30);
229 &cmp ("ecx",1<<28|1<<30);
230 if ($xmm) {
231 &je (&label("AVX")) if ($avx);
232 &test ("ebx",1<<9); # check for SSSE3
233 &jnz (&label("SSSE3"));
234 } else {
235 &je (&label("loop_shrd"));
236 }
237 if ($unroll_after) {
238&set_label("no_xmm");
239 &sub ("eax","edi");
240 &cmp ("eax",$unroll_after);
241 &jae (&label("unrolled"));
242 } }
243 &jmp (&label("loop"));
244
245sub COMPACT_LOOP() {
246my $suffix=shift;
247
248&set_label("loop$suffix",$suffix?32:16);
249 # copy input block to stack reversing byte and dword order
250 for($i=0;$i<4;$i++) {
251 &mov ("eax",&DWP($i*16+0,"edi"));
252 &mov ("ebx",&DWP($i*16+4,"edi"));
253 &mov ("ecx",&DWP($i*16+8,"edi"));
254 &bswap ("eax");
255 &mov ("edx",&DWP($i*16+12,"edi"));
256 &bswap ("ebx");
257 &push ("eax");
258 &bswap ("ecx");
259 &push ("ebx");
260 &bswap ("edx");
261 &push ("ecx");
262 &push ("edx");
263 }
264 &add ("edi",64);
265 &lea ("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
266 &mov (&DWP(4*(9+16)+4,"esp"),"edi");
267
268 # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
269 &mov ($A,&DWP(0,"esi"));
270 &mov ("ebx",&DWP(4,"esi"));
271 &mov ("ecx",&DWP(8,"esi"));
272 &mov ("edi",&DWP(12,"esi"));
273 # &mov ($Aoff,$A);
274 &mov ($Boff,"ebx");
275 &xor ("ebx","ecx");
276 &mov ($Coff,"ecx");
277 &mov ($Doff,"edi");
278 &mov (&DWP(0,"esp"),"ebx"); # magic
279 &mov ($E,&DWP(16,"esi"));
280 &mov ("ebx",&DWP(20,"esi"));
281 &mov ("ecx",&DWP(24,"esi"));
282 &mov ("edi",&DWP(28,"esi"));
283 # &mov ($Eoff,$E);
284 &mov ($Foff,"ebx");
285 &mov ($Goff,"ecx");
286 &mov ($Hoff,"edi");
287
288&set_label("00_15$suffix",16);
289
290 &BODY_00_15();
291
292 &cmp ("esi",0xc19bf174);
293 &jne (&label("00_15$suffix"));
294
295 &mov ("ecx",&DWP(4*(9+15+16-1),"esp")); # preloaded in BODY_00_15(1)
296 &jmp (&label("16_63$suffix"));
297
298&set_label("16_63$suffix",16);
299
300 &BODY_16_63();
301
302 &cmp ("esi",0xc67178f2);
303 &jne (&label("16_63$suffix"));
304
305 &mov ("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
306 # &mov ($A,$Aoff);
307 &mov ("ebx",$Boff);
308 # &mov ("edi",$Coff);
309 &mov ("ecx",$Doff);
310 &add ($A,&DWP(0,"esi"));
311 &add ("ebx",&DWP(4,"esi"));
312 &add ("edi",&DWP(8,"esi"));
313 &add ("ecx",&DWP(12,"esi"));
314 &mov (&DWP(0,"esi"),$A);
315 &mov (&DWP(4,"esi"),"ebx");
316 &mov (&DWP(8,"esi"),"edi");
317 &mov (&DWP(12,"esi"),"ecx");
318 # &mov ($E,$Eoff);
319 &mov ("eax",$Foff);
320 &mov ("ebx",$Goff);
321 &mov ("ecx",$Hoff);
322 &mov ("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
323 &add ($E,&DWP(16,"esi"));
324 &add ("eax",&DWP(20,"esi"));
325 &add ("ebx",&DWP(24,"esi"));
326 &add ("ecx",&DWP(28,"esi"));
327 &mov (&DWP(16,"esi"),$E);
328 &mov (&DWP(20,"esi"),"eax");
329 &mov (&DWP(24,"esi"),"ebx");
330 &mov (&DWP(28,"esi"),"ecx");
331
332 &lea ("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
333 &sub ($K256,4*64); # rewind K
334
335 &cmp ("edi",&DWP(8,"esp")); # are we done yet?
336 &jb (&label("loop$suffix"));
337}
338 &COMPACT_LOOP();
339 &mov ("esp",&DWP(12,"esp")); # restore sp
340&function_end_A();
341 if (!$i386 && !$xmm) {
342 # ~20% improvement on Sandy Bridge
343 local *ror = sub { &shrd(@_[0],@_) };
344 &COMPACT_LOOP("_shrd");
345 &mov ("esp",&DWP(12,"esp")); # restore sp
346&function_end_A();
347 }
348
349&set_label("K256",64); # Yes! I keep it in the code segment!
350@K256=( 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
351 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
352 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
353 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
354 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
355 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
356 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
357 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
358 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
359 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
360 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
361 0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
362 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
363 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
364 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
365 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
366&data_word(@K256);
367&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # byte swap mask
368&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");
369
370($a,$b,$c,$d,$e,$f,$g,$h)=(0..7); # offsets
371sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
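# The unrolled path below keeps a..h in eight fixed stack slots; rather than
# shuffling data every round, off() rotates the logical-to-slot mapping by
# folding the current round counter $i into the offset, so the slot addressed
# as "a" in this round is addressed as "b" in the next, and so on.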
372
373if (!$i386 && $unroll_after) {
374my @AH=($A,$K256);
375
376&set_label("unrolled",16);
377 &lea ("esp",&DWP(-96,"esp"));
378 # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
379 &mov ($AH[0],&DWP(0,"esi"));
380 &mov ($AH[1],&DWP(4,"esi"));
381 &mov ("ecx",&DWP(8,"esi"));
382 &mov ("ebx",&DWP(12,"esi"));
383 #&mov (&DWP(0,"esp"),$AH[0]);
384 &mov (&DWP(4,"esp"),$AH[1]);
385 &xor ($AH[1],"ecx"); # magic
386 &mov (&DWP(8,"esp"),"ecx");
387 &mov (&DWP(12,"esp"),"ebx");
388 &mov ($E,&DWP(16,"esi"));
389 &mov ("ebx",&DWP(20,"esi"));
390 &mov ("ecx",&DWP(24,"esi"));
391 &mov ("esi",&DWP(28,"esi"));
392 #&mov (&DWP(16,"esp"),$E);
393 &mov (&DWP(20,"esp"),"ebx");
394 &mov (&DWP(24,"esp"),"ecx");
395 &mov (&DWP(28,"esp"),"esi");
396 &jmp (&label("grand_loop"));
397
398&set_label("grand_loop",16);
399 # copy input block to stack reversing byte order
400 for($i=0;$i<5;$i++) {
401 &mov ("ebx",&DWP(12*$i+0,"edi"));
402 &mov ("ecx",&DWP(12*$i+4,"edi"));
403 &bswap ("ebx");
404 &mov ("esi",&DWP(12*$i+8,"edi"));
405 &bswap ("ecx");
406 &mov (&DWP(32+12*$i+0,"esp"),"ebx");
407 &bswap ("esi");
408 &mov (&DWP(32+12*$i+4,"esp"),"ecx");
409 &mov (&DWP(32+12*$i+8,"esp"),"esi");
410 }
411 &mov ("ebx",&DWP($i*12,"edi"));
412 &add ("edi",64);
413 &bswap ("ebx");
414 &mov (&DWP(96+4,"esp"),"edi");
415 &mov (&DWP(32+12*$i,"esp"),"ebx");
416
417 my ($t1,$t2) = ("ecx","esi");
418
419 for ($i=0;$i<64;$i++) {
420
421 if ($i>=16) {
422 &mov ($T,$t1); # $t1 is preloaded
423 # &mov ($t2,&DWP(32+4*(($i+14)&15),"esp"));
424 &ror ($t1,18-7);
425 &mov ("edi",$t2);
426 &ror ($t2,19-17);
427 &xor ($t1,$T);
428 &shr ($T,3);
429 &ror ($t1,7);
430 &xor ($t2,"edi");
431 &xor ($T,$t1); # T = sigma0(X[-15])
432 &ror ($t2,17);
433 &add ($T,&DWP(32+4*($i&15),"esp")); # T += X[-16]
434 &shr ("edi",10);
435 &add ($T,&DWP(32+4*(($i+9)&15),"esp")); # T += X[-7]
436 #&xor ("edi",$t2) # sigma1(X[-2])
437 # &add ($T,"edi"); # T += sigma1(X[-2])
438 # &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]
439 }
440 &mov ($t1,$E);
441 &xor ("edi",$t2) if ($i>=16); # sigma1(X[-2])
442 &mov ($t2,&off($f));
443 &ror ($E,25-11);
444 &add ($T,"edi") if ($i>=16); # T += sigma1(X[-2])
445 &mov ("edi",&off($g));
446 &xor ($E,$t1);
447 &mov ($T,&DWP(32+4*($i&15),"esp")) if ($i<16); # X[i]
448 &mov (&DWP(32+4*($i&15),"esp"),$T) if ($i>=16 && $i<62); # save X[0]
449 &xor ($t2,"edi");
450 &ror ($E,11-6);
451 &and ($t2,$t1);
452 &mov (&off($e),$t1); # save $E, modulo-scheduled
453 &xor ($E,$t1);
454 &add ($T,&off($h)); # T += h
455 &xor ("edi",$t2); # Ch(e,f,g)
456 &ror ($E,6); # Sigma1(e)
457 &mov ($t1,$AH[0]);
458 &add ($T,"edi"); # T += Ch(e,f,g)
459
460 &ror ($t1,22-13);
461 &mov ($t2,$AH[0]);
462 &mov ("edi",&off($b));
463 &xor ($t1,$AH[0]);
464 &mov (&off($a),$AH[0]); # save $A, modulo-scheduled
465 &xor ($AH[0],"edi"); # a ^= b, (b^c) in next round
466 &ror ($t1,13-2);
467 &and ($AH[1],$AH[0]); # (b^c) &= (a^b)
468 &lea ($E,&DWP(@K256[$i],$T,$E)); # T += Sigma1(e)+K[i]
469 &xor ($t1,$t2);
470 &xor ($AH[1],"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
471 &mov ($t2,&DWP(32+4*(($i+2)&15),"esp")) if ($i>=15 && $i<63);
472 &ror ($t1,2); # Sigma0(a)
473
474 &add ($AH[1],$E); # h += T
475 &add ($E,&off($d)); # d += T
476 &add ($AH[1],$t1); # h += Sigma0(a)
477 &mov ($t1,&DWP(32+4*(($i+15)&15),"esp")) if ($i>=15 && $i<63);
478
479 @AH = reverse(@AH); # rotate(a,h)
480 ($t1,$t2) = ($t2,$t1); # rotate(t1,t2)
481 }
482 &mov ("esi",&DWP(96,"esp")); #ctx
483 #&mov ($AH[0],&DWP(0,"esp"));
484 &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
485 #&mov ("edi", &DWP(8,"esp"));
486 &mov ("ecx",&DWP(12,"esp"));
487 &add ($AH[0],&DWP(0,"esi"));
488 &add ($AH[1],&DWP(4,"esi"));
489 &add ("edi",&DWP(8,"esi"));
490 &add ("ecx",&DWP(12,"esi"));
491 &mov (&DWP(0,"esi"),$AH[0]);
492 &mov (&DWP(4,"esi"),$AH[1]);
493 &mov (&DWP(8,"esi"),"edi");
494 &mov (&DWP(12,"esi"),"ecx");
495 #&mov (&DWP(0,"esp"),$AH[0]);
496 &mov (&DWP(4,"esp"),$AH[1]);
497 &xor ($AH[1],"edi"); # magic
498 &mov (&DWP(8,"esp"),"edi");
499 &mov (&DWP(12,"esp"),"ecx");
500 #&mov ($E,&DWP(16,"esp"));
501 &mov ("edi",&DWP(20,"esp"));
502 &mov ("ebx",&DWP(24,"esp"));
503 &mov ("ecx",&DWP(28,"esp"));
504 &add ($E,&DWP(16,"esi"));
505 &add ("edi",&DWP(20,"esi"));
506 &add ("ebx",&DWP(24,"esi"));
507 &add ("ecx",&DWP(28,"esi"));
508 &mov (&DWP(16,"esi"),$E);
509 &mov (&DWP(20,"esi"),"edi");
510 &mov (&DWP(24,"esi"),"ebx");
511 &mov (&DWP(28,"esi"),"ecx");
512 #&mov (&DWP(16,"esp"),$E);
513 &mov (&DWP(20,"esp"),"edi");
514 &mov ("edi",&DWP(96+4,"esp")); # inp
515 &mov (&DWP(24,"esp"),"ebx");
516 &mov (&DWP(28,"esp"),"ecx");
517
518 &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
519 &jb (&label("grand_loop"));
520
521 &mov ("esp",&DWP(96+12,"esp")); # restore sp
522&function_end_A();
523}
524 if (!$i386 && $xmm) {{{
525if ($shaext) {
526######################################################################
527# Intel SHA Extensions implementation of SHA256 update function.
528#
529my ($ctx,$inp,$end)=("esi","edi","eax");
530my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
531my @MSG=map("xmm$_",(3..6));
532
533sub sha256op38 {
534 my ($opcodelet,$dst,$src)=@_;
535 if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
536 { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); }
537}
538sub sha256rnds2 { sha256op38(0xcb,@_); }
539sub sha256msg1 { sha256op38(0xcc,@_); }
540sub sha256msg2 { sha256op38(0xcd,@_); }
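# For reference (assuming xmm-register operands, which is all the callers
# below use): sha256op38() hand-assembles the 0F 38 xx /r SHA opcodes because
# older assemblers lack the mnemonics. E.g. &sha256rnds2($CDGH,$ABEF), i.e.
# sha256rnds2 xmm2,xmm1 (with xmm0 as the implicit third operand), should emit
# the bytes 0x0f,0x38,0xcb,0xd1.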
541
542&set_label("shaext",32);
543 &sub ("esp",32);
544
545 &movdqu ($ABEF,&QWP(0,$ctx)); # DCBA
546 &lea ($K256,&DWP(0x80,$K256));
547 &movdqu ($CDGH,&QWP(16,$ctx)); # HGFE
548 &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
549
550 &pshufd ($Wi,$ABEF,0x1b); # ABCD
551 &pshufd ($ABEF,$ABEF,0xb1); # CDAB
552 &pshufd ($CDGH,$CDGH,0x1b); # EFGH
553 &palignr ($ABEF,$CDGH,8); # ABEF
554 &punpcklqdq ($CDGH,$Wi); # CDGH
555 &jmp (&label("loop_shaext"));
556
557&set_label("loop_shaext",16);
558 &movdqu (@MSG[0],&QWP(0,$inp));
559 &movdqu (@MSG[1],&QWP(0x10,$inp));
560 &movdqu (@MSG[2],&QWP(0x20,$inp));
561 &pshufb (@MSG[0],$TMP);
562 &movdqu (@MSG[3],&QWP(0x30,$inp));
563 &movdqa (&QWP(16,"esp"),$CDGH); # offload
564
565 &movdqa ($Wi,&QWP(0*16-0x80,$K256));
566 &paddd ($Wi,@MSG[0]);
567 &pshufb (@MSG[1],$TMP);
568 &sha256rnds2 ($CDGH,$ABEF); # 0-3
569 &pshufd ($Wi,$Wi,0x0e);
570 &nop ();
571 &movdqa (&QWP(0,"esp"),$ABEF); # offload
572 &sha256rnds2 ($ABEF,$CDGH);
573
574 &movdqa ($Wi,&QWP(1*16-0x80,$K256));
575 &paddd ($Wi,@MSG[1]);
576 &pshufb (@MSG[2],$TMP);
577 &sha256rnds2 ($CDGH,$ABEF); # 4-7
578 &pshufd ($Wi,$Wi,0x0e);
579 &lea ($inp,&DWP(0x40,$inp));
580 &sha256msg1 (@MSG[0],@MSG[1]);
581 &sha256rnds2 ($ABEF,$CDGH);
582
583 &movdqa ($Wi,&QWP(2*16-0x80,$K256));
584 &paddd ($Wi,@MSG[2]);
585 &pshufb (@MSG[3],$TMP);
586 &sha256rnds2 ($CDGH,$ABEF); # 8-11
587 &pshufd ($Wi,$Wi,0x0e);
588 &movdqa ($TMP,@MSG[3]);
589 &palignr ($TMP,@MSG[2],4);
590 &nop ();
591 &paddd (@MSG[0],$TMP);
592 &sha256msg1 (@MSG[1],@MSG[2]);
593 &sha256rnds2 ($ABEF,$CDGH);
594
595 &movdqa ($Wi,&QWP(3*16-0x80,$K256));
596 &paddd ($Wi,@MSG[3]);
597 &sha256msg2 (@MSG[0],@MSG[3]);
598 &sha256rnds2 ($CDGH,$ABEF); # 12-15
599 &pshufd ($Wi,$Wi,0x0e);
600 &movdqa ($TMP,@MSG[0]);
601 &palignr ($TMP,@MSG[3],4);
602 &nop ();
603 &paddd (@MSG[1],$TMP);
604 &sha256msg1 (@MSG[2],@MSG[3]);
605 &sha256rnds2 ($ABEF,$CDGH);
606
607for($i=4;$i<16-3;$i++) {
608 &movdqa ($Wi,&QWP($i*16-0x80,$K256));
609 &paddd ($Wi,@MSG[0]);
610 &sha256msg2 (@MSG[1],@MSG[0]);
611 &sha256rnds2 ($CDGH,$ABEF); # 16-19...
612 &pshufd ($Wi,$Wi,0x0e);
613 &movdqa ($TMP,@MSG[1]);
614 &palignr ($TMP,@MSG[0],4);
615 &nop ();
616 &paddd (@MSG[2],$TMP);
617 &sha256msg1 (@MSG[3],@MSG[0]);
618 &sha256rnds2 ($ABEF,$CDGH);
619
620 push(@MSG,shift(@MSG));
621}
622 &movdqa ($Wi,&QWP(13*16-0x80,$K256));
623 &paddd ($Wi,@MSG[0]);
624 &sha256msg2 (@MSG[1],@MSG[0]);
625 &sha256rnds2 ($CDGH,$ABEF); # 52-55
626 &pshufd ($Wi,$Wi,0x0e);
627 &movdqa ($TMP,@MSG[1]);
628 &palignr ($TMP,@MSG[0],4);
629 &sha256rnds2 ($ABEF,$CDGH);
630 &paddd (@MSG[2],$TMP);
631
632 &movdqa ($Wi,&QWP(14*16-0x80,$K256));
633 &paddd ($Wi,@MSG[1]);
634 &sha256rnds2 ($CDGH,$ABEF); # 56-59
635 &pshufd ($Wi,$Wi,0x0e);
636 &sha256msg2 (@MSG[2],@MSG[1]);
637 &movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
638 &sha256rnds2 ($ABEF,$CDGH);
639
640 &movdqa ($Wi,&QWP(15*16-0x80,$K256));
641 &paddd ($Wi,@MSG[2]);
642 &nop ();
643 &sha256rnds2 ($CDGH,$ABEF); # 60-63
644 &pshufd ($Wi,$Wi,0x0e);
645 &cmp ($end,$inp);
646 &nop ();
647 &sha256rnds2 ($ABEF,$CDGH);
648
649 &paddd ($CDGH,&QWP(16,"esp"));
650 &paddd ($ABEF,&QWP(0,"esp"));
651 &jnz (&label("loop_shaext"));
652
653 &pshufd ($CDGH,$CDGH,0xb1); # DCHG
654 &pshufd ($TMP,$ABEF,0x1b); # FEBA
655 &pshufd ($ABEF,$ABEF,0xb1); # BAFE
656 &punpckhqdq ($ABEF,$CDGH); # DCBA
657 &palignr ($CDGH,$TMP,8); # HGFE
658
659 &mov ("esp",&DWP(32+12,"esp"));
660 &movdqu (&QWP(0,$ctx),$ABEF);
661 &movdqu (&QWP(16,$ctx),$CDGH);
662&function_end_A();
663}
664
665my @X = map("xmm$_",(0..3));
666my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
667my @AH = ($A,$T);
668
669&set_label("SSSE3",32);
670 &lea ("esp",&DWP(-96,"esp"));
671 # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
672 &mov ($AH[0],&DWP(0,"esi"));
673 &mov ($AH[1],&DWP(4,"esi"));
674 &mov ("ecx",&DWP(8,"esi"));
675 &mov ("edi",&DWP(12,"esi"));
676 #&mov (&DWP(0,"esp"),$AH[0]);
677 &mov (&DWP(4,"esp"),$AH[1]);
678 &xor ($AH[1],"ecx"); # magic
679 &mov (&DWP(8,"esp"),"ecx");
680 &mov (&DWP(12,"esp"),"edi");
681 &mov ($E,&DWP(16,"esi"));
682 &mov ("edi",&DWP(20,"esi"));
683 &mov ("ecx",&DWP(24,"esi"));
684 &mov ("esi",&DWP(28,"esi"));
685 #&mov (&DWP(16,"esp"),$E);
686 &mov (&DWP(20,"esp"),"edi");
687 &mov ("edi",&DWP(96+4,"esp")); # inp
688 &mov (&DWP(24,"esp"),"ecx");
689 &mov (&DWP(28,"esp"),"esi");
690 &movdqa ($t3,&QWP(256,$K256));
691 &jmp (&label("grand_ssse3"));
692
693&set_label("grand_ssse3",16);
694 # load input, reverse byte order, add K256[0..15], save to stack
695 &movdqu (@X[0],&QWP(0,"edi"));
696 &movdqu (@X[1],&QWP(16,"edi"));
697 &movdqu (@X[2],&QWP(32,"edi"));
698 &movdqu (@X[3],&QWP(48,"edi"));
699 &add ("edi",64);
700 &pshufb (@X[0],$t3);
701 &mov (&DWP(96+4,"esp"),"edi");
702 &pshufb (@X[1],$t3);
703 &movdqa ($t0,&QWP(0,$K256));
704 &pshufb (@X[2],$t3);
705 &movdqa ($t1,&QWP(16,$K256));
706 &paddd ($t0,@X[0]);
707 &pshufb (@X[3],$t3);
708 &movdqa ($t2,&QWP(32,$K256));
709 &paddd ($t1,@X[1]);
710 &movdqa ($t3,&QWP(48,$K256));
711 &movdqa (&QWP(32+0,"esp"),$t0);
712 &paddd ($t2,@X[2]);
713 &movdqa (&QWP(32+16,"esp"),$t1);
714 &paddd ($t3,@X[3]);
715 &movdqa (&QWP(32+32,"esp"),$t2);
716 &movdqa (&QWP(32+48,"esp"),$t3);
717 &jmp (&label("ssse3_00_47"));
718
719&set_label("ssse3_00_47",16);
720 &add ($K256,64);
721
722sub SSSE3_00_47 () {
723my $j = shift;
724my $body = shift;
725my @X = @_;
726my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
727
728 eval(shift(@insns));
729 &movdqa ($t0,@X[1]);
730 eval(shift(@insns)); # @
731 eval(shift(@insns));
732 &movdqa ($t3,@X[3]);
733 eval(shift(@insns));
734 eval(shift(@insns));
735 &palignr ($t0,@X[0],4); # X[1..4]
736 eval(shift(@insns));
737 eval(shift(@insns)); # @
738 eval(shift(@insns));
739 &palignr ($t3,@X[2],4); # X[9..12]
740 eval(shift(@insns));
741 eval(shift(@insns));
742 eval(shift(@insns));
743 &movdqa ($t1,$t0);
744 eval(shift(@insns)); # @
745 eval(shift(@insns));
746 &movdqa ($t2,$t0);
747 eval(shift(@insns));
748 eval(shift(@insns));
749 &psrld ($t0,3);
750 eval(shift(@insns));
751 eval(shift(@insns)); # @
752 &paddd (@X[0],$t3); # X[0..3] += X[9..12]
753 eval(shift(@insns));
754 eval(shift(@insns));
755 &psrld ($t2,7);
756 eval(shift(@insns));
757 eval(shift(@insns));
758 eval(shift(@insns)); # @
759 eval(shift(@insns));
760 &pshufd ($t3,@X[3],0b11111010); # X[14..15]
761 eval(shift(@insns));
762 eval(shift(@insns));
763 &pslld ($t1,32-18);
764 eval(shift(@insns));
765 eval(shift(@insns)); # @
766 &pxor ($t0,$t2);
767 eval(shift(@insns));
768 eval(shift(@insns));
769 &psrld ($t2,18-7);
770 eval(shift(@insns));
771 eval(shift(@insns));
772 eval(shift(@insns)); # @
773 &pxor ($t0,$t1);
774 eval(shift(@insns));
775 eval(shift(@insns));
776 &pslld ($t1,18-7);
777 eval(shift(@insns));
778 eval(shift(@insns));
779 eval(shift(@insns)); # @
780 &pxor ($t0,$t2);
781 eval(shift(@insns));
782 eval(shift(@insns));
783 &movdqa ($t2,$t3);
784 eval(shift(@insns));
785 eval(shift(@insns));
786 eval(shift(@insns)); # @
787 &pxor ($t0,$t1); # sigma0(X[1..4])
788 eval(shift(@insns));
789 eval(shift(@insns));
790 &psrld ($t3,10);
791 eval(shift(@insns));
792 eval(shift(@insns));
793 eval(shift(@insns)); # @
794 &paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
795 eval(shift(@insns));
796 eval(shift(@insns));
797 &psrlq ($t2,17);
798 eval(shift(@insns));
799 eval(shift(@insns));
800 eval(shift(@insns)); # @
801 &pxor ($t3,$t2);
802 eval(shift(@insns));
803 eval(shift(@insns));
804 &psrlq ($t2,19-17);
805 eval(shift(@insns));
806 eval(shift(@insns));
807 eval(shift(@insns)); # @
808 &pxor ($t3,$t2);
809 eval(shift(@insns));
810 eval(shift(@insns));
811 &pshufd ($t3,$t3,0b10000000);
812 eval(shift(@insns));
813 eval(shift(@insns));
814 eval(shift(@insns)); # @
815 eval(shift(@insns));
816 eval(shift(@insns));
817 eval(shift(@insns));
818 eval(shift(@insns));
819 eval(shift(@insns)); # @
820 eval(shift(@insns));
821 &psrldq ($t3,8);
822 eval(shift(@insns));
823 eval(shift(@insns));
824 eval(shift(@insns));
825 &paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
826 eval(shift(@insns)); # @
827 eval(shift(@insns));
828 eval(shift(@insns));
829 eval(shift(@insns));
830 eval(shift(@insns));
831 eval(shift(@insns)); # @
832 eval(shift(@insns));
833 &pshufd ($t3,@X[0],0b01010000); # X[16..17]
834 eval(shift(@insns));
835 eval(shift(@insns));
836 eval(shift(@insns));
837 &movdqa ($t2,$t3);
838 eval(shift(@insns)); # @
839 &psrld ($t3,10);
840 eval(shift(@insns));
841 &psrlq ($t2,17);
842 eval(shift(@insns));
843 eval(shift(@insns));
844 eval(shift(@insns));
845 eval(shift(@insns)); # @
846 &pxor ($t3,$t2);
847 eval(shift(@insns));
848 eval(shift(@insns));
849 &psrlq ($t2,19-17);
850 eval(shift(@insns));
851 eval(shift(@insns));
852 eval(shift(@insns)); # @
853 &pxor ($t3,$t2);
854 eval(shift(@insns));
855 eval(shift(@insns));
856 eval(shift(@insns));
857 &pshufd ($t3,$t3,0b00001000);
858 eval(shift(@insns));
859 eval(shift(@insns)); # @
860 &movdqa ($t2,&QWP(16*$j,$K256));
861 eval(shift(@insns));
862 eval(shift(@insns));
863 &pslldq ($t3,8);
864 eval(shift(@insns));
865 eval(shift(@insns));
866 eval(shift(@insns)); # @
867 eval(shift(@insns));
868 eval(shift(@insns));
869 eval(shift(@insns));
870 eval(shift(@insns));
871 eval(shift(@insns)); # @
872 &paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
873 eval(shift(@insns));
874 eval(shift(@insns));
875 eval(shift(@insns));
876 eval(shift(@insns));
877 &paddd ($t2,@X[0]);
878 eval(shift(@insns)); # @
879
880 foreach (@insns) { eval; } # remaining instructions
881
882 &movdqa (&QWP(32+16*$j,"esp"),$t2);
883}
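# Each SSSE3_00_47() call advances the message schedule by four words (one xmm
# register of X[]) and interleaves that vector sigma0/sigma1 arithmetic with
# the 4 x ~30 scalar round-body instructions supplied via $body, so the SIMD
# latency is hidden behind the scalar rounds.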
884
885sub body_00_15 () {
886 (
887 '&mov ("ecx",$E);',
888 '&ror ($E,25-11);',
889 '&mov ("esi",&off($f));',
890 '&xor ($E,"ecx");',
891 '&mov ("edi",&off($g));',
892 '&xor ("esi","edi");',
893 '&ror ($E,11-6);',
894 '&and ("esi","ecx");',
895 '&mov (&off($e),"ecx");', # save $E, modulo-scheduled
896 '&xor ($E,"ecx");',
897 '&xor ("edi","esi");', # Ch(e,f,g)
898 '&ror ($E,6);', # T = Sigma1(e)
899 '&mov ("ecx",$AH[0]);',
900 '&add ($E,"edi");', # T += Ch(e,f,g)
901 '&mov ("edi",&off($b));',
902 '&mov ("esi",$AH[0]);',
903
904 '&ror ("ecx",22-13);',
905 '&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
906 '&xor ("ecx",$AH[0]);',
907 '&xor ($AH[0],"edi");', # a ^= b, (b^c) in next round
908 '&add ($E,&off($h));', # T += h
909 '&ror ("ecx",13-2);',
910 '&and ($AH[1],$AH[0]);', # (b^c) &= (a^b)
911 '&xor ("ecx","esi");',
912 '&add ($E,&DWP(32+4*($i&15),"esp"));', # T += K[i]+X[i]
913 '&xor ($AH[1],"edi");', # h = Maj(a,b,c) = Ch(a^b,c,b)
914 '&ror ("ecx",2);', # Sigma0(a)
915
916 '&add ($AH[1],$E);', # h += T
917 '&add ($E,&off($d));', # d += T
918 '&add ($AH[1],"ecx");'. # h += Sigma0(a)
919
920 '@AH = reverse(@AH); $i++;' # rotate(a,h)
921 );
922}
923
924 for ($i=0,$j=0; $j<4; $j++) {
925 &SSSE3_00_47($j,\&body_00_15,@X);
926 push(@X,shift(@X)); # rotate(@X)
927 }
928 &cmp (&DWP(16*$j,$K256),0x00010203);
929 &jne (&label("ssse3_00_47"));
930
931 for ($i=0; $i<16; ) {
932 foreach(body_00_15()) { eval; }
933 }
934
935 &mov ("esi",&DWP(96,"esp")); #ctx
936 #&mov ($AH[0],&DWP(0,"esp"));
937 &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
938 #&mov ("edi", &DWP(8,"esp"));
939 &mov ("ecx",&DWP(12,"esp"));
940 &add ($AH[0],&DWP(0,"esi"));
941 &add ($AH[1],&DWP(4,"esi"));
942 &add ("edi",&DWP(8,"esi"));
943 &add ("ecx",&DWP(12,"esi"));
944 &mov (&DWP(0,"esi"),$AH[0]);
945 &mov (&DWP(4,"esi"),$AH[1]);
946 &mov (&DWP(8,"esi"),"edi");
947 &mov (&DWP(12,"esi"),"ecx");
948 #&mov (&DWP(0,"esp"),$AH[0]);
949 &mov (&DWP(4,"esp"),$AH[1]);
950 &xor ($AH[1],"edi"); # magic
951 &mov (&DWP(8,"esp"),"edi");
952 &mov (&DWP(12,"esp"),"ecx");
953 #&mov ($E,&DWP(16,"esp"));
954 &mov ("edi",&DWP(20,"esp"));
955 &mov ("ecx",&DWP(24,"esp"));
956 &add ($E,&DWP(16,"esi"));
957 &add ("edi",&DWP(20,"esi"));
958 &add ("ecx",&DWP(24,"esi"));
959 &mov (&DWP(16,"esi"),$E);
960 &mov (&DWP(20,"esi"),"edi");
961 &mov (&DWP(20,"esp"),"edi");
962 &mov ("edi",&DWP(28,"esp"));
963 &mov (&DWP(24,"esi"),"ecx");
964 #&mov (&DWP(16,"esp"),$E);
965 &add ("edi",&DWP(28,"esi"));
966 &mov (&DWP(24,"esp"),"ecx");
967 &mov (&DWP(28,"esi"),"edi");
968 &mov (&DWP(28,"esp"),"edi");
969 &mov ("edi",&DWP(96+4,"esp")); # inp
970
971 &movdqa ($t3,&QWP(64,$K256));
972 &sub ($K256,3*64); # rewind K
973 &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
974 &jb (&label("grand_ssse3"));
975
976 &mov ("esp",&DWP(96+12,"esp")); # restore sp
977&function_end_A();
978 if ($avx) {
979&set_label("AVX",32);
980 if ($avx>1) {
981 &and ("edx",1<<8|1<<3); # check for BMI2+BMI1
982 &cmp ("edx",1<<8|1<<3);
983 &je (&label("AVX_BMI"));
984 }
985 &lea ("esp",&DWP(-96,"esp"));
986 &vzeroall ();
987 # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
988 &mov ($AH[0],&DWP(0,"esi"));
989 &mov ($AH[1],&DWP(4,"esi"));
990 &mov ("ecx",&DWP(8,"esi"));
991 &mov ("edi",&DWP(12,"esi"));
992 #&mov (&DWP(0,"esp"),$AH[0]);
993 &mov (&DWP(4,"esp"),$AH[1]);
994 &xor ($AH[1],"ecx"); # magic
995 &mov (&DWP(8,"esp"),"ecx");
996 &mov (&DWP(12,"esp"),"edi");
997 &mov ($E,&DWP(16,"esi"));
998 &mov ("edi",&DWP(20,"esi"));
999 &mov ("ecx",&DWP(24,"esi"));
1000 &mov ("esi",&DWP(28,"esi"));
1001 #&mov (&DWP(16,"esp"),$E);
1002 &mov (&DWP(20,"esp"),"edi");
1003 &mov ("edi",&DWP(96+4,"esp")); # inp
1004 &mov (&DWP(24,"esp"),"ecx");
1005 &mov (&DWP(28,"esp"),"esi");
1006 &vmovdqa ($t3,&QWP(256,$K256));
1007 &jmp (&label("grand_avx"));
1008
1009&set_label("grand_avx",32);
1010 # load input, reverse byte order, add K256[0..15], save to stack
1011 &vmovdqu (@X[0],&QWP(0,"edi"));
1012 &vmovdqu (@X[1],&QWP(16,"edi"));
1013 &vmovdqu (@X[2],&QWP(32,"edi"));
1014 &vmovdqu (@X[3],&QWP(48,"edi"));
1015 &add ("edi",64);
1016 &vpshufb (@X[0],@X[0],$t3);
1017 &mov (&DWP(96+4,"esp"),"edi");
1018 &vpshufb (@X[1],@X[1],$t3);
1019 &vpshufb (@X[2],@X[2],$t3);
1020 &vpaddd ($t0,@X[0],&QWP(0,$K256));
1021 &vpshufb (@X[3],@X[3],$t3);
1022 &vpaddd ($t1,@X[1],&QWP(16,$K256));
1023 &vpaddd ($t2,@X[2],&QWP(32,$K256));
1024 &vpaddd ($t3,@X[3],&QWP(48,$K256));
1025 &vmovdqa (&QWP(32+0,"esp"),$t0);
1026 &vmovdqa (&QWP(32+16,"esp"),$t1);
1027 &vmovdqa (&QWP(32+32,"esp"),$t2);
1028 &vmovdqa (&QWP(32+48,"esp"),$t3);
1029 &jmp (&label("avx_00_47"));
1030
1031&set_label("avx_00_47",16);
1032 &add ($K256,64);
1033
1034sub Xupdate_AVX () {
1035 (
1036 '&vpalignr ($t0,@X[1],@X[0],4);', # X[1..4]
1037 '&vpalignr ($t3,@X[3],@X[2],4);', # X[9..12]
1038 '&vpsrld ($t2,$t0,7);',
1039 '&vpaddd (@X[0],@X[0],$t3);', # X[0..3] += X[9..16]
1040 '&vpsrld ($t3,$t0,3);',
1041 '&vpslld ($t1,$t0,14);',
1042 '&vpxor ($t0,$t3,$t2);',
1043 '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15]
1044 '&vpsrld ($t2,$t2,18-7);',
1045 '&vpxor ($t0,$t0,$t1);',
1046 '&vpslld ($t1,$t1,25-14);',
1047 '&vpxor ($t0,$t0,$t2);',
1048 '&vpsrld ($t2,$t3,10);',
1049 '&vpxor ($t0,$t0,$t1);', # sigma0(X[1..4])
1050 '&vpsrlq ($t1,$t3,17);',
1051 '&vpaddd (@X[0],@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
1052 '&vpxor ($t2,$t2,$t1);',
1053 '&vpsrlq ($t3,$t3,19);',
1054 '&vpxor ($t2,$t2,$t3);', # sigma1(X[14..15])
1055 '&vpshufd ($t3,$t2,0b10000100);',
1056 '&vpsrldq ($t3,$t3,8);',
1057 '&vpaddd (@X[0],@X[0],$t3);', # X[0..1] += sigma1(X[14..15])
1058 '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17]
1059 '&vpsrld ($t2,$t3,10);',
1060 '&vpsrlq ($t1,$t3,17);',
1061 '&vpxor ($t2,$t2,$t1);',
1062 '&vpsrlq ($t3,$t3,19);',
1063 '&vpxor ($t2,$t2,$t3);', # sigma1(X[16..17])
1064 '&vpshufd ($t3,$t2,0b11101000);',
1065 '&vpslldq ($t3,$t3,8);',
1066 '&vpaddd (@X[0],@X[0],$t3);' # X[2..3] += sigma1(X[16..17])
1067 );
1068}
1069
1070local *ror = sub { &shrd(@_[0],@_) };
1071sub AVX_00_47 () {
1072my $j = shift;
1073my $body = shift;
1074my @X = @_;
1075my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
1076my $insn;
1077
1078 foreach (Xupdate_AVX()) { # 31 instructions
1079 eval;
1080 eval(shift(@insns));
1081 eval(shift(@insns));
1082 eval($insn = shift(@insns));
1083 eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
1084 }
1085 &vpaddd ($t2,@X[0],&QWP(16*$j,$K256));
1086 foreach (@insns) { eval; } # remaining instructions
1087 &vmovdqa (&QWP(32+16*$j,"esp"),$t2);
1088}
1089
1090 for ($i=0,$j=0; $j<4; $j++) {
1091 &AVX_00_47($j,\&body_00_15,@X);
1092 push(@X,shift(@X)); # rotate(@X)
1093 }
1094 &cmp (&DWP(16*$j,$K256),0x00010203);
1095 &jne (&label("avx_00_47"));
1096
1097 for ($i=0; $i<16; ) {
1098 foreach(body_00_15()) { eval; }
1099 }
1100
1101 &mov ("esi",&DWP(96,"esp")); #ctx
1102 #&mov ($AH[0],&DWP(0,"esp"));
1103 &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
1104 #&mov ("edi", &DWP(8,"esp"));
1105 &mov ("ecx",&DWP(12,"esp"));
1106 &add ($AH[0],&DWP(0,"esi"));
1107 &add ($AH[1],&DWP(4,"esi"));
1108 &add ("edi",&DWP(8,"esi"));
1109 &add ("ecx",&DWP(12,"esi"));
1110 &mov (&DWP(0,"esi"),$AH[0]);
1111 &mov (&DWP(4,"esi"),$AH[1]);
1112 &mov (&DWP(8,"esi"),"edi");
1113 &mov (&DWP(12,"esi"),"ecx");
1114 #&mov (&DWP(0,"esp"),$AH[0]);
1115 &mov (&DWP(4,"esp"),$AH[1]);
1116 &xor ($AH[1],"edi"); # magic
1117 &mov (&DWP(8,"esp"),"edi");
1118 &mov (&DWP(12,"esp"),"ecx");
1119 #&mov ($E,&DWP(16,"esp"));
1120 &mov ("edi",&DWP(20,"esp"));
1121 &mov ("ecx",&DWP(24,"esp"));
1122 &add ($E,&DWP(16,"esi"));
1123 &add ("edi",&DWP(20,"esi"));
1124 &add ("ecx",&DWP(24,"esi"));
1125 &mov (&DWP(16,"esi"),$E);
1126 &mov (&DWP(20,"esi"),"edi");
1127 &mov (&DWP(20,"esp"),"edi");
1128 &mov ("edi",&DWP(28,"esp"));
1129 &mov (&DWP(24,"esi"),"ecx");
1130 #&mov (&DWP(16,"esp"),$E);
1131 &add ("edi",&DWP(28,"esi"));
1132 &mov (&DWP(24,"esp"),"ecx");
1133 &mov (&DWP(28,"esi"),"edi");
1134 &mov (&DWP(28,"esp"),"edi");
1135 &mov ("edi",&DWP(96+4,"esp")); # inp
1136
1137 &vmovdqa ($t3,&QWP(64,$K256));
1138 &sub ($K256,3*64); # rewind K
1139 &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
1140 &jb (&label("grand_avx"));
1141
1142 &mov ("esp",&DWP(96+12,"esp")); # restore sp
1143 &vzeroall ();
1144&function_end_A();
1145 if ($avx>1) {
1146sub bodyx_00_15 () { # +10%
1147 (
1148 '&rorx ("ecx",$E,6)',
1149 '&rorx ("esi",$E,11)',
1150 '&mov (&off($e),$E)', # save $E, modulo-scheduled
1151 '&rorx ("edi",$E,25)',
1152 '&xor ("ecx","esi")',
1153 '&andn ("esi",$E,&off($g))',
1154 '&xor ("ecx","edi")', # Sigma1(e)
1155 '&and ($E,&off($f))',
1156 '&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
1157 '&or ($E,"esi")', # T = Ch(e,f,g)
1158
1159 '&rorx ("edi",$AH[0],2)',
1160 '&rorx ("esi",$AH[0],13)',
1161 '&lea ($E,&DWP(0,$E,"ecx"))', # T += Sigma1(e)
1162 '&rorx ("ecx",$AH[0],22)',
1163 '&xor ("esi","edi")',
1164 '&mov ("edi",&off($b))',
1165 '&xor ("ecx","esi")', # Sigma0(a)
1166
1167 '&xor ($AH[0],"edi")', # a ^= b, (b^c) in next round
1168 '&add ($E,&off($h))', # T += h
1169 '&and ($AH[1],$AH[0])', # (b^c) &= (a^b)
1170 '&add ($E,&DWP(32+4*($i&15),"esp"))', # T += K[i]+X[i]
1171 '&xor ($AH[1],"edi")', # h = Maj(a,b,c) = Ch(a^b,c,b)
1172
1173 '&add ("ecx",$E)', # h += T
1174 '&add ($E,&off($d))', # d += T
1175 '&lea ($AH[1],&DWP(0,$AH[1],"ecx"));'. # h += Sigma0(a)
1176
1177 '@AH = reverse(@AH); $i++;' # rotate(a,h)
1178 );
1179}
1180
1181&set_label("AVX_BMI",32);
1182 &lea ("esp",&DWP(-96,"esp"));
1183 &vzeroall ();
1184 # copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
1185 &mov ($AH[0],&DWP(0,"esi"));
1186 &mov ($AH[1],&DWP(4,"esi"));
1187 &mov ("ecx",&DWP(8,"esi"));
1188 &mov ("edi",&DWP(12,"esi"));
1189 #&mov (&DWP(0,"esp"),$AH[0]);
1190 &mov (&DWP(4,"esp"),$AH[1]);
1191 &xor ($AH[1],"ecx"); # magic
1192 &mov (&DWP(8,"esp"),"ecx");
1193 &mov (&DWP(12,"esp"),"edi");
1194 &mov ($E,&DWP(16,"esi"));
1195 &mov ("edi",&DWP(20,"esi"));
1196 &mov ("ecx",&DWP(24,"esi"));
1197 &mov ("esi",&DWP(28,"esi"));
1198 #&mov (&DWP(16,"esp"),$E);
1199 &mov (&DWP(20,"esp"),"edi");
1200 &mov ("edi",&DWP(96+4,"esp")); # inp
1201 &mov (&DWP(24,"esp"),"ecx");
1202 &mov (&DWP(28,"esp"),"esi");
1203 &vmovdqa ($t3,&QWP(256,$K256));
1204 &jmp (&label("grand_avx_bmi"));
1205
1206&set_label("grand_avx_bmi",32);
1207 # load input, reverse byte order, add K256[0..15], save to stack
1208 &vmovdqu (@X[0],&QWP(0,"edi"));
1209 &vmovdqu (@X[1],&QWP(16,"edi"));
1210 &vmovdqu (@X[2],&QWP(32,"edi"));
1211 &vmovdqu (@X[3],&QWP(48,"edi"));
1212 &add ("edi",64);
1213 &vpshufb (@X[0],@X[0],$t3);
1214 &mov (&DWP(96+4,"esp"),"edi");
1215 &vpshufb (@X[1],@X[1],$t3);
1216 &vpshufb (@X[2],@X[2],$t3);
1217 &vpaddd ($t0,@X[0],&QWP(0,$K256));
1218 &vpshufb (@X[3],@X[3],$t3);
1219 &vpaddd ($t1,@X[1],&QWP(16,$K256));
1220 &vpaddd ($t2,@X[2],&QWP(32,$K256));
1221 &vpaddd ($t3,@X[3],&QWP(48,$K256));
1222 &vmovdqa (&QWP(32+0,"esp"),$t0);
1223 &vmovdqa (&QWP(32+16,"esp"),$t1);
1224 &vmovdqa (&QWP(32+32,"esp"),$t2);
1225 &vmovdqa (&QWP(32+48,"esp"),$t3);
1226 &jmp (&label("avx_bmi_00_47"));
1227
1228&set_label("avx_bmi_00_47",16);
1229 &add ($K256,64);
1230
1231 for ($i=0,$j=0; $j<4; $j++) {
1232 &AVX_00_47($j,\&bodyx_00_15,@X);
1233 push(@X,shift(@X)); # rotate(@X)
1234 }
1235 &cmp (&DWP(16*$j,$K256),0x00010203);
1236 &jne (&label("avx_bmi_00_47"));
1237
1238 for ($i=0; $i<16; ) {
1239 foreach(bodyx_00_15()) { eval; }
1240 }
1241
1242 &mov ("esi",&DWP(96,"esp")); #ctx
1243 #&mov ($AH[0],&DWP(0,"esp"));
1244 &xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
1245 #&mov ("edi", &DWP(8,"esp"));
1246 &mov ("ecx",&DWP(12,"esp"));
1247 &add ($AH[0],&DWP(0,"esi"));
1248 &add ($AH[1],&DWP(4,"esi"));
1249 &add ("edi",&DWP(8,"esi"));
1250 &add ("ecx",&DWP(12,"esi"));
1251 &mov (&DWP(0,"esi"),$AH[0]);
1252 &mov (&DWP(4,"esi"),$AH[1]);
1253 &mov (&DWP(8,"esi"),"edi");
1254 &mov (&DWP(12,"esi"),"ecx");
1255 #&mov (&DWP(0,"esp"),$AH[0]);
1256 &mov (&DWP(4,"esp"),$AH[1]);
1257 &xor ($AH[1],"edi"); # magic
1258 &mov (&DWP(8,"esp"),"edi");
1259 &mov (&DWP(12,"esp"),"ecx");
1260 #&mov ($E,&DWP(16,"esp"));
1261 &mov ("edi",&DWP(20,"esp"));
1262 &mov ("ecx",&DWP(24,"esp"));
1263 &add ($E,&DWP(16,"esi"));
1264 &add ("edi",&DWP(20,"esi"));
1265 &add ("ecx",&DWP(24,"esi"));
1266 &mov (&DWP(16,"esi"),$E);
1267 &mov (&DWP(20,"esi"),"edi");
1268 &mov (&DWP(20,"esp"),"edi");
1269 &mov ("edi",&DWP(28,"esp"));
1270 &mov (&DWP(24,"esi"),"ecx");
1271 #&mov (&DWP(16,"esp"),$E);
1272 &add ("edi",&DWP(28,"esi"));
1273 &mov (&DWP(24,"esp"),"ecx");
1274 &mov (&DWP(28,"esi"),"edi");
1275 &mov (&DWP(28,"esp"),"edi");
1276 &mov ("edi",&DWP(96+4,"esp")); # inp
1277
1278 &vmovdqa ($t3,&QWP(64,$K256));
1279 &sub ($K256,3*64); # rewind K
1280 &cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
1281 &jb (&label("grand_avx_bmi"));
1282
1283 &mov ("esp",&DWP(96+12,"esp")); # restore sp
1284 &vzeroall ();
1285&function_end_A();
1286 }
1287 }
1288 }}}
1289&function_end_B("sha256_block_data_order");
1290
1291&asm_finish();
1292
1293close STDOUT;