VirtualBox

source: vbox/trunk/src/libs/openssl-1.1.0g/crypto/sha/asm/sha512-x86_64.pl@69890

Last change on this file since 69890 was 69890, checked in by vboxsync, 7 years ago

Added OpenSSL 1.1.0g with unneeded files removed, otherwise unmodified.
bugref:8070: src/libs maintenance

1#! /usr/bin/env perl
2# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License"). You may not use
5# this file except in compliance with the License. You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9#
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. Rights for redistribution and usage in source and binary
13# forms are granted according to the OpenSSL license.
14# ====================================================================
15#
16# sha256/512_block procedure for x86_64.
17#
18# 40% improvement over compiler-generated code on Opteron. On EM64T
19# sha256 was observed to run >80% faster and sha512 - >40%. No magical
20# tricks, just straight implementation... I really wonder why gcc
21# [being armed with inline assembler] fails to generate as fast code.
22# The only thing which is cool about this module is that it's the very
23# same instruction sequence used for both SHA-256 and SHA-512. In the
24# former case the instructions operate on 32-bit operands, while in the
25# latter on 64-bit ones. All I had to do was get one flavor right;
26# the other one passed the test right away:-)
27#
28# sha256_block runs in ~1005 cycles on Opteron, which gives you
29# asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
30# frequency in GHz. sha512_block runs in ~1275 cycles, which results
31# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
32# Well, if you compare it to IA-64 implementation, which maintains
33# X[16] in register bank[!], tends to 4 instructions per CPU clock
34# cycle and runs in 1003 cycles, 1275 is a very good result for a 3-way
35# issue Opteron pipeline and X[16] maintained in memory. So that *if*
36# there is a way to improve it, *then* the only way would be to try to
37# offload X[16] updates to SSE unit, but that would require "deeper"
38# loop unroll, which in turn would naturally cause size blow-up, not
39# to mention increased complexity! And once again, only *if* it's
40# actually possible to noticeably improve overall ILP, instruction
41# level parallelism, on a given CPU implementation in this case.
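#
# For reference, the asymptotic figures quoted above are simply
# bytes-per-block * 1000 / cycles-per-block, i.e. MBps per GHz of core
# clock. A throwaway Perl sketch of that arithmetic (numbers taken from
# the two paragraphs above, subroutine name purely illustrative):
#
#	sub mbps_per_ghz { my ($block_bytes,$cycles)=@_; $block_bytes*1000/$cycles }
#	printf "sha256_block: %.1f MBps/GHz\n", mbps_per_ghz(64,1005);   # ~63.7
#	printf "sha512_block: %.1f MBps/GHz\n", mbps_per_ghz(128,1275);  # ~100.4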
42#
43# Special note on Intel EM64T. While Opteron CPU exhibits perfect
44# performance ratio of 1.5 between 64- and 32-bit flavors [see above],
45# [currently available] EM64T CPUs apparently are far from it. On the
46# contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
47# sha256_block:-( This is presumably because 64-bit shifts/rotates
48# are not atomic instructions, but are implemented in microcode.
49#
50# May 2012.
51#
52# Optimization including one of Pavel Semjanov's ideas, alternative
53# Maj, resulted in >=5% improvement on most CPUs, +20% SHA256 and
54# unfortunately -2% SHA512 on P4 [which nobody should care about
55# that much].
56#
57# June 2012.
58#
59# Add SIMD code paths, see below for improvement coefficients. SSSE3
60# code path was not attempted for SHA512, because the estimated
61# improvement, noticeably less than 9%, is not high enough to justify
62# the effort, at least not on pre-AVX processors. [The obvious exception
63# is VIA Nano, but it has a SHA512 instruction that is faster and
64# should be used instead.] For reference, the corresponding estimated
65# upper limit for improvement for SSSE3 SHA256 is 28%. The fact that
66# higher coefficients are observed on VIA Nano and Bulldozer has more
67# to do with specifics of their architecture [which is topic for
68# separate discussion].
69#
70# November 2012.
71#
72# Add AVX2 code path. Two consecutive input blocks are loaded to
73# 256-bit %ymm registers, with data from the first block in the least
74# significant 128-bit halves and data from the second in the most significant.
75# The data is then processed with the same SIMD instruction sequence as
76# for AVX, but with %ymm as operands. The side effect is an increased stack
77# frame, 448 additional bytes in SHA256 and 1152 in SHA512, and 1.2KB
78# code size increase.
79#
80# March 2014.
81#
82# Add support for Intel SHA Extensions.
83
84######################################################################
85# Current performance in cycles per processed byte (less is better):
86#
87#                SHA256  SSSE3       AVX/XOP(*)       SHA512  AVX/XOP(*)
88#
89# AMD K8         14.9    -           -                9.57    -
90# P4             17.3    -           -                30.8    -
91# Core 2         15.6    13.8(+13%)  -                9.97    -
92# Westmere       14.8    12.3(+19%)  -                9.58    -
93# Sandy Bridge   17.4    14.2(+23%)  11.6(+50%(**))   11.2    8.10(+38%(**))
94# Ivy Bridge     12.6    10.5(+20%)  10.3(+22%)       8.17    7.22(+13%)
95# Haswell        12.2    9.28(+31%)  7.80(+56%)       7.66    5.40(+42%)
96# Skylake        11.4    9.03(+26%)  7.70(+48%)       7.25    5.20(+40%)
97# Bulldozer      21.1    13.6(+54%)  13.6(+54%(***))  13.5    8.58(+57%)
98# VIA Nano       23.0    16.5(+39%)  -                14.7    -
99# Atom           23.0    18.9(+22%)  -                14.7    -
100# Silvermont    27.4    20.6(+33%)  -                17.5    -
101# Goldmont      18.9    14.3(+32%)  4.16(+350%)      12.0    -
102#
103# (*) whichever best applicable, including SHAEXT;
104# (**) switch from ror to shrd stands for fair share of improvement;
105# (***) execution time is fully determined by remaining integer-only
106# part, body_00_15; reducing the amount of SIMD instructions
107# below certain limit makes no difference/sense; to conserve
108# space SHA256 XOP code path is therefore omitted;
109
110$flavour = shift;
111$output = shift;
112if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
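# The two positional arguments consumed above are the perlasm "flavour"
# (assembler/ABI scheme) and the output file name; which transform gets
# generated, sha256_block_data_order or sha512_block_data_order, is decided
# further down by whether the output name contains "512". As an illustration
# only (the exact flavour strings come from the build system; "mingw64",
# "nasm" and "masm" are the ones tested for below, "elf" is the usual Unix
# choice):
#
#   perl sha512-x86_64.pl elf  sha512-x86_64.s
#   perl sha512-x86_64.pl nasm sha512-x86_64.asm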
113
114$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
115
116$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
117( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
118( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
119die "can't locate x86_64-xlate.pl";
120
121if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
122 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
123 $avx = ($1>=2.19) + ($1>=2.22);
124}
125
126if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
127 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
128 $avx = ($1>=2.09) + ($1>=2.10);
129}
130
131if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
132 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
133 $avx = ($1>=10) + ($1>=11);
134}
135
136if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
137 $avx = ($2>=3.0) + ($2>3.0);
138}
139
140$shaext=1; ### set to zero if compiling for 1.0.1
141$avx=1 if (!$shaext && $avx);
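# After the probes above $avx ends up as 0 (no AVX code paths), 1 (the
# AVX/XOP paths further down) or 2 (additionally the AVX2 path), while
# $shaext gates the SHA-extension shortcut for SHA-256.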
142
143open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
144*STDOUT=*OUT;
145
146if ($output =~ /512/) {
147 $func="sha512_block_data_order";
148 $TABLE="K512";
149 $SZ=8;
150 @ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
151 "%r8", "%r9", "%r10","%r11");
152 ($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
153 @Sigma0=(28,34,39);
154 @Sigma1=(14,18,41);
155 @sigma0=(1, 8, 7);
156 @sigma1=(19,61, 6);
157 $rounds=80;
158} else {
159 $func="sha256_block_data_order";
160 $TABLE="K256";
161 $SZ=4;
162 @ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
163 "%r8d","%r9d","%r10d","%r11d");
164 ($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
165 @Sigma0=( 2,13,22);
166 @Sigma1=( 6,11,25);
167 @sigma0=( 7,18, 3);
168 @sigma1=(17,19,10);
169 $rounds=64;
170}
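
# The rotation/shift counts selected above are exactly the SHA-2 Sigma/sigma
# parameters from FIPS 180-4. As a plain-Perl reminder of what the assembly
# below computes, here is the 32-bit (SHA-256) flavour; these helpers are
# illustrative only and are not used by the generator:
sub ror32      { my ($x,$n)=@_; (($x>>$n) | ($x<<(32-$n))) & 0xffffffff }
sub Sigma0_256 { my $x=shift; ror32($x, 2) ^ ror32($x,13) ^ ror32($x,22) }
sub Sigma1_256 { my $x=shift; ror32($x, 6) ^ ror32($x,11) ^ ror32($x,25) }
sub sigma0_256 { my $x=shift; ror32($x, 7) ^ ror32($x,18) ^ ($x>>3)  }
sub sigma1_256 { my $x=shift; ror32($x,17) ^ ror32($x,19) ^ ($x>>10) }
# The SHA-512 flavour has the same shape, just with 64-bit rotates and the
# (28,34,39), (14,18,41), (1,8,7) and (19,61,6) counts listed above.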
171
172$ctx="%rdi"; # 1st arg, zapped by $a3
173$inp="%rsi"; # 2nd arg
174$Tbl="%rbp";
175
176$_ctx="16*$SZ+0*8(%rsp)";
177$_inp="16*$SZ+1*8(%rsp)";
178$_end="16*$SZ+2*8(%rsp)";
179$_rsp="16*$SZ+3*8(%rsp)";
180$framesz="16*$SZ+4*8";
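
# Resulting stack layout for the integer-only code path below (the SIMD paths
# reuse the same slots and, on Win64, append room for saved XMM registers):
#
#	%rsp+0 .. %rsp+16*$SZ-1    X[0..15] message-schedule ring buffer
#	%rsp+16*$SZ+0*8            saved ctx pointer       ($_ctx)
#	%rsp+16*$SZ+1*8            saved input pointer     ($_inp)
#	%rsp+16*$SZ+2*8            input end pointer       ($_end)
#	%rsp+16*$SZ+3*8            caller's original %rsp  ($_rsp)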
181
182
183sub ROUND_00_15()
184{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
185 my $STRIDE=$SZ;
186 $STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));
187
188$code.=<<___;
189 ror \$`$Sigma1[2]-$Sigma1[1]`,$a0
190 mov $f,$a2
191
192 xor $e,$a0
193 ror \$`$Sigma0[2]-$Sigma0[1]`,$a1
194 xor $g,$a2 # f^g
195
196 mov $T1,`$SZ*($i&0xf)`(%rsp)
197 xor $a,$a1
198 and $e,$a2 # (f^g)&e
199
200 ror \$`$Sigma1[1]-$Sigma1[0]`,$a0
201 add $h,$T1 # T1+=h
202 xor $g,$a2 # Ch(e,f,g)=((f^g)&e)^g
203
204 ror \$`$Sigma0[1]-$Sigma0[0]`,$a1
205 xor $e,$a0
206 add $a2,$T1 # T1+=Ch(e,f,g)
207
208 mov $a,$a2
209 add ($Tbl),$T1 # T1+=K[round]
210 xor $a,$a1
211
212 xor $b,$a2 # a^b, b^c in next round
213 ror \$$Sigma1[0],$a0 # Sigma1(e)
214 mov $b,$h
215
216 and $a2,$a3
217 ror \$$Sigma0[0],$a1 # Sigma0(a)
218 add $a0,$T1 # T1+=Sigma1(e)
219
220 xor $a3,$h # h=Maj(a,b,c)=Ch(a^b,c,b)
221 add $T1,$d # d+=T1
222 add $T1,$h # h+=T1
223
224 lea $STRIDE($Tbl),$Tbl # round++
225___
226$code.=<<___ if ($i<15);
227 add $a1,$h # h+=Sigma0(a)
228___
229 ($a2,$a3) = ($a3,$a2);
230}
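
# One ROUND_00_15 invocation above is a software-pipelined rendering of the
# textbook SHA-2 round. A straight-line Perl model of the same update
# (32-bit case, using the Sigma helpers sketched earlier; illustrative only,
# the real code computes Maj via the a^b/b^c trick instead):
sub sha256_round_model {
    my ($Kt,$Wt,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
    my $Ch  = (($e & $f) ^ (~$e & $g)) & 0xffffffff;
    my $Maj = ($a & $b) ^ ($a & $c) ^ ($b & $c);
    my $T1  = ($h + Sigma1_256($e) + $Ch + $Kt + $Wt) & 0xffffffff;
    my $T2  = (Sigma0_256($a) + $Maj) & 0xffffffff;
    # new (a,b,c,d,e,f,g,h)
    return (($T1+$T2) & 0xffffffff, $a, $b, $c, ($d+$T1) & 0xffffffff, $e, $f, $g);
}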
231
232sub ROUND_16_XX()
233{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
234
235$code.=<<___;
236 mov `$SZ*(($i+1)&0xf)`(%rsp),$a0
237 mov `$SZ*(($i+14)&0xf)`(%rsp),$a2
238
239 mov $a0,$T1
240 ror \$`$sigma0[1]-$sigma0[0]`,$a0
241 add $a1,$a # modulo-scheduled h+=Sigma0(a)
242 mov $a2,$a1
243 ror \$`$sigma1[1]-$sigma1[0]`,$a2
244
245 xor $T1,$a0
246 shr \$$sigma0[2],$T1
247 ror \$$sigma0[0],$a0
248 xor $a1,$a2
249 shr \$$sigma1[2],$a1
250
251 ror \$$sigma1[0],$a2
252 xor $a0,$T1 # sigma0(X[(i+1)&0xf])
253 xor $a1,$a2 # sigma1(X[(i+14)&0xf])
254 add `$SZ*(($i+9)&0xf)`(%rsp),$T1
255
256 add `$SZ*($i&0xf)`(%rsp),$T1
257 mov $e,$a0
258 add $a2,$T1
259 mov $a,$a1
260___
261 &ROUND_00_15(@_);
262}
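
# ROUND_16_XX extends the message schedule in place in the 16-word ring
# buffer on the stack: the standard W[i] = sigma1(W[i-2]) + W[i-7] +
# sigma0(W[i-15]) + W[i-16], with all indices taken mod 16. In plain Perl
# (32-bit flavour, illustrative only):
sub expand_w_256 {
    my ($W,$i) = @_;			# $W: ref to the 16-word ring buffer
    $W->[$i & 15] = ($W->[$i & 15]
		   + sigma1_256($W->[($i+14) & 15])
		   + $W->[($i+9) & 15]
		   + sigma0_256($W->[($i+1) & 15])) & 0xffffffff;
}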
263
264$code=<<___;
265.text
266
267.extern OPENSSL_ia32cap_P
268.globl $func
269.type $func,\@function,3
270.align 16
271$func:
272___
273$code.=<<___ if ($SZ==4 || $avx);
274 lea OPENSSL_ia32cap_P(%rip),%r11
275 mov 0(%r11),%r9d
276 mov 4(%r11),%r10d
277 mov 8(%r11),%r11d
278___
279$code.=<<___ if ($SZ==4 && $shaext);
280 test \$`1<<29`,%r11d # check for SHA
281 jnz _shaext_shortcut
282___
283$code.=<<___ if ($avx && $SZ==8);
284 test \$`1<<11`,%r10d # check for XOP
285 jnz .Lxop_shortcut
286___
287$code.=<<___ if ($avx>1);
288 and \$`1<<8|1<<5|1<<3`,%r11d # check for BMI2+AVX2+BMI1
289 cmp \$`1<<8|1<<5|1<<3`,%r11d
290 je .Lavx2_shortcut
291___
292$code.=<<___ if ($avx);
293 and \$`1<<30`,%r9d # mask "Intel CPU" bit
294 and \$`1<<28|1<<9`,%r10d # mask AVX and SSSE3 bits
295 or %r9d,%r10d
296 cmp \$`1<<28|1<<9|1<<30`,%r10d
297 je .Lavx_shortcut
298___
299$code.=<<___ if ($SZ==4);
300 test \$`1<<9`,%r10d
301 jnz .Lssse3_shortcut
302___
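# Taken together, the capability checks above dispatch in this order of
# preference: SHA extensions (SHA-256 only), XOP (SHA-512 only, see note (***)
# above), AVX2 (which also requires the BMI1/BMI2 bits), AVX (Intel CPUs
# advertising AVX+SSSE3), then SSSE3 (SHA-256 only); anything else falls
# through to the integer-only implementation that follows.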
303$code.=<<___;
304 push %rbx
305 push %rbp
306 push %r12
307 push %r13
308 push %r14
309 push %r15
310 mov %rsp,%r11 # copy %rsp
311 shl \$4,%rdx # num*16
312 sub \$$framesz,%rsp
313 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
314 and \$-64,%rsp # align stack frame
315 mov $ctx,$_ctx # save ctx, 1st arg
316 mov $inp,$_inp # save inp, 2nd arg
317 mov %rdx,$_end # save end pointer, "3rd" arg
318 mov %r11,$_rsp # save copy of %rsp
319.Lprologue:
320
321 mov $SZ*0($ctx),$A
322 mov $SZ*1($ctx),$B
323 mov $SZ*2($ctx),$C
324 mov $SZ*3($ctx),$D
325 mov $SZ*4($ctx),$E
326 mov $SZ*5($ctx),$F
327 mov $SZ*6($ctx),$G
328 mov $SZ*7($ctx),$H
329 jmp .Lloop
330
331.align 16
332.Lloop:
333 mov $B,$a3
334 lea $TABLE(%rip),$Tbl
335 xor $C,$a3 # magic
336___
337 for($i=0;$i<16;$i++) {
338 $code.=" mov $SZ*$i($inp),$T1\n";
339 $code.=" mov @ROT[4],$a0\n";
340 $code.=" mov @ROT[0],$a1\n";
341 $code.=" bswap $T1\n";
342 &ROUND_00_15($i,@ROT);
343 unshift(@ROT,pop(@ROT));
344 }
345$code.=<<___;
346 jmp .Lrounds_16_xx
347.align 16
348.Lrounds_16_xx:
349___
350 for(;$i<32;$i++) {
351 &ROUND_16_XX($i,@ROT);
352 unshift(@ROT,pop(@ROT));
353 }
354
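# The 16-round batches keep going for as long as the byte at offset $SZ-1 of
# the current table position is non-zero: every K[] constant has a non-zero
# top byte, while the byte-order mask appended after the table has 0x00
# there, so the cmpb below doubles as the 64/80-round terminator without a
# separate round counter.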
355$code.=<<___;
356 cmpb \$0,`$SZ-1`($Tbl)
357 jnz .Lrounds_16_xx
358
359 mov $_ctx,$ctx
360 add $a1,$A # modulo-scheduled h+=Sigma0(a)
361 lea 16*$SZ($inp),$inp
362
363 add $SZ*0($ctx),$A
364 add $SZ*1($ctx),$B
365 add $SZ*2($ctx),$C
366 add $SZ*3($ctx),$D
367 add $SZ*4($ctx),$E
368 add $SZ*5($ctx),$F
369 add $SZ*6($ctx),$G
370 add $SZ*7($ctx),$H
371
372 cmp $_end,$inp
373
374 mov $A,$SZ*0($ctx)
375 mov $B,$SZ*1($ctx)
376 mov $C,$SZ*2($ctx)
377 mov $D,$SZ*3($ctx)
378 mov $E,$SZ*4($ctx)
379 mov $F,$SZ*5($ctx)
380 mov $G,$SZ*6($ctx)
381 mov $H,$SZ*7($ctx)
382 jb .Lloop
383
384 mov $_rsp,%rsi
385 mov (%rsi),%r15
386 mov 8(%rsi),%r14
387 mov 16(%rsi),%r13
388 mov 24(%rsi),%r12
389 mov 32(%rsi),%rbp
390 mov 40(%rsi),%rbx
391 lea 48(%rsi),%rsp
392.Lepilogue:
393 ret
394.size $func,.-$func
395___
396
397if ($SZ==4) {
398$code.=<<___;
399.align 64
400.type $TABLE,\@object
401$TABLE:
402 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
403 .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
404 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
405 .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
406 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
407 .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
408 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
409 .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
410 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
411 .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
412 .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
413 .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
414 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
415 .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
416 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
417 .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
418 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
419 .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
420 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
421 .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
422 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
423 .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
424 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
425 .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
426 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
427 .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
428 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
429 .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
430 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
431 .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
432 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
433 .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
434
435 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
436 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
437 .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
438 .long 0x03020100,0x0b0a0908,0xffffffff,0xffffffff
439 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
440 .long 0xffffffff,0xffffffff,0x03020100,0x0b0a0908
441 .asciz "SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
442___
443} else {
444$code.=<<___;
445.align 64
446.type $TABLE,\@object
447$TABLE:
448 .quad 0x428a2f98d728ae22,0x7137449123ef65cd
449 .quad 0x428a2f98d728ae22,0x7137449123ef65cd
450 .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
451 .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
452 .quad 0x3956c25bf348b538,0x59f111f1b605d019
453 .quad 0x3956c25bf348b538,0x59f111f1b605d019
454 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
455 .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
456 .quad 0xd807aa98a3030242,0x12835b0145706fbe
457 .quad 0xd807aa98a3030242,0x12835b0145706fbe
458 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
459 .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
460 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
461 .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
462 .quad 0x9bdc06a725c71235,0xc19bf174cf692694
463 .quad 0x9bdc06a725c71235,0xc19bf174cf692694
464 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
465 .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
466 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
467 .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
468 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
469 .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
470 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
471 .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
472 .quad 0x983e5152ee66dfab,0xa831c66d2db43210
473 .quad 0x983e5152ee66dfab,0xa831c66d2db43210
474 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
475 .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
476 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
477 .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
478 .quad 0x06ca6351e003826f,0x142929670a0e6e70
479 .quad 0x06ca6351e003826f,0x142929670a0e6e70
480 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
481 .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
482 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
483 .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
484 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
485 .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
486 .quad 0x81c2c92e47edaee6,0x92722c851482353b
487 .quad 0x81c2c92e47edaee6,0x92722c851482353b
488 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
489 .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
490 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
491 .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
492 .quad 0xd192e819d6ef5218,0xd69906245565a910
493 .quad 0xd192e819d6ef5218,0xd69906245565a910
494 .quad 0xf40e35855771202a,0x106aa07032bbd1b8
495 .quad 0xf40e35855771202a,0x106aa07032bbd1b8
496 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
497 .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
498 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
499 .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
500 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
501 .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
502 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
503 .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
504 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
505 .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
506 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
507 .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
508 .quad 0x90befffa23631e28,0xa4506cebde82bde9
509 .quad 0x90befffa23631e28,0xa4506cebde82bde9
510 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
511 .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
512 .quad 0xca273eceea26619c,0xd186b8c721c0c207
513 .quad 0xca273eceea26619c,0xd186b8c721c0c207
514 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
515 .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
516 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
517 .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
518 .quad 0x113f9804bef90dae,0x1b710b35131c471b
519 .quad 0x113f9804bef90dae,0x1b710b35131c471b
520 .quad 0x28db77f523047d84,0x32caab7b40c72493
521 .quad 0x28db77f523047d84,0x32caab7b40c72493
522 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
523 .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
524 .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
525 .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
526 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
527 .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
528
529 .quad 0x0001020304050607,0x08090a0b0c0d0e0f
530 .quad 0x0001020304050607,0x08090a0b0c0d0e0f
531 .asciz "SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
532___
533}
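
# Each round constant appears twice in the tables above so that the AVX2 code
# path, which keeps two input blocks in the two 128-bit lanes of a %ymm
# register, can pick up the same K[] values for both lanes with a single
# aligned load; the scalar and xmm paths simply step over the duplicates
# (that is what the extra 16 in $STRIDE is for). The trailing rows are the
# byte-swap masks, plus, in the SHA-256 table, the two shuffle masks used by
# the AVX sigma1 computation.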
534
535######################################################################
536# SIMD code paths
537#
538if ($SZ==4 && $shaext) {{{
539######################################################################
540# Intel SHA Extensions implementation of SHA256 update function.
541#
542my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx");
543
544my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10));
545my @MSG=map("%xmm$_",(3..6));
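
# The SHA-NI instructions keep the eight 32-bit state words as two 128-bit
# halves, {A,B,E,F} and {C,D,G,H}; the pshufd/palignr/punpcklqdq sequence
# below converts the linear A..H layout stored in the context into that form
# on entry and back on exit. Each sha256rnds2 performs two rounds, taking the
# pre-added round constants + message words through the implicit %xmm0 ($Wi).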
546
547$code.=<<___;
548.type sha256_block_data_order_shaext,\@function,3
549.align 64
550sha256_block_data_order_shaext:
551_shaext_shortcut:
552___
553$code.=<<___ if ($win64);
554 lea `-8-5*16`(%rsp),%rsp
555 movaps %xmm6,-8-5*16(%rax)
556 movaps %xmm7,-8-4*16(%rax)
557 movaps %xmm8,-8-3*16(%rax)
558 movaps %xmm9,-8-2*16(%rax)
559 movaps %xmm10,-8-1*16(%rax)
560.Lprologue_shaext:
561___
562$code.=<<___;
563 lea K256+0x80(%rip),$Tbl
564 movdqu ($ctx),$ABEF # DCBA
565 movdqu 16($ctx),$CDGH # HGFE
566 movdqa 0x200-0x80($Tbl),$TMP # byte swap mask
567
568 pshufd \$0x1b,$ABEF,$Wi # ABCD
569 pshufd \$0xb1,$ABEF,$ABEF # CDAB
570 pshufd \$0x1b,$CDGH,$CDGH # EFGH
571 movdqa $TMP,$BSWAP # offload
572 palignr \$8,$CDGH,$ABEF # ABEF
573 punpcklqdq $Wi,$CDGH # CDGH
574 jmp .Loop_shaext
575
576.align 16
577.Loop_shaext:
578 movdqu ($inp),@MSG[0]
579 movdqu 0x10($inp),@MSG[1]
580 movdqu 0x20($inp),@MSG[2]
581 pshufb $TMP,@MSG[0]
582 movdqu 0x30($inp),@MSG[3]
583
584 movdqa 0*32-0x80($Tbl),$Wi
585 paddd @MSG[0],$Wi
586 pshufb $TMP,@MSG[1]
587 movdqa $CDGH,$CDGH_SAVE # offload
588 sha256rnds2 $ABEF,$CDGH # 0-3
589 pshufd \$0x0e,$Wi,$Wi
590 nop
591 movdqa $ABEF,$ABEF_SAVE # offload
592 sha256rnds2 $CDGH,$ABEF
593
594 movdqa 1*32-0x80($Tbl),$Wi
595 paddd @MSG[1],$Wi
596 pshufb $TMP,@MSG[2]
597 sha256rnds2 $ABEF,$CDGH # 4-7
598 pshufd \$0x0e,$Wi,$Wi
599 lea 0x40($inp),$inp
600 sha256msg1 @MSG[1],@MSG[0]
601 sha256rnds2 $CDGH,$ABEF
602
603 movdqa 2*32-0x80($Tbl),$Wi
604 paddd @MSG[2],$Wi
605 pshufb $TMP,@MSG[3]
606 sha256rnds2 $ABEF,$CDGH # 8-11
607 pshufd \$0x0e,$Wi,$Wi
608 movdqa @MSG[3],$TMP
609 palignr \$4,@MSG[2],$TMP
610 nop
611 paddd $TMP,@MSG[0]
612 sha256msg1 @MSG[2],@MSG[1]
613 sha256rnds2 $CDGH,$ABEF
614
615 movdqa 3*32-0x80($Tbl),$Wi
616 paddd @MSG[3],$Wi
617 sha256msg2 @MSG[3],@MSG[0]
618 sha256rnds2 $ABEF,$CDGH # 12-15
619 pshufd \$0x0e,$Wi,$Wi
620 movdqa @MSG[0],$TMP
621 palignr \$4,@MSG[3],$TMP
622 nop
623 paddd $TMP,@MSG[1]
624 sha256msg1 @MSG[3],@MSG[2]
625 sha256rnds2 $CDGH,$ABEF
626___
627for($i=4;$i<16-3;$i++) {
628$code.=<<___;
629 movdqa $i*32-0x80($Tbl),$Wi
630 paddd @MSG[0],$Wi
631 sha256msg2 @MSG[0],@MSG[1]
632 sha256rnds2 $ABEF,$CDGH # 16-19...
633 pshufd \$0x0e,$Wi,$Wi
634 movdqa @MSG[1],$TMP
635 palignr \$4,@MSG[0],$TMP
636 nop
637 paddd $TMP,@MSG[2]
638 sha256msg1 @MSG[0],@MSG[3]
639 sha256rnds2 $CDGH,$ABEF
640___
641 push(@MSG,shift(@MSG));
642}
643$code.=<<___;
644 movdqa 13*32-0x80($Tbl),$Wi
645 paddd @MSG[0],$Wi
646 sha256msg2 @MSG[0],@MSG[1]
647 sha256rnds2 $ABEF,$CDGH # 52-55
648 pshufd \$0x0e,$Wi,$Wi
649 movdqa @MSG[1],$TMP
650 palignr \$4,@MSG[0],$TMP
651 sha256rnds2 $CDGH,$ABEF
652 paddd $TMP,@MSG[2]
653
654 movdqa 14*32-0x80($Tbl),$Wi
655 paddd @MSG[1],$Wi
656 sha256rnds2 $ABEF,$CDGH # 56-59
657 pshufd \$0x0e,$Wi,$Wi
658 sha256msg2 @MSG[1],@MSG[2]
659 movdqa $BSWAP,$TMP
660 sha256rnds2 $CDGH,$ABEF
661
662 movdqa 15*32-0x80($Tbl),$Wi
663 paddd @MSG[2],$Wi
664 nop
665 sha256rnds2 $ABEF,$CDGH # 60-63
666 pshufd \$0x0e,$Wi,$Wi
667 dec $num
668 nop
669 sha256rnds2 $CDGH,$ABEF
670
671 paddd $CDGH_SAVE,$CDGH
672 paddd $ABEF_SAVE,$ABEF
673 jnz .Loop_shaext
674
675 pshufd \$0xb1,$CDGH,$CDGH # DCHG
676 pshufd \$0x1b,$ABEF,$TMP # FEBA
677 pshufd \$0xb1,$ABEF,$ABEF # BAFE
678 punpckhqdq $CDGH,$ABEF # DCBA
679 palignr \$8,$TMP,$CDGH # HGFE
680
681 movdqu $ABEF,($ctx)
682 movdqu $CDGH,16($ctx)
683___
684$code.=<<___ if ($win64);
685 movaps -8-5*16(%rax),%xmm6
686 movaps -8-4*16(%rax),%xmm7
687 movaps -8-3*16(%rax),%xmm8
688 movaps -8-2*16(%rax),%xmm9
689 movaps -8-1*16(%rax),%xmm10
690 mov %rax,%rsp
691.Lepilogue_shaext:
692___
693$code.=<<___;
694 ret
695.size sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
696___
697}}}
698{{{
699
700my $a4=$T1;
701my ($a,$b,$c,$d,$e,$f,$g,$h);
702
703sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
704{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
705 my $arg = pop;
706 $arg = "\$$arg" if ($arg*1 eq $arg);
707 $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
708}
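# This AUTOLOAD is what lets the SIMD code below be written as "&ror(...)",
# "&movdqa(...)", "&vpaddd(...)" and so on: any otherwise undefined sub call
# is converted into one line of assembly appended to $code, with the
# destination operand printed last (AT&T operand order).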
709
710sub body_00_15 () {
711 (
712 '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
713
714 '&ror ($a0,$Sigma1[2]-$Sigma1[1])',
715 '&mov ($a,$a1)',
716 '&mov ($a4,$f)',
717
718 '&ror ($a1,$Sigma0[2]-$Sigma0[1])',
719 '&xor ($a0,$e)',
720 '&xor ($a4,$g)', # f^g
721
722 '&ror ($a0,$Sigma1[1]-$Sigma1[0])',
723 '&xor ($a1,$a)',
724 '&and ($a4,$e)', # (f^g)&e
725
726 '&xor ($a0,$e)',
727 '&add ($h,$SZ*($i&15)."(%rsp)")', # h+=X[i]+K[i]
728 '&mov ($a2,$a)',
729
730 '&xor ($a4,$g)', # Ch(e,f,g)=((f^g)&e)^g
731 '&ror ($a1,$Sigma0[1]-$Sigma0[0])',
732 '&xor ($a2,$b)', # a^b, b^c in next round
733
734 '&add ($h,$a4)', # h+=Ch(e,f,g)
735 '&ror ($a0,$Sigma1[0])', # Sigma1(e)
736 '&and ($a3,$a2)', # (b^c)&(a^b)
737
738 '&xor ($a1,$a)',
739 '&add ($h,$a0)', # h+=Sigma1(e)
740 '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b)
741
742 '&ror ($a1,$Sigma0[0])', # Sigma0(a)
743 '&add ($d,$h)', # d+=h
744 '&add ($h,$a3)', # h+=Maj(a,b,c)
745
746 '&mov ($a0,$d)',
747 '&add ($a1,$h);'. # h+=Sigma0(a)
748 '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
749 );
750}
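
# The "magic" value carried in $a3 between rounds is b^c, and Maj is obtained
# via the identity Maj(a,b,c) = ((a^b)&(b^c))^b (the "Ch(a^b,c,b)" in the
# comments above), which lets consecutive rounds share the a^b term. A
# throwaway self-check of that identity (illustrative only, never called):
sub _check_maj_identity {
    for my $t (0..7) {
	my ($a,$b,$c) = (($t>>2)&1, ($t>>1)&1, $t&1);
	die "Maj identity broken" unless
	    ((($a&$b)^($a&$c)^($b&$c)) == ((($a^$b)&($b^$c))^$b));
    }
}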
751
752######################################################################
753# SSSE3 code path
754#
755if ($SZ==4) { # SHA256 only
756my @X = map("%xmm$_",(0..3));
757my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
758
759$code.=<<___;
760.type ${func}_ssse3,\@function,3
761.align 64
762${func}_ssse3:
763.Lssse3_shortcut:
764 push %rbx
765 push %rbp
766 push %r12
767 push %r13
768 push %r14
769 push %r15
770 mov %rsp,%r11 # copy %rsp
771 shl \$4,%rdx # num*16
772 sub \$`$framesz+$win64*16*4`,%rsp
773 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
774 and \$-64,%rsp # align stack frame
775 mov $ctx,$_ctx # save ctx, 1st arg
776 mov $inp,$_inp # save inp, 2nd arg
777 mov %rdx,$_end # save end pointer, "3rd" arg
778 mov %r11,$_rsp # save copy of %rsp
779___
780$code.=<<___ if ($win64);
781 movaps %xmm6,16*$SZ+32(%rsp)
782 movaps %xmm7,16*$SZ+48(%rsp)
783 movaps %xmm8,16*$SZ+64(%rsp)
784 movaps %xmm9,16*$SZ+80(%rsp)
785___
786$code.=<<___;
787.Lprologue_ssse3:
788
789 mov $SZ*0($ctx),$A
790 mov $SZ*1($ctx),$B
791 mov $SZ*2($ctx),$C
792 mov $SZ*3($ctx),$D
793 mov $SZ*4($ctx),$E
794 mov $SZ*5($ctx),$F
795 mov $SZ*6($ctx),$G
796 mov $SZ*7($ctx),$H
797___
798
799$code.=<<___;
800 #movdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
801 #movdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
802 jmp .Lloop_ssse3
803.align 16
804.Lloop_ssse3:
805 movdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
806 movdqu 0x00($inp),@X[0]
807 movdqu 0x10($inp),@X[1]
808 movdqu 0x20($inp),@X[2]
809 pshufb $t3,@X[0]
810 movdqu 0x30($inp),@X[3]
811 lea $TABLE(%rip),$Tbl
812 pshufb $t3,@X[1]
813 movdqa 0x00($Tbl),$t0
814 movdqa 0x20($Tbl),$t1
815 pshufb $t3,@X[2]
816 paddd @X[0],$t0
817 movdqa 0x40($Tbl),$t2
818 pshufb $t3,@X[3]
819 movdqa 0x60($Tbl),$t3
820 paddd @X[1],$t1
821 paddd @X[2],$t2
822 paddd @X[3],$t3
823 movdqa $t0,0x00(%rsp)
824 mov $A,$a1
825 movdqa $t1,0x10(%rsp)
826 mov $B,$a3
827 movdqa $t2,0x20(%rsp)
828 xor $C,$a3 # magic
829 movdqa $t3,0x30(%rsp)
830 mov $E,$a0
831 jmp .Lssse3_00_47
832
833.align 16
834.Lssse3_00_47:
835 sub \$`-16*2*$SZ`,$Tbl # size optimization
836___
837sub Xupdate_256_SSSE3 () {
838 (
839 '&movdqa ($t0,@X[1]);',
840 '&movdqa ($t3,@X[3])',
841 '&palignr ($t0,@X[0],$SZ)', # X[1..4]
842 '&palignr ($t3,@X[2],$SZ);', # X[9..12]
843 '&movdqa ($t1,$t0)',
844 '&movdqa ($t2,$t0);',
845 '&psrld ($t0,$sigma0[2])',
846 '&paddd (@X[0],$t3);', # X[0..3] += X[9..12]
847 '&psrld ($t2,$sigma0[0])',
848 '&pshufd ($t3,@X[3],0b11111010)',# X[14..15]
849 '&pslld ($t1,8*$SZ-$sigma0[1]);'.
850 '&pxor ($t0,$t2)',
851 '&psrld ($t2,$sigma0[1]-$sigma0[0]);'.
852 '&pxor ($t0,$t1)',
853 '&pslld ($t1,$sigma0[1]-$sigma0[0]);'.
854 '&pxor ($t0,$t2);',
855 '&movdqa ($t2,$t3)',
856 '&pxor ($t0,$t1);', # sigma0(X[1..4])
857 '&psrld ($t3,$sigma1[2])',
858 '&paddd (@X[0],$t0);', # X[0..3] += sigma0(X[1..4])
859 '&psrlq ($t2,$sigma1[0])',
860 '&pxor ($t3,$t2);',
861 '&psrlq ($t2,$sigma1[1]-$sigma1[0])',
862 '&pxor ($t3,$t2)',
863 '&pshufb ($t3,$t4)', # sigma1(X[14..15])
864 '&paddd (@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
865 '&pshufd ($t3,@X[0],0b01010000)',# X[16..17]
866 '&movdqa ($t2,$t3);',
867 '&psrld ($t3,$sigma1[2])',
868 '&psrlq ($t2,$sigma1[0])',
869 '&pxor ($t3,$t2);',
870 '&psrlq ($t2,$sigma1[1]-$sigma1[0])',
871 '&pxor ($t3,$t2);',
872 '&movdqa ($t2,16*2*$j."($Tbl)")',
873 '&pshufb ($t3,$t5)',
874 '&paddd (@X[0],$t3)' # X[2..3] += sigma1(X[16..17])
875 );
876}
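# Each pass through the sequence above (and through the interleaved version
# in SSSE3_256_00_47 below) advances the message schedule by four 32-bit
# words at once: X[0..3] += X[9..12] + sigma0(X[1..4]) + sigma1(...), with
# the sigma1 contribution applied in two 2-word steps because X[16..17],
# needed for X[2..3], only exist once X[0..1] have been computed.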
877
878sub SSSE3_256_00_47 () {
879my $j = shift;
880my $body = shift;
881my @X = @_;
882my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
883
884 if (0) {
885 foreach (Xupdate_256_SSSE3()) { # 36 instructions
886 eval;
887 eval(shift(@insns));
888 eval(shift(@insns));
889 eval(shift(@insns));
890 }
891 } else { # squeeze extra 4% on Westmere and 19% on Atom
892 eval(shift(@insns)); #@
893 &movdqa ($t0,@X[1]);
894 eval(shift(@insns));
895 eval(shift(@insns));
896 &movdqa ($t3,@X[3]);
897 eval(shift(@insns)); #@
898 eval(shift(@insns));
899 eval(shift(@insns));
900 eval(shift(@insns)); #@
901 eval(shift(@insns));
902 &palignr ($t0,@X[0],$SZ); # X[1..4]
903 eval(shift(@insns));
904 eval(shift(@insns));
905 &palignr ($t3,@X[2],$SZ); # X[9..12]
906 eval(shift(@insns));
907 eval(shift(@insns));
908 eval(shift(@insns));
909 eval(shift(@insns)); #@
910 &movdqa ($t1,$t0);
911 eval(shift(@insns));
912 eval(shift(@insns));
913 &movdqa ($t2,$t0);
914 eval(shift(@insns)); #@
915 eval(shift(@insns));
916 &psrld ($t0,$sigma0[2]);
917 eval(shift(@insns));
918 eval(shift(@insns));
919 eval(shift(@insns));
920 &paddd (@X[0],$t3); # X[0..3] += X[9..12]
921 eval(shift(@insns)); #@
922 eval(shift(@insns));
923 &psrld ($t2,$sigma0[0]);
924 eval(shift(@insns));
925 eval(shift(@insns));
926 &pshufd ($t3,@X[3],0b11111010); # X[14..15]
927 eval(shift(@insns));
928 eval(shift(@insns)); #@
929 &pslld ($t1,8*$SZ-$sigma0[1]);
930 eval(shift(@insns));
931 eval(shift(@insns));
932 &pxor ($t0,$t2);
933 eval(shift(@insns)); #@
934 eval(shift(@insns));
935 eval(shift(@insns));
936 eval(shift(@insns)); #@
937 &psrld ($t2,$sigma0[1]-$sigma0[0]);
938 eval(shift(@insns));
939 &pxor ($t0,$t1);
940 eval(shift(@insns));
941 eval(shift(@insns));
942 &pslld ($t1,$sigma0[1]-$sigma0[0]);
943 eval(shift(@insns));
944 eval(shift(@insns));
945 &pxor ($t0,$t2);
946 eval(shift(@insns));
947 eval(shift(@insns)); #@
948 &movdqa ($t2,$t3);
949 eval(shift(@insns));
950 eval(shift(@insns));
951 &pxor ($t0,$t1); # sigma0(X[1..4])
952 eval(shift(@insns)); #@
953 eval(shift(@insns));
954 eval(shift(@insns));
955 &psrld ($t3,$sigma1[2]);
956 eval(shift(@insns));
957 eval(shift(@insns));
958 &paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
959 eval(shift(@insns)); #@
960 eval(shift(@insns));
961 &psrlq ($t2,$sigma1[0]);
962 eval(shift(@insns));
963 eval(shift(@insns));
964 eval(shift(@insns));
965 &pxor ($t3,$t2);
966 eval(shift(@insns)); #@
967 eval(shift(@insns));
968 eval(shift(@insns));
969 eval(shift(@insns)); #@
970 &psrlq ($t2,$sigma1[1]-$sigma1[0]);
971 eval(shift(@insns));
972 eval(shift(@insns));
973 &pxor ($t3,$t2);
974 eval(shift(@insns)); #@
975 eval(shift(@insns));
976 eval(shift(@insns));
977 #&pshufb ($t3,$t4); # sigma1(X[14..15])
978 &pshufd ($t3,$t3,0b10000000);
979 eval(shift(@insns));
980 eval(shift(@insns));
981 eval(shift(@insns));
982 &psrldq ($t3,8);
983 eval(shift(@insns));
984 eval(shift(@insns)); #@
985 eval(shift(@insns));
986 eval(shift(@insns));
987 eval(shift(@insns)); #@
988 &paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
989 eval(shift(@insns));
990 eval(shift(@insns));
991 eval(shift(@insns));
992 &pshufd ($t3,@X[0],0b01010000); # X[16..17]
993 eval(shift(@insns));
994 eval(shift(@insns)); #@
995 eval(shift(@insns));
996 &movdqa ($t2,$t3);
997 eval(shift(@insns));
998 eval(shift(@insns));
999 &psrld ($t3,$sigma1[2]);
1000 eval(shift(@insns));
1001 eval(shift(@insns)); #@
1002 &psrlq ($t2,$sigma1[0]);
1003 eval(shift(@insns));
1004 eval(shift(@insns));
1005 &pxor ($t3,$t2);
1006 eval(shift(@insns)); #@
1007 eval(shift(@insns));
1008 eval(shift(@insns));
1009 eval(shift(@insns)); #@
1010 eval(shift(@insns));
1011 &psrlq ($t2,$sigma1[1]-$sigma1[0]);
1012 eval(shift(@insns));
1013 eval(shift(@insns));
1014 eval(shift(@insns));
1015 &pxor ($t3,$t2);
1016 eval(shift(@insns));
1017 eval(shift(@insns));
1018 eval(shift(@insns)); #@
1019 #&pshufb ($t3,$t5);
1020 &pshufd ($t3,$t3,0b00001000);
1021 eval(shift(@insns));
1022 eval(shift(@insns));
1023 &movdqa ($t2,16*2*$j."($Tbl)");
1024 eval(shift(@insns)); #@
1025 eval(shift(@insns));
1026 &pslldq ($t3,8);
1027 eval(shift(@insns));
1028 eval(shift(@insns));
1029 eval(shift(@insns));
1030 &paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
1031 eval(shift(@insns)); #@
1032 eval(shift(@insns));
1033 eval(shift(@insns));
1034 }
1035 &paddd ($t2,@X[0]);
1036 foreach (@insns) { eval; } # remaining instructions
1037 &movdqa (16*$j."(%rsp)",$t2);
1038}
1039
1040 for ($i=0,$j=0; $j<4; $j++) {
1041 &SSSE3_256_00_47($j,\&body_00_15,@X);
1042 push(@X,shift(@X)); # rotate(@X)
1043 }
1044 &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
1045 &jne (".Lssse3_00_47");
1046
1047 for ($i=0; $i<16; ) {
1048 foreach(body_00_15()) { eval; }
1049 }
1050$code.=<<___;
1051 mov $_ctx,$ctx
1052 mov $a1,$A
1053
1054 add $SZ*0($ctx),$A
1055 lea 16*$SZ($inp),$inp
1056 add $SZ*1($ctx),$B
1057 add $SZ*2($ctx),$C
1058 add $SZ*3($ctx),$D
1059 add $SZ*4($ctx),$E
1060 add $SZ*5($ctx),$F
1061 add $SZ*6($ctx),$G
1062 add $SZ*7($ctx),$H
1063
1064 cmp $_end,$inp
1065
1066 mov $A,$SZ*0($ctx)
1067 mov $B,$SZ*1($ctx)
1068 mov $C,$SZ*2($ctx)
1069 mov $D,$SZ*3($ctx)
1070 mov $E,$SZ*4($ctx)
1071 mov $F,$SZ*5($ctx)
1072 mov $G,$SZ*6($ctx)
1073 mov $H,$SZ*7($ctx)
1074 jb .Lloop_ssse3
1075
1076 mov $_rsp,%rsi
1077___
1078$code.=<<___ if ($win64);
1079 movaps 16*$SZ+32(%rsp),%xmm6
1080 movaps 16*$SZ+48(%rsp),%xmm7
1081 movaps 16*$SZ+64(%rsp),%xmm8
1082 movaps 16*$SZ+80(%rsp),%xmm9
1083___
1084$code.=<<___;
1085 mov (%rsi),%r15
1086 mov 8(%rsi),%r14
1087 mov 16(%rsi),%r13
1088 mov 24(%rsi),%r12
1089 mov 32(%rsi),%rbp
1090 mov 40(%rsi),%rbx
1091 lea 48(%rsi),%rsp
1092.Lepilogue_ssse3:
1093 ret
1094.size ${func}_ssse3,.-${func}_ssse3
1095___
1096}
1097
1098if ($avx) {{
1099######################################################################
1100# XOP code path
1101#
1102if ($SZ==8) { # SHA512 only
1103$code.=<<___;
1104.type ${func}_xop,\@function,3
1105.align 64
1106${func}_xop:
1107.Lxop_shortcut:
1108 push %rbx
1109 push %rbp
1110 push %r12
1111 push %r13
1112 push %r14
1113 push %r15
1114 mov %rsp,%r11 # copy %rsp
1115 shl \$4,%rdx # num*16
1116 sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
1117 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
1118 and \$-64,%rsp # align stack frame
1119 mov $ctx,$_ctx # save ctx, 1st arg
1120 mov $inp,$_inp # save inp, 2nd arg
1121 mov %rdx,$_end # save end pointer, "3rd" arg
1122 mov %r11,$_rsp # save copy of %rsp
1123___
1124$code.=<<___ if ($win64);
1125 movaps %xmm6,16*$SZ+32(%rsp)
1126 movaps %xmm7,16*$SZ+48(%rsp)
1127 movaps %xmm8,16*$SZ+64(%rsp)
1128 movaps %xmm9,16*$SZ+80(%rsp)
1129___
1130$code.=<<___ if ($win64 && $SZ>4);
1131 movaps %xmm10,16*$SZ+96(%rsp)
1132 movaps %xmm11,16*$SZ+112(%rsp)
1133___
1134$code.=<<___;
1135.Lprologue_xop:
1136
1137 vzeroupper
1138 mov $SZ*0($ctx),$A
1139 mov $SZ*1($ctx),$B
1140 mov $SZ*2($ctx),$C
1141 mov $SZ*3($ctx),$D
1142 mov $SZ*4($ctx),$E
1143 mov $SZ*5($ctx),$F
1144 mov $SZ*6($ctx),$G
1145 mov $SZ*7($ctx),$H
1146 jmp .Lloop_xop
1147___
1148 if ($SZ==4) { # SHA256
1149 my @X = map("%xmm$_",(0..3));
1150 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
1151
1152$code.=<<___;
1153.align 16
1154.Lloop_xop:
1155 vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
1156 vmovdqu 0x00($inp),@X[0]
1157 vmovdqu 0x10($inp),@X[1]
1158 vmovdqu 0x20($inp),@X[2]
1159 vmovdqu 0x30($inp),@X[3]
1160 vpshufb $t3,@X[0],@X[0]
1161 lea $TABLE(%rip),$Tbl
1162 vpshufb $t3,@X[1],@X[1]
1163 vpshufb $t3,@X[2],@X[2]
1164 vpaddd 0x00($Tbl),@X[0],$t0
1165 vpshufb $t3,@X[3],@X[3]
1166 vpaddd 0x20($Tbl),@X[1],$t1
1167 vpaddd 0x40($Tbl),@X[2],$t2
1168 vpaddd 0x60($Tbl),@X[3],$t3
1169 vmovdqa $t0,0x00(%rsp)
1170 mov $A,$a1
1171 vmovdqa $t1,0x10(%rsp)
1172 mov $B,$a3
1173 vmovdqa $t2,0x20(%rsp)
1174 xor $C,$a3 # magic
1175 vmovdqa $t3,0x30(%rsp)
1176 mov $E,$a0
1177 jmp .Lxop_00_47
1178
1179.align 16
1180.Lxop_00_47:
1181 sub \$`-16*2*$SZ`,$Tbl # size optimization
1182___
1183sub XOP_256_00_47 () {
1184my $j = shift;
1185my $body = shift;
1186my @X = @_;
1187my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
1188
1189 &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..4]
1190 eval(shift(@insns));
1191 eval(shift(@insns));
1192 &vpalignr ($t3,@X[3],@X[2],$SZ); # X[9..12]
1193 eval(shift(@insns));
1194 eval(shift(@insns));
1195 &vprotd ($t1,$t0,8*$SZ-$sigma0[1]);
1196 eval(shift(@insns));
1197 eval(shift(@insns));
1198 &vpsrld ($t0,$t0,$sigma0[2]);
1199 eval(shift(@insns));
1200 eval(shift(@insns));
1201 &vpaddd (@X[0],@X[0],$t3); # X[0..3] += X[9..12]
1202 eval(shift(@insns));
1203 eval(shift(@insns));
1204 eval(shift(@insns));
1205 eval(shift(@insns));
1206 &vprotd ($t2,$t1,$sigma0[1]-$sigma0[0]);
1207 eval(shift(@insns));
1208 eval(shift(@insns));
1209 &vpxor ($t0,$t0,$t1);
1210 eval(shift(@insns));
1211 eval(shift(@insns));
1212 eval(shift(@insns));
1213 eval(shift(@insns));
1214 &vprotd ($t3,@X[3],8*$SZ-$sigma1[1]);
1215 eval(shift(@insns));
1216 eval(shift(@insns));
1217 &vpxor ($t0,$t0,$t2); # sigma0(X[1..4])
1218 eval(shift(@insns));
1219 eval(shift(@insns));
1220 &vpsrld ($t2,@X[3],$sigma1[2]);
1221 eval(shift(@insns));
1222 eval(shift(@insns));
1223 &vpaddd (@X[0],@X[0],$t0); # X[0..3] += sigma0(X[1..4])
1224 eval(shift(@insns));
1225 eval(shift(@insns));
1226 &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
1227 eval(shift(@insns));
1228 eval(shift(@insns));
1229 &vpxor ($t3,$t3,$t2);
1230 eval(shift(@insns));
1231 eval(shift(@insns));
1232 eval(shift(@insns));
1233 eval(shift(@insns));
1234 &vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
1235 eval(shift(@insns));
1236 eval(shift(@insns));
1237 eval(shift(@insns));
1238 eval(shift(@insns));
1239 &vpsrldq ($t3,$t3,8);
1240 eval(shift(@insns));
1241 eval(shift(@insns));
1242 eval(shift(@insns));
1243 eval(shift(@insns));
1244 &vpaddd (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
1245 eval(shift(@insns));
1246 eval(shift(@insns));
1247 eval(shift(@insns));
1248 eval(shift(@insns));
1249 &vprotd ($t3,@X[0],8*$SZ-$sigma1[1]);
1250 eval(shift(@insns));
1251 eval(shift(@insns));
1252 &vpsrld ($t2,@X[0],$sigma1[2]);
1253 eval(shift(@insns));
1254 eval(shift(@insns));
1255 &vprotd ($t1,$t3,$sigma1[1]-$sigma1[0]);
1256 eval(shift(@insns));
1257 eval(shift(@insns));
1258 &vpxor ($t3,$t3,$t2);
1259 eval(shift(@insns));
1260 eval(shift(@insns));
1261 eval(shift(@insns));
1262 eval(shift(@insns));
1263 &vpxor ($t3,$t3,$t1); # sigma1(X[16..17])
1264 eval(shift(@insns));
1265 eval(shift(@insns));
1266 eval(shift(@insns));
1267 eval(shift(@insns));
1268 &vpslldq ($t3,$t3,8); # 22 instructions
1269 eval(shift(@insns));
1270 eval(shift(@insns));
1271 eval(shift(@insns));
1272 eval(shift(@insns));
1273 &vpaddd (@X[0],@X[0],$t3); # X[2..3] += sigma1(X[16..17])
1274 eval(shift(@insns));
1275 eval(shift(@insns));
1276 eval(shift(@insns));
1277 eval(shift(@insns));
1278 &vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
1279 foreach (@insns) { eval; } # remaining instructions
1280 &vmovdqa (16*$j."(%rsp)",$t2);
1281}
1282
1283 for ($i=0,$j=0; $j<4; $j++) {
1284 &XOP_256_00_47($j,\&body_00_15,@X);
1285 push(@X,shift(@X)); # rotate(@X)
1286 }
1287 &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
1288 &jne (".Lxop_00_47");
1289
1290 for ($i=0; $i<16; ) {
1291 foreach(body_00_15()) { eval; }
1292 }
1293
1294 } else { # SHA512
1295 my @X = map("%xmm$_",(0..7));
1296 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
1297
1298$code.=<<___;
1299.align 16
1300.Lloop_xop:
1301 vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
1302 vmovdqu 0x00($inp),@X[0]
1303 lea $TABLE+0x80(%rip),$Tbl # size optimization
1304 vmovdqu 0x10($inp),@X[1]
1305 vmovdqu 0x20($inp),@X[2]
1306 vpshufb $t3,@X[0],@X[0]
1307 vmovdqu 0x30($inp),@X[3]
1308 vpshufb $t3,@X[1],@X[1]
1309 vmovdqu 0x40($inp),@X[4]
1310 vpshufb $t3,@X[2],@X[2]
1311 vmovdqu 0x50($inp),@X[5]
1312 vpshufb $t3,@X[3],@X[3]
1313 vmovdqu 0x60($inp),@X[6]
1314 vpshufb $t3,@X[4],@X[4]
1315 vmovdqu 0x70($inp),@X[7]
1316 vpshufb $t3,@X[5],@X[5]
1317 vpaddq -0x80($Tbl),@X[0],$t0
1318 vpshufb $t3,@X[6],@X[6]
1319 vpaddq -0x60($Tbl),@X[1],$t1
1320 vpshufb $t3,@X[7],@X[7]
1321 vpaddq -0x40($Tbl),@X[2],$t2
1322 vpaddq -0x20($Tbl),@X[3],$t3
1323 vmovdqa $t0,0x00(%rsp)
1324 vpaddq 0x00($Tbl),@X[4],$t0
1325 vmovdqa $t1,0x10(%rsp)
1326 vpaddq 0x20($Tbl),@X[5],$t1
1327 vmovdqa $t2,0x20(%rsp)
1328 vpaddq 0x40($Tbl),@X[6],$t2
1329 vmovdqa $t3,0x30(%rsp)
1330 vpaddq 0x60($Tbl),@X[7],$t3
1331 vmovdqa $t0,0x40(%rsp)
1332 mov $A,$a1
1333 vmovdqa $t1,0x50(%rsp)
1334 mov $B,$a3
1335 vmovdqa $t2,0x60(%rsp)
1336 xor $C,$a3 # magic
1337 vmovdqa $t3,0x70(%rsp)
1338 mov $E,$a0
1339 jmp .Lxop_00_47
1340
1341.align 16
1342.Lxop_00_47:
1343 add \$`16*2*$SZ`,$Tbl
1344___
1345sub XOP_512_00_47 () {
1346my $j = shift;
1347my $body = shift;
1348my @X = @_;
1349my @insns = (&$body,&$body); # 52 instructions
1350
1351 &vpalignr ($t0,@X[1],@X[0],$SZ); # X[1..2]
1352 eval(shift(@insns));
1353 eval(shift(@insns));
1354 &vpalignr ($t3,@X[5],@X[4],$SZ); # X[9..10]
1355 eval(shift(@insns));
1356 eval(shift(@insns));
1357 &vprotq ($t1,$t0,8*$SZ-$sigma0[1]);
1358 eval(shift(@insns));
1359 eval(shift(@insns));
1360 &vpsrlq ($t0,$t0,$sigma0[2]);
1361 eval(shift(@insns));
1362 eval(shift(@insns));
1363 &vpaddq (@X[0],@X[0],$t3); # X[0..1] += X[9..10]
1364 eval(shift(@insns));
1365 eval(shift(@insns));
1366 eval(shift(@insns));
1367 eval(shift(@insns));
1368 &vprotq ($t2,$t1,$sigma0[1]-$sigma0[0]);
1369 eval(shift(@insns));
1370 eval(shift(@insns));
1371 &vpxor ($t0,$t0,$t1);
1372 eval(shift(@insns));
1373 eval(shift(@insns));
1374 eval(shift(@insns));
1375 eval(shift(@insns));
1376 &vprotq ($t3,@X[7],8*$SZ-$sigma1[1]);
1377 eval(shift(@insns));
1378 eval(shift(@insns));
1379 &vpxor ($t0,$t0,$t2); # sigma0(X[1..2])
1380 eval(shift(@insns));
1381 eval(shift(@insns));
1382 &vpsrlq ($t2,@X[7],$sigma1[2]);
1383 eval(shift(@insns));
1384 eval(shift(@insns));
1385 &vpaddq (@X[0],@X[0],$t0); # X[0..1] += sigma0(X[1..2])
1386 eval(shift(@insns));
1387 eval(shift(@insns));
1388 &vprotq ($t1,$t3,$sigma1[1]-$sigma1[0]);
1389 eval(shift(@insns));
1390 eval(shift(@insns));
1391 &vpxor ($t3,$t3,$t2);
1392 eval(shift(@insns));
1393 eval(shift(@insns));
1394 eval(shift(@insns));
1395 eval(shift(@insns));
1396 &vpxor ($t3,$t3,$t1); # sigma1(X[14..15])
1397 eval(shift(@insns));
1398 eval(shift(@insns));
1399 eval(shift(@insns));
1400 eval(shift(@insns));
1401 &vpaddq (@X[0],@X[0],$t3); # X[0..1] += sigma1(X[14..15])
1402 eval(shift(@insns));
1403 eval(shift(@insns));
1404 eval(shift(@insns));
1405 eval(shift(@insns));
1406 &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
1407 foreach (@insns) { eval; } # remaining instructions
1408 &vmovdqa (16*$j."(%rsp)",$t2);
1409}
1410
1411 for ($i=0,$j=0; $j<8; $j++) {
1412 &XOP_512_00_47($j,\&body_00_15,@X);
1413 push(@X,shift(@X)); # rotate(@X)
1414 }
1415 &cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
1416 &jne (".Lxop_00_47");
1417
1418 for ($i=0; $i<16; ) {
1419 foreach(body_00_15()) { eval; }
1420 }
1421}
1422$code.=<<___;
1423 mov $_ctx,$ctx
1424 mov $a1,$A
1425
1426 add $SZ*0($ctx),$A
1427 lea 16*$SZ($inp),$inp
1428 add $SZ*1($ctx),$B
1429 add $SZ*2($ctx),$C
1430 add $SZ*3($ctx),$D
1431 add $SZ*4($ctx),$E
1432 add $SZ*5($ctx),$F
1433 add $SZ*6($ctx),$G
1434 add $SZ*7($ctx),$H
1435
1436 cmp $_end,$inp
1437
1438 mov $A,$SZ*0($ctx)
1439 mov $B,$SZ*1($ctx)
1440 mov $C,$SZ*2($ctx)
1441 mov $D,$SZ*3($ctx)
1442 mov $E,$SZ*4($ctx)
1443 mov $F,$SZ*5($ctx)
1444 mov $G,$SZ*6($ctx)
1445 mov $H,$SZ*7($ctx)
1446 jb .Lloop_xop
1447
1448 mov $_rsp,%rsi
1449 vzeroupper
1450___
1451$code.=<<___ if ($win64);
1452 movaps 16*$SZ+32(%rsp),%xmm6
1453 movaps 16*$SZ+48(%rsp),%xmm7
1454 movaps 16*$SZ+64(%rsp),%xmm8
1455 movaps 16*$SZ+80(%rsp),%xmm9
1456___
1457$code.=<<___ if ($win64 && $SZ>4);
1458 movaps 16*$SZ+96(%rsp),%xmm10
1459 movaps 16*$SZ+112(%rsp),%xmm11
1460___
1461$code.=<<___;
1462 mov (%rsi),%r15
1463 mov 8(%rsi),%r14
1464 mov 16(%rsi),%r13
1465 mov 24(%rsi),%r12
1466 mov 32(%rsi),%rbp
1467 mov 40(%rsi),%rbx
1468 lea 48(%rsi),%rsp
1469.Lepilogue_xop:
1470 ret
1471.size ${func}_xop,.-${func}_xop
1472___
1473}
1474######################################################################
1475# AVX+shrd code path
1476#
1477local *ror = sub { &shrd(@_[0],@_) };
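# From here on every rotate emitted by body_00_15 is expressed as a
# double-precision shift of a register with itself ("shrd \$n,reg,reg"),
# which is the same right-rotation; per the (**) note in the performance
# table above, this switch accounts for a fair share of the AVX-path gain
# on Sandy Bridge.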
1478
1479$code.=<<___;
1480.type ${func}_avx,\@function,3
1481.align 64
1482${func}_avx:
1483.Lavx_shortcut:
1484 push %rbx
1485 push %rbp
1486 push %r12
1487 push %r13
1488 push %r14
1489 push %r15
1490 mov %rsp,%r11 # copy %rsp
1491 shl \$4,%rdx # num*16
1492 sub \$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
1493 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
1494 and \$-64,%rsp # align stack frame
1495 mov $ctx,$_ctx # save ctx, 1st arg
1496 mov $inp,$_inp # save inp, 2nd arg
1497 mov %rdx,$_end # save end pointer, "3rd" arg
1498 mov %r11,$_rsp # save copy of %rsp
1499___
1500$code.=<<___ if ($win64);
1501 movaps %xmm6,16*$SZ+32(%rsp)
1502 movaps %xmm7,16*$SZ+48(%rsp)
1503 movaps %xmm8,16*$SZ+64(%rsp)
1504 movaps %xmm9,16*$SZ+80(%rsp)
1505___
1506$code.=<<___ if ($win64 && $SZ>4);
1507 movaps %xmm10,16*$SZ+96(%rsp)
1508 movaps %xmm11,16*$SZ+112(%rsp)
1509___
1510$code.=<<___;
1511.Lprologue_avx:
1512
1513 vzeroupper
1514 mov $SZ*0($ctx),$A
1515 mov $SZ*1($ctx),$B
1516 mov $SZ*2($ctx),$C
1517 mov $SZ*3($ctx),$D
1518 mov $SZ*4($ctx),$E
1519 mov $SZ*5($ctx),$F
1520 mov $SZ*6($ctx),$G
1521 mov $SZ*7($ctx),$H
1522___
1523 if ($SZ==4) { # SHA256
1524 my @X = map("%xmm$_",(0..3));
1525 my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
1526
1527$code.=<<___;
1528 vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
1529 vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
1530 jmp .Lloop_avx
1531.align 16
1532.Lloop_avx:
1533 vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
1534 vmovdqu 0x00($inp),@X[0]
1535 vmovdqu 0x10($inp),@X[1]
1536 vmovdqu 0x20($inp),@X[2]
1537 vmovdqu 0x30($inp),@X[3]
1538 vpshufb $t3,@X[0],@X[0]
1539 lea $TABLE(%rip),$Tbl
1540 vpshufb $t3,@X[1],@X[1]
1541 vpshufb $t3,@X[2],@X[2]
1542 vpaddd 0x00($Tbl),@X[0],$t0
1543 vpshufb $t3,@X[3],@X[3]
1544 vpaddd 0x20($Tbl),@X[1],$t1
1545 vpaddd 0x40($Tbl),@X[2],$t2
1546 vpaddd 0x60($Tbl),@X[3],$t3
1547 vmovdqa $t0,0x00(%rsp)
1548 mov $A,$a1
1549 vmovdqa $t1,0x10(%rsp)
1550 mov $B,$a3
1551 vmovdqa $t2,0x20(%rsp)
1552 xor $C,$a3 # magic
1553 vmovdqa $t3,0x30(%rsp)
1554 mov $E,$a0
1555 jmp .Lavx_00_47
1556
1557.align 16
1558.Lavx_00_47:
1559 sub \$`-16*2*$SZ`,$Tbl # size optimization
1560___
1561sub Xupdate_256_AVX () {
1562 (
1563 '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..4]
1564 '&vpalignr ($t3,@X[3],@X[2],$SZ)', # X[9..12]
1565 '&vpsrld ($t2,$t0,$sigma0[0]);',
1566 '&vpaddd (@X[0],@X[0],$t3)', # X[0..3] += X[9..12]
1567 '&vpsrld ($t3,$t0,$sigma0[2])',
1568 '&vpslld ($t1,$t0,8*$SZ-$sigma0[1]);',
1569 '&vpxor ($t0,$t3,$t2)',
1570 '&vpshufd ($t3,@X[3],0b11111010)',# X[14..15]
1571 '&vpsrld ($t2,$t2,$sigma0[1]-$sigma0[0]);',
1572 '&vpxor ($t0,$t0,$t1)',
1573 '&vpslld ($t1,$t1,$sigma0[1]-$sigma0[0]);',
1574 '&vpxor ($t0,$t0,$t2)',
1575 '&vpsrld ($t2,$t3,$sigma1[2]);',
1576 '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..4])
1577 '&vpsrlq ($t3,$t3,$sigma1[0]);',
1578 '&vpaddd (@X[0],@X[0],$t0)', # X[0..3] += sigma0(X[1..4])
1579 '&vpxor ($t2,$t2,$t3);',
1580 '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
1581 '&vpxor ($t2,$t2,$t3)',
1582 '&vpshufb ($t2,$t2,$t4)', # sigma1(X[14..15])
1583 '&vpaddd (@X[0],@X[0],$t2)', # X[0..1] += sigma1(X[14..15])
1584 '&vpshufd ($t3,@X[0],0b01010000)',# X[16..17]
1585 '&vpsrld ($t2,$t3,$sigma1[2])',
1586 '&vpsrlq ($t3,$t3,$sigma1[0])',
1587 '&vpxor ($t2,$t2,$t3);',
1588 '&vpsrlq ($t3,$t3,$sigma1[1]-$sigma1[0])',
1589 '&vpxor ($t2,$t2,$t3)',
1590 '&vpshufb ($t2,$t2,$t5)',
1591 '&vpaddd (@X[0],@X[0],$t2)' # X[2..3] += sigma1(X[16..17])
1592 );
1593}
1594
1595sub AVX_256_00_47 () {
1596my $j = shift;
1597my $body = shift;
1598my @X = @_;
1599my @insns = (&$body,&$body,&$body,&$body); # 104 instructions
1600
1601 foreach (Xupdate_256_AVX()) { # 29 instructions
1602 eval;
1603 eval(shift(@insns));
1604 eval(shift(@insns));
1605 eval(shift(@insns));
1606 }
1607 &vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
1608 foreach (@insns) { eval; } # remaining instructions
1609 &vmovdqa (16*$j."(%rsp)",$t2);
1610}
1611
1612 for ($i=0,$j=0; $j<4; $j++) {
1613 &AVX_256_00_47($j,\&body_00_15,@X);
1614 push(@X,shift(@X)); # rotate(@X)
1615 }
1616 &cmpb ($SZ-1+16*2*$SZ."($Tbl)",0);
1617 &jne (".Lavx_00_47");
1618
1619 for ($i=0; $i<16; ) {
1620 foreach(body_00_15()) { eval; }
1621 }
1622
1623 } else { # SHA512
1624 my @X = map("%xmm$_",(0..7));
1625 my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
1626
1627$code.=<<___;
1628 jmp .Lloop_avx
1629.align 16
1630.Lloop_avx:
1631 vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
1632 vmovdqu 0x00($inp),@X[0]
1633 lea $TABLE+0x80(%rip),$Tbl # size optimization
1634 vmovdqu 0x10($inp),@X[1]
1635 vmovdqu 0x20($inp),@X[2]
1636 vpshufb $t3,@X[0],@X[0]
1637 vmovdqu 0x30($inp),@X[3]
1638 vpshufb $t3,@X[1],@X[1]
1639 vmovdqu 0x40($inp),@X[4]
1640 vpshufb $t3,@X[2],@X[2]
1641 vmovdqu 0x50($inp),@X[5]
1642 vpshufb $t3,@X[3],@X[3]
1643 vmovdqu 0x60($inp),@X[6]
1644 vpshufb $t3,@X[4],@X[4]
1645 vmovdqu 0x70($inp),@X[7]
1646 vpshufb $t3,@X[5],@X[5]
1647 vpaddq -0x80($Tbl),@X[0],$t0
1648 vpshufb $t3,@X[6],@X[6]
1649 vpaddq -0x60($Tbl),@X[1],$t1
1650 vpshufb $t3,@X[7],@X[7]
1651 vpaddq -0x40($Tbl),@X[2],$t2
1652 vpaddq -0x20($Tbl),@X[3],$t3
1653 vmovdqa $t0,0x00(%rsp)
1654 vpaddq 0x00($Tbl),@X[4],$t0
1655 vmovdqa $t1,0x10(%rsp)
1656 vpaddq 0x20($Tbl),@X[5],$t1
1657 vmovdqa $t2,0x20(%rsp)
1658 vpaddq 0x40($Tbl),@X[6],$t2
1659 vmovdqa $t3,0x30(%rsp)
1660 vpaddq 0x60($Tbl),@X[7],$t3
1661 vmovdqa $t0,0x40(%rsp)
1662 mov $A,$a1
1663 vmovdqa $t1,0x50(%rsp)
1664 mov $B,$a3
1665 vmovdqa $t2,0x60(%rsp)
1666 xor $C,$a3 # magic
1667 vmovdqa $t3,0x70(%rsp)
1668 mov $E,$a0
1669 jmp .Lavx_00_47
1670
1671.align 16
1672.Lavx_00_47:
1673 add \$`16*2*$SZ`,$Tbl
1674___
1675sub Xupdate_512_AVX () {
1676 (
1677 '&vpalignr ($t0,@X[1],@X[0],$SZ)', # X[1..2]
1678 '&vpalignr ($t3,@X[5],@X[4],$SZ)', # X[9..10]
1679 '&vpsrlq ($t2,$t0,$sigma0[0])',
1680 '&vpaddq (@X[0],@X[0],$t3);', # X[0..1] += X[9..10]
1681 '&vpsrlq ($t3,$t0,$sigma0[2])',
1682 '&vpsllq ($t1,$t0,8*$SZ-$sigma0[1]);',
1683 '&vpxor ($t0,$t3,$t2)',
1684 '&vpsrlq ($t2,$t2,$sigma0[1]-$sigma0[0]);',
1685 '&vpxor ($t0,$t0,$t1)',
1686 '&vpsllq ($t1,$t1,$sigma0[1]-$sigma0[0]);',
1687 '&vpxor ($t0,$t0,$t2)',
1688 '&vpsrlq ($t3,@X[7],$sigma1[2]);',
1689 '&vpxor ($t0,$t0,$t1)', # sigma0(X[1..2])
1690 '&vpsllq ($t2,@X[7],8*$SZ-$sigma1[1]);',
1691 '&vpaddq (@X[0],@X[0],$t0)', # X[0..1] += sigma0(X[1..2])
1692 '&vpsrlq ($t1,@X[7],$sigma1[0]);',
1693 '&vpxor ($t3,$t3,$t2)',
1694 '&vpsllq ($t2,$t2,$sigma1[1]-$sigma1[0]);',
1695 '&vpxor ($t3,$t3,$t1)',
1696 '&vpsrlq ($t1,$t1,$sigma1[1]-$sigma1[0]);',
1697 '&vpxor ($t3,$t3,$t2)',
1698 '&vpxor ($t3,$t3,$t1)', # sigma1(X[14..15])
1699 '&vpaddq (@X[0],@X[0],$t3)', # X[0..1] += sigma1(X[14..15])
1700 );
1701}
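# The SHA-512 schedule update above advances two 64-bit words (X[0..1]) per
# call, so the loop below invokes it eight times per 16-word stride and
# interleaves each call with two body_00_15 rounds; compare the SHA-256
# variant earlier, which covers four 32-bit words per call and interleaves
# four rounds.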
1702
1703sub AVX_512_00_47 () {
1704my $j = shift;
1705my $body = shift;
1706my @X = @_;
1707my @insns = (&$body,&$body); # 52 instructions
1708
1709 foreach (Xupdate_512_AVX()) { # 23 instructions
1710 eval;
1711 eval(shift(@insns));
1712 eval(shift(@insns));
1713 }
1714 &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
1715 foreach (@insns) { eval; } # remaining instructions
1716 &vmovdqa (16*$j."(%rsp)",$t2);
1717}
1718
1719 for ($i=0,$j=0; $j<8; $j++) {
1720 &AVX_512_00_47($j,\&body_00_15,@X);
1721 push(@X,shift(@X)); # rotate(@X)
1722 }
1723 &cmpb ($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
1724 &jne (".Lavx_00_47");
1725
1726 for ($i=0; $i<16; ) {
1727 foreach(body_00_15()) { eval; }
1728 }
1729}
1730$code.=<<___;
1731 mov $_ctx,$ctx
1732 mov $a1,$A
1733
1734 add $SZ*0($ctx),$A
1735 lea 16*$SZ($inp),$inp
1736 add $SZ*1($ctx),$B
1737 add $SZ*2($ctx),$C
1738 add $SZ*3($ctx),$D
1739 add $SZ*4($ctx),$E
1740 add $SZ*5($ctx),$F
1741 add $SZ*6($ctx),$G
1742 add $SZ*7($ctx),$H
1743
1744 cmp $_end,$inp
1745
1746 mov $A,$SZ*0($ctx)
1747 mov $B,$SZ*1($ctx)
1748 mov $C,$SZ*2($ctx)
1749 mov $D,$SZ*3($ctx)
1750 mov $E,$SZ*4($ctx)
1751 mov $F,$SZ*5($ctx)
1752 mov $G,$SZ*6($ctx)
1753 mov $H,$SZ*7($ctx)
1754 jb .Lloop_avx
1755
1756 mov $_rsp,%rsi
1757 vzeroupper
1758___
1759$code.=<<___ if ($win64);
1760 movaps 16*$SZ+32(%rsp),%xmm6
1761 movaps 16*$SZ+48(%rsp),%xmm7
1762 movaps 16*$SZ+64(%rsp),%xmm8
1763 movaps 16*$SZ+80(%rsp),%xmm9
1764___
1765$code.=<<___ if ($win64 && $SZ>4);
1766 movaps 16*$SZ+96(%rsp),%xmm10
1767 movaps 16*$SZ+112(%rsp),%xmm11
1768___
1769$code.=<<___;
1770 mov (%rsi),%r15
1771 mov 8(%rsi),%r14
1772 mov 16(%rsi),%r13
1773 mov 24(%rsi),%r12
1774 mov 32(%rsi),%rbp
1775 mov 40(%rsi),%rbx
1776 lea 48(%rsi),%rsp
1777.Lepilogue_avx:
1778 ret
1779.size ${func}_avx,.-${func}_avx
1780___
1781
1782if ($avx>1) {{
1783######################################################################
1784# AVX2+BMI code path
1785#
1786my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp
1787my $PUSH8=8*2*$SZ;
1788use integer;
1789
1790sub bodyx_00_15 () {
1791 # at start $a1 should be zero, $a3 - $b^$c and $a4 copy of $f
1792 (
1793 '($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
1794
1795 '&add ($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)', # h+=X[i]+K[i]
1796 '&and ($a4,$e)', # f&e
1797 '&rorx ($a0,$e,$Sigma1[2])',
1798 '&rorx ($a2,$e,$Sigma1[1])',
1799
1800 '&lea ($a,"($a,$a1)")', # h+=Sigma0(a) from the past
1801 '&lea ($h,"($h,$a4)")',
1802 '&andn ($a4,$e,$g)', # ~e&g
1803 '&xor ($a0,$a2)',
1804
1805 '&rorx ($a1,$e,$Sigma1[0])',
1806 '&lea ($h,"($h,$a4)")', # h+=Ch(e,f,g)=(e&f)+(~e&g)
1807 '&xor ($a0,$a1)', # Sigma1(e)
1808 '&mov ($a2,$a)',
1809
1810 '&rorx ($a4,$a,$Sigma0[2])',
1811 '&lea ($h,"($h,$a0)")', # h+=Sigma1(e)
1812 '&xor ($a2,$b)', # a^b, b^c in next round
1813 '&rorx ($a1,$a,$Sigma0[1])',
1814
1815 '&rorx ($a0,$a,$Sigma0[0])',
1816 '&lea ($d,"($d,$h)")', # d+=h
1817 '&and ($a3,$a2)', # (b^c)&(a^b)
1818 '&xor ($a1,$a4)',
1819
1820 '&xor ($a3,$b)', # Maj(a,b,c)=Ch(a^b,c,b)
1821 '&xor ($a1,$a0)', # Sigma0(a)
1822 '&lea ($h,"($h,$a3)");'. # h+=Maj(a,b,c)
1823 '&mov ($a4,$e)', # copy of f in future
1824
1825 '($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
1826 );
1827 # and at the finish one has to $a+=$a1
1828}
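# Notes on the BMI variant above: rorx computes the three rotations without
# touching the flags, and andn yields ~e&g directly; because e&f and ~e&g can
# never have bits in common, Ch(e,f,g) is accumulated with plain lea/add
# instead of an or. The running h+=Sigma0(a) is carried one round late
# ("from the past"), which is why the loop epilogue has to add $a1 once more.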
1829
1830$code.=<<___;
1831.type ${func}_avx2,\@function,3
1832.align 64
1833${func}_avx2:
1834.Lavx2_shortcut:
1835 push %rbx
1836 push %rbp
1837 push %r12
1838 push %r13
1839 push %r14
1840 push %r15
1841 mov %rsp,%r11 # copy %rsp
1842 sub \$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
1843 shl \$4,%rdx # num*16
1844 and \$-256*$SZ,%rsp # align stack frame
1845 lea ($inp,%rdx,$SZ),%rdx # inp+num*16*$SZ
1846 add \$`2*$SZ*($rounds-8)`,%rsp
1847 mov $ctx,$_ctx # save ctx, 1st arg
1848 mov $inp,$_inp # save inp, 2nd arg
1849 mov %rdx,$_end # save end pointer, "3rd" arg
1850 mov %r11,$_rsp # save copy of %rsp
1851___
1852$code.=<<___ if ($win64);
1853 movaps %xmm6,16*$SZ+32(%rsp)
1854 movaps %xmm7,16*$SZ+48(%rsp)
1855 movaps %xmm8,16*$SZ+64(%rsp)
1856 movaps %xmm9,16*$SZ+80(%rsp)
1857___
1858$code.=<<___ if ($win64 && $SZ>4);
1859 movaps %xmm10,16*$SZ+96(%rsp)
1860 movaps %xmm11,16*$SZ+112(%rsp)
1861___
1862$code.=<<___;
1863.Lprologue_avx2:
1864
1865 vzeroupper
1866 sub \$-16*$SZ,$inp # inp++, size optimization
1867 mov $SZ*0($ctx),$A
1868 mov $inp,%r12 # borrow $T1
1869 mov $SZ*1($ctx),$B
1870 cmp %rdx,$inp # $_end
1871 mov $SZ*2($ctx),$C
1872 cmove %rsp,%r12 # next block or random data
1873 mov $SZ*3($ctx),$D
1874 mov $SZ*4($ctx),$E
1875 mov $SZ*5($ctx),$F
1876 mov $SZ*6($ctx),$G
1877 mov $SZ*7($ctx),$H
1878___
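# The "next block or random data" trick above: the AVX2 loop always fills
# both 128-bit lanes of each %ymm register, the low lane from the current
# block and the high lane from (%r12); when no further block exists, %r12 is
# pointed at the stack merely so the vinserti128 loads stay within mapped
# memory, and whatever gets computed from that lane never reaches the hash
# state.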
1879 if ($SZ==4) { # SHA256
1880 my @X = map("%ymm$_",(0..3));
1881 my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
1882
1883$code.=<<___;
1884 vmovdqa $TABLE+`$SZ*2*$rounds`+32(%rip),$t4
1885 vmovdqa $TABLE+`$SZ*2*$rounds`+64(%rip),$t5
1886 jmp .Loop_avx2
1887.align 16
1888.Loop_avx2:
1889 vmovdqa $TABLE+`$SZ*2*$rounds`(%rip),$t3
1890 vmovdqu -16*$SZ+0($inp),%xmm0
1891 vmovdqu -16*$SZ+16($inp),%xmm1
1892 vmovdqu -16*$SZ+32($inp),%xmm2
1893 vmovdqu -16*$SZ+48($inp),%xmm3
1894 #mov $inp,$_inp # offload $inp
1895 vinserti128 \$1,(%r12),@X[0],@X[0]
1896 vinserti128 \$1,16(%r12),@X[1],@X[1]
1897 vpshufb $t3,@X[0],@X[0]
1898 vinserti128 \$1,32(%r12),@X[2],@X[2]
1899 vpshufb $t3,@X[1],@X[1]
1900 vinserti128 \$1,48(%r12),@X[3],@X[3]
1901
1902 lea $TABLE(%rip),$Tbl
1903 vpshufb $t3,@X[2],@X[2]
1904 vpaddd 0x00($Tbl),@X[0],$t0
1905 vpshufb $t3,@X[3],@X[3]
1906 vpaddd 0x20($Tbl),@X[1],$t1
1907 vpaddd 0x40($Tbl),@X[2],$t2
1908 vpaddd 0x60($Tbl),@X[3],$t3
1909 vmovdqa $t0,0x00(%rsp)
1910 xor $a1,$a1
1911 vmovdqa $t1,0x20(%rsp)
1912 lea -$PUSH8(%rsp),%rsp
1913 mov $B,$a3
1914 vmovdqa $t2,0x00(%rsp)
1915 xor $C,$a3 # magic
1916 vmovdqa $t3,0x20(%rsp)
1917 mov $F,$a4
1918 sub \$-16*2*$SZ,$Tbl # size optimization
1919 jmp .Lavx2_00_47
1920
1921.align 16
1922.Lavx2_00_47:
1923___
1924
1925sub AVX2_256_00_47 () {
1926my $j = shift;
1927my $body = shift;
1928my @X = @_;
1929my @insns = (&$body,&$body,&$body,&$body); # 96 instructions
1930my $base = "+2*$PUSH8(%rsp)";
1931
1932 &lea ("%rsp","-$PUSH8(%rsp)") if (($j%2)==0);
1933 foreach (Xupdate_256_AVX()) { # 29 instructions
1934 eval;
1935 eval(shift(@insns));
1936 eval(shift(@insns));
1937 eval(shift(@insns));
1938 }
1939 &vpaddd ($t2,@X[0],16*2*$j."($Tbl)");
1940 foreach (@insns) { eval; } # remaining instructions
1941 &vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2);
1942}
1943
1944 for ($i=0,$j=0; $j<4; $j++) {
1945 &AVX2_256_00_47($j,\&bodyx_00_15,@X);
1946 push(@X,shift(@X)); # rotate(@X)
1947 }
1948 &lea ($Tbl,16*2*$SZ."($Tbl)");
1949 &cmpb (($SZ-1)."($Tbl)",0);
1950 &jne (".Lavx2_00_47");
1951
1952 for ($i=0; $i<16; ) {
1953 my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
1954 foreach(bodyx_00_15()) { eval; }
1955 }
1956 } else { # SHA512
1957 my @X = map("%ymm$_",(0..7));
1958 my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
1959
1960$code.=<<___;
1961 jmp .Loop_avx2
1962.align 16
1963.Loop_avx2:
1964 vmovdqu -16*$SZ($inp),%xmm0
1965 vmovdqu -16*$SZ+16($inp),%xmm1
1966 vmovdqu -16*$SZ+32($inp),%xmm2
1967 lea $TABLE+0x80(%rip),$Tbl # size optimization
1968 vmovdqu -16*$SZ+48($inp),%xmm3
1969 vmovdqu -16*$SZ+64($inp),%xmm4
1970 vmovdqu -16*$SZ+80($inp),%xmm5
1971 vmovdqu -16*$SZ+96($inp),%xmm6
1972 vmovdqu -16*$SZ+112($inp),%xmm7
1973 #mov $inp,$_inp # offload $inp
1974 vmovdqa `$SZ*2*$rounds-0x80`($Tbl),$t2
1975 vinserti128 \$1,(%r12),@X[0],@X[0]
1976 vinserti128 \$1,16(%r12),@X[1],@X[1]
1977 vpshufb $t2,@X[0],@X[0]
1978 vinserti128 \$1,32(%r12),@X[2],@X[2]
1979 vpshufb $t2,@X[1],@X[1]
1980 vinserti128 \$1,48(%r12),@X[3],@X[3]
1981 vpshufb $t2,@X[2],@X[2]
1982 vinserti128 \$1,64(%r12),@X[4],@X[4]
1983 vpshufb $t2,@X[3],@X[3]
1984 vinserti128 \$1,80(%r12),@X[5],@X[5]
1985 vpshufb $t2,@X[4],@X[4]
1986 vinserti128 \$1,96(%r12),@X[6],@X[6]
1987 vpshufb $t2,@X[5],@X[5]
1988 vinserti128 \$1,112(%r12),@X[7],@X[7]
1989
1990 vpaddq -0x80($Tbl),@X[0],$t0
1991 vpshufb $t2,@X[6],@X[6]
1992 vpaddq -0x60($Tbl),@X[1],$t1
1993 vpshufb $t2,@X[7],@X[7]
1994 vpaddq -0x40($Tbl),@X[2],$t2
1995 vpaddq -0x20($Tbl),@X[3],$t3
1996 vmovdqa $t0,0x00(%rsp)
1997 vpaddq 0x00($Tbl),@X[4],$t0
1998 vmovdqa $t1,0x20(%rsp)
1999 vpaddq 0x20($Tbl),@X[5],$t1
2000 vmovdqa $t2,0x40(%rsp)
2001 vpaddq 0x40($Tbl),@X[6],$t2
2002 vmovdqa $t3,0x60(%rsp)
2003 lea -$PUSH8(%rsp),%rsp
2004 vpaddq 0x60($Tbl),@X[7],$t3
2005 vmovdqa $t0,0x00(%rsp)
2006 xor $a1,$a1
2007 vmovdqa $t1,0x20(%rsp)
2008 mov $B,$a3
2009 vmovdqa $t2,0x40(%rsp)
2010 xor $C,$a3 # magic
2011 vmovdqa $t3,0x60(%rsp)
2012 mov $F,$a4
2013 add \$16*2*$SZ,$Tbl
2014 jmp .Lavx2_00_47
2015
2016.align 16
2017.Lavx2_00_47:
2018___
2019
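# Same idea for SHA-512: two round bodies (48 instructions) per two-word
# schedule update (23 vector instructions); the -0x80 offsets compensate for
# the biased $Tbl pointer set up in .Loop_avx2 above.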
2020sub AVX2_512_00_47 () {
2021my $j = shift;
2022my $body = shift;
2023my @X = @_;
2024my @insns = (&$body,&$body); # 48 instructions
2025my $base = "+2*$PUSH8(%rsp)";
2026
2027 &lea ("%rsp","-$PUSH8(%rsp)") if (($j%4)==0);
2028 foreach (Xupdate_512_AVX()) { # 23 instructions
2029 eval;
2030 if ($_ !~ /\;$/) {
2031 eval(shift(@insns));
2032 eval(shift(@insns));
2033 eval(shift(@insns));
2034 }
2035 }
2036 &vpaddq ($t2,@X[0],16*2*$j-0x80."($Tbl)");
2037 foreach (@insns) { eval; } # remaining instructions
2038 &vmovdqa ((32*$j)%$PUSH8."(%rsp)",$t2);
2039}
2040
2041 for ($i=0,$j=0; $j<8; $j++) {
2042 &AVX2_512_00_47($j,\&bodyx_00_15,@X);
2043 push(@X,shift(@X)); # rotate(@X)
2044 }
2045 &lea ($Tbl,16*2*$SZ."($Tbl)");
2046 &cmpb (($SZ-1-0x80)."($Tbl)",0);
2047 &jne (".Lavx2_00_47");
2048
2049 for ($i=0; $i<16; ) {
2050 my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
2051 foreach(bodyx_00_15()) { eval; }
2052 }
2053}
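# Both flavours converge here: reload $ctx, apply the deferred "$a+=$a1" noted
# above and fold the working variables into the hash state for the first of the
# two blocks; if no second block was loaded we are done, otherwise fall through
# to .Lower_avx2.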
2054$code.=<<___;
2055 mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
2056 add $a1,$A
2057 #mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
2058 lea `2*$SZ*($rounds-8)`(%rsp),$Tbl
2059
2060 add $SZ*0($ctx),$A
2061 add $SZ*1($ctx),$B
2062 add $SZ*2($ctx),$C
2063 add $SZ*3($ctx),$D
2064 add $SZ*4($ctx),$E
2065 add $SZ*5($ctx),$F
2066 add $SZ*6($ctx),$G
2067 add $SZ*7($ctx),$H
2068
2069 mov $A,$SZ*0($ctx)
2070 mov $B,$SZ*1($ctx)
2071 mov $C,$SZ*2($ctx)
2072 mov $D,$SZ*3($ctx)
2073 mov $E,$SZ*4($ctx)
2074 mov $F,$SZ*5($ctx)
2075 mov $G,$SZ*6($ctx)
2076 mov $H,$SZ*7($ctx)
2077
2078 cmp `$PUSH8+2*8`($Tbl),$inp # $_end
2079 je .Ldone_avx2
2080
2081 xor $a1,$a1
2082 mov $B,$a3
2083 xor $C,$a3 # magic
2084 mov $F,$a4
2085 jmp .Lower_avx2
2086.align 16
2087.Lower_avx2:
2088___
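# .Lower_avx2 replays the rounds for the second block, reading its pre-added
# message words from the +16 (high-lane) halves still parked on the stack and
# stepping $Tbl down by $PUSH8 per eight rounds until it meets %rsp.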
2089 for ($i=0; $i<8; ) {
2090 my $base="+16($Tbl)";
2091 foreach(bodyx_00_15()) { eval; }
2092 }
2093$code.=<<___;
2094 lea -$PUSH8($Tbl),$Tbl
2095 cmp %rsp,$Tbl
2096 jae .Lower_avx2
2097
2098 mov `2*$SZ*$rounds`(%rsp),$ctx # $_ctx
2099 add $a1,$A
2100 #mov `2*$SZ*$rounds+8`(%rsp),$inp # $_inp
2101 lea `2*$SZ*($rounds-8)`(%rsp),%rsp
2102
2103 add $SZ*0($ctx),$A
2104 add $SZ*1($ctx),$B
2105 add $SZ*2($ctx),$C
2106 add $SZ*3($ctx),$D
2107 add $SZ*4($ctx),$E
2108 add $SZ*5($ctx),$F
2109 lea `2*16*$SZ`($inp),$inp # inp+=2
2110 add $SZ*6($ctx),$G
2111 mov $inp,%r12
2112 add $SZ*7($ctx),$H
2113 cmp $_end,$inp
2114
2115 mov $A,$SZ*0($ctx)
2116 cmove %rsp,%r12 # next block or stale data
2117 mov $B,$SZ*1($ctx)
2118 mov $C,$SZ*2($ctx)
2119 mov $D,$SZ*3($ctx)
2120 mov $E,$SZ*4($ctx)
2121 mov $F,$SZ*5($ctx)
2122 mov $G,$SZ*6($ctx)
2123 mov $H,$SZ*7($ctx)
2124
2125 jbe .Loop_avx2
2126 lea (%rsp),$Tbl
2127
2128.Ldone_avx2:
2129 lea ($Tbl),%rsp
2130 mov $_rsp,%rsi
2131 vzeroupper
2132___
2133$code.=<<___ if ($win64);
2134 movaps 16*$SZ+32(%rsp),%xmm6
2135 movaps 16*$SZ+48(%rsp),%xmm7
2136 movaps 16*$SZ+64(%rsp),%xmm8
2137 movaps 16*$SZ+80(%rsp),%xmm9
2138___
2139$code.=<<___ if ($win64 && $SZ>4);
2140 movaps 16*$SZ+96(%rsp),%xmm10
2141 movaps 16*$SZ+112(%rsp),%xmm11
2142___
2143$code.=<<___;
2144 mov (%rsi),%r15
2145 mov 8(%rsi),%r14
2146 mov 16(%rsi),%r13
2147 mov 24(%rsi),%r12
2148 mov 32(%rsi),%rbp
2149 mov 40(%rsi),%rbx
2150 lea 48(%rsi),%rsp
2151.Lepilogue_avx2:
2152 ret
2153.size ${func}_avx2,.-${func}_avx2
2154___
2155}}
2156}}}}}
2157
2158# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2159# CONTEXT *context,DISPATCHER_CONTEXT *disp)
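# se_handler recovers the caller's %rsp (directly or via the $_rsp slot saved
# in the frame), restores the callee-saved GPRs into the CONTEXT and, for the
# SIMD paths, copies the spilled %xmm6+ registers back as well, then lets the
# exception search continue (ExceptionContinueSearch).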
2160if ($win64) {
2161$rec="%rcx";
2162$frame="%rdx";
2163$context="%r8";
2164$disp="%r9";
2165
2166$code.=<<___;
2167.extern __imp_RtlVirtualUnwind
2168.type se_handler,\@abi-omnipotent
2169.align 16
2170se_handler:
2171 push %rsi
2172 push %rdi
2173 push %rbx
2174 push %rbp
2175 push %r12
2176 push %r13
2177 push %r14
2178 push %r15
2179 pushfq
2180 sub \$64,%rsp
2181
2182 mov 120($context),%rax # pull context->Rax
2183 mov 248($context),%rbx # pull context->Rip
2184
2185 mov 8($disp),%rsi # disp->ImageBase
2186 mov 56($disp),%r11 # disp->HandlerData
2187
2188 mov 0(%r11),%r10d # HandlerData[0]
2189 lea (%rsi,%r10),%r10 # prologue label
2190 cmp %r10,%rbx # context->Rip<prologue label
2191 jb .Lin_prologue
2192
2193 mov 152($context),%rax # pull context->Rsp
2194
2195 mov 4(%r11),%r10d # HandlerData[1]
2196 lea (%rsi,%r10),%r10 # epilogue label
2197 cmp %r10,%rbx # context->Rip>=epilogue label
2198 jae .Lin_prologue
2199___
2200$code.=<<___ if ($avx>1);
2201 lea .Lavx2_shortcut(%rip),%r10
2202 cmp %r10,%rbx # context->Rip<avx2_shortcut
2203 jb .Lnot_in_avx2
2204
2205 and \$-256*$SZ,%rax
2206 add \$`2*$SZ*($rounds-8)`,%rax
2207.Lnot_in_avx2:
2208___
2209$code.=<<___;
2210 mov %rax,%rsi # put aside Rsp
2211 mov 16*$SZ+3*8(%rax),%rax # pull $_rsp
2212 lea 48(%rax),%rax
2213
2214 mov -8(%rax),%rbx
2215 mov -16(%rax),%rbp
2216 mov -24(%rax),%r12
2217 mov -32(%rax),%r13
2218 mov -40(%rax),%r14
2219 mov -48(%rax),%r15
2220 mov %rbx,144($context) # restore context->Rbx
2221 mov %rbp,160($context) # restore context->Rbp
2222 mov %r12,216($context) # restore context->R12
2223 mov %r13,224($context) # restore context->R13
2224 mov %r14,232($context) # restore context->R14
2225 mov %r15,240($context) # restore context->R15
2226
2227 lea .Lepilogue(%rip),%r10
2228 cmp %r10,%rbx
2229 jb .Lin_prologue # non-AVX code
2230
2231 lea 16*$SZ+4*8(%rsi),%rsi # Xmm6- save area
2232 lea 512($context),%rdi # &context.Xmm6
2233 mov \$`$SZ==4?8:12`,%ecx
2234 .long 0xa548f3fc # cld; rep movsq
2235
2236.Lin_prologue:
2237 mov 8(%rax),%rdi
2238 mov 16(%rax),%rsi
2239 mov %rax,152($context) # restore context->Rsp
2240 mov %rsi,168($context) # restore context->Rsi
2241 mov %rdi,176($context) # restore context->Rdi
2242
2243 mov 40($disp),%rdi # disp->ContextRecord
2244 mov $context,%rsi # context
2245 mov \$154,%ecx # sizeof(CONTEXT)
2246 .long 0xa548f3fc # cld; rep movsq
2247
2248 mov $disp,%rsi
2249 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
2250 mov 8(%rsi),%rdx # arg2, disp->ImageBase
2251 mov 0(%rsi),%r8 # arg3, disp->ControlPc
2252 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
2253 mov 40(%rsi),%r10 # disp->ContextRecord
2254 lea 56(%rsi),%r11 # &disp->HandlerData
2255 lea 24(%rsi),%r12 # &disp->EstablisherFrame
2256 mov %r10,32(%rsp) # arg5
2257 mov %r11,40(%rsp) # arg6
2258 mov %r12,48(%rsp) # arg7
2259 mov %rcx,56(%rsp) # arg8, (NULL)
2260 call *__imp_RtlVirtualUnwind(%rip)
2261
2262 mov \$1,%eax # ExceptionContinueSearch
2263 add \$64,%rsp
2264 popfq
2265 pop %r15
2266 pop %r14
2267 pop %r13
2268 pop %r12
2269 pop %rbp
2270 pop %rbx
2271 pop %rdi
2272 pop %rsi
2273 ret
2274.size se_handler,.-se_handler
2275___
2276
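# The SHA-NI path only spills %xmm6-%xmm10, so its handler simply copies those
# five registers (ten quadwords) back into the CONTEXT and reuses the common
# .Lin_prologue tail.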
2277$code.=<<___ if ($SZ==4 && $shaext);
2278.type shaext_handler,\@abi-omnipotent
2279.align 16
2280shaext_handler:
2281 push %rsi
2282 push %rdi
2283 push %rbx
2284 push %rbp
2285 push %r12
2286 push %r13
2287 push %r14
2288 push %r15
2289 pushfq
2290 sub \$64,%rsp
2291
2292 mov 120($context),%rax # pull context->Rax
2293 mov 248($context),%rbx # pull context->Rip
2294
2295 lea .Lprologue_shaext(%rip),%r10
2296 cmp %r10,%rbx # context->Rip<.Lprologue_shaext
2297 jb .Lin_prologue
2298
2299 lea .Lepilogue_shaext(%rip),%r10
2300 cmp %r10,%rbx # context->Rip>=.Lepilogue_shaext
2301 jae .Lin_prologue
2302
2303 lea -8-5*16(%rax),%rsi
2304 lea 512($context),%rdi # &context.Xmm6
2305 mov \$10,%ecx
2306 .long 0xa548f3fc # cld; rep movsq
2307
2308 jmp .Lin_prologue
2309.size shaext_handler,.-shaext_handler
2310___
2311
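# SEH tables: one .pdata entry (begin/end/info RVAs) per emitted function, with
# .xdata records that point the unwinder at se_handler/shaext_handler and carry
# the prologue/epilogue labels as HandlerData.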
2312$code.=<<___;
2313.section .pdata
2314.align 4
2315 .rva .LSEH_begin_$func
2316 .rva .LSEH_end_$func
2317 .rva .LSEH_info_$func
2318___
2319$code.=<<___ if ($SZ==4 && $shaext);
2320 .rva .LSEH_begin_${func}_shaext
2321 .rva .LSEH_end_${func}_shaext
2322 .rva .LSEH_info_${func}_shaext
2323___
2324$code.=<<___ if ($SZ==4);
2325 .rva .LSEH_begin_${func}_ssse3
2326 .rva .LSEH_end_${func}_ssse3
2327 .rva .LSEH_info_${func}_ssse3
2328___
2329$code.=<<___ if ($avx && $SZ==8);
2330 .rva .LSEH_begin_${func}_xop
2331 .rva .LSEH_end_${func}_xop
2332 .rva .LSEH_info_${func}_xop
2333___
2334$code.=<<___ if ($avx);
2335 .rva .LSEH_begin_${func}_avx
2336 .rva .LSEH_end_${func}_avx
2337 .rva .LSEH_info_${func}_avx
2338___
2339$code.=<<___ if ($avx>1);
2340 .rva .LSEH_begin_${func}_avx2
2341 .rva .LSEH_end_${func}_avx2
2342 .rva .LSEH_info_${func}_avx2
2343___
2344$code.=<<___;
2345.section .xdata
2346.align 8
2347.LSEH_info_$func:
2348 .byte 9,0,0,0
2349 .rva se_handler
2350 .rva .Lprologue,.Lepilogue # HandlerData[]
2351___
2352$code.=<<___ if ($SZ==4 && $shaext);
2353.LSEH_info_${func}_shaext:
2354 .byte 9,0,0,0
2355 .rva shaext_handler
2356___
2357$code.=<<___ if ($SZ==4);
2358.LSEH_info_${func}_ssse3:
2359 .byte 9,0,0,0
2360 .rva se_handler
2361 .rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
2362___
2363$code.=<<___ if ($avx && $SZ==8);
2364.LSEH_info_${func}_xop:
2365 .byte 9,0,0,0
2366 .rva se_handler
2367 .rva .Lprologue_xop,.Lepilogue_xop # HandlerData[]
2368___
2369$code.=<<___ if ($avx);
2370.LSEH_info_${func}_avx:
2371 .byte 9,0,0,0
2372 .rva se_handler
2373 .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
2374___
2375$code.=<<___ if ($avx>1);
2376.LSEH_info_${func}_avx2:
2377 .byte 9,0,0,0
2378 .rva se_handler
2379 .rva .Lprologue_avx2,.Lepilogue_avx2 # HandlerData[]
2380___
2381}
2382
2383sub sha256op38 {
2384 my $instr = shift;
2385 my %opcodelet = (
2386 "sha256rnds2" => 0xcb,
2387 "sha256msg1" => 0xcc,
2388 "sha256msg2" => 0xcd );
2389
2390 if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
2391 my @opcode=(0x0f,0x38);
2392 push @opcode,$opcodelet{$instr};
2393 push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
2394 return ".byte\t".join(',',@opcode);
2395 } else {
2396 return $instr."\t".@_[0];
2397 }
2398}
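# For example, "sha256rnds2 %xmm0,%xmm1" is emitted as ".byte 15,56,203,200":
# the 0x0f,0x38 escape, the opcode byte for the mnemonic and a reg-reg ModR/M
# byte 0xc0|src|(dst<<3), so the code assembles even where the assembler does
# not know the SHA extensions.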
2399
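# Final output pass: expand the backticked Perl expressions in $code and run
# every sha256* mnemonic through sha256op38() before printing the assembly.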
2400foreach (split("\n",$code)) {
2401 s/\`([^\`]*)\`/eval $1/geo;
2402
2403 s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo;
2404
2405 print $_,"\n";
2406}
2407close STDOUT;