VirtualBox

source: vbox/trunk/src/libs/openssl-1.1.1f/crypto/md5/asm/md5-x86_64.pl@ 83531

Last change on this file since 83531 was 83531, checked in by vboxsync, 5 years ago

setting svn:sync-process=export for openssl-1.1.1f, all files except tests

  • Property svn:executable set to *
File size: 12.6 KB
Line 
1#! /usr/bin/env perl
2# Author: Marc Bevand <bevand_m (at) epita.fr>
3# Copyright 2005-2020 The OpenSSL Project Authors. All Rights Reserved.
4#
5# Licensed under the OpenSSL license (the "License"). You may not use
6# this file except in compliance with the License. You can obtain a copy
7# in the file LICENSE in the source distribution or at
8# https://www.openssl.org/source/license.html
9
10# MD5 optimized for AMD64.
11
12use strict;
13
# $code accumulates the generated assembly source as one big string; the
# round*_step() helpers below append to it and the script prints it
# (through the x86_64-xlate.pl filter pipe) at the very end.
14my $code;
15
16# round1_step() does:
17# dst = x + ((dst + F(x,y,z) + X[k] + T_i) <<< s)
18# %r10d = X[k_next]
19# %r11d = z' (copy of z for the next step)
20# Each round1_step() takes about 5.3 clocks (9 instructions, 1.7 IPC)
sub round1_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;

    # The very first step of round 1 (pos == -1) must prime the helper
    # registers: %r10d gets X[0] and %r11d a copy of z (%edx).
    if ($pos == -1) {
        $code .= " mov 0*4(%rsi), %r10d /* (NEXT STEP) X[0] */\n"
              .  " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n";
    }

    # F(x,y,z) = (x & y) | (~x & z) is computed as ((y ^ z) & x) ^ z,
    # which needs only the single temporary %r11d (seeded with z).
    # X[$k_next] is prefetched into %r10d for the following step.
    $code .= <<EOF;
 xor $y, %r11d /* y ^ ... */
 lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
 and $x, %r11d /* x & ... */
 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
 xor $z, %r11d /* z ^ ... */
 add %r11d, $dst /* dst += ... */
 rol \$$s, $dst /* dst <<< s */
 mov $y, %r11d /* (NEXT STEP) z' = $y */
 add $x, $dst /* dst += x */
EOF
}
38
39# round2_step() does:
40# dst = x + ((dst + G(x,y,z) + X[k] + T_i) <<< s)
41# %r10d = X[k_next]
42# %r11d = z' (copy of z for the next step)
43# %r12d = z' (copy of z for the next step)
44# Each round2_step() takes about 5.4 clocks (11 instructions, 2.0 IPC)
sub round2_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;

    # First step of round 2 (pos == -1): seed both temporaries with a
    # copy of z (%edx).
    if ($pos == -1) {
        $code .= " mov %edx, %r11d /* (NEXT STEP) z' = %edx */\n"
              .  " mov %edx, %r12d /* (NEXT STEP) z' = %edx */\n";
    }

    # G(x,y,z) = (x & z) | (y & ~z); %r11d computes y & ~z while %r12d
    # computes x & z, and the two halves are OR-ed together.  Both
    # temporaries are reloaded with y for the next step, and X[$k_next]
    # is prefetched into %r10d.
    $code .= <<EOF;
 not %r11d /* not z */
 and $x, %r12d /* x & z */
 lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
 and $y, %r11d /* y & (not z) */
 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
 or %r11d, %r12d /* (y & (not z)) | (x & z) */
 mov $y, %r11d /* (NEXT STEP) z' = $y */
 add %r12d, $dst /* dst += ... */
 mov $y, %r12d /* (NEXT STEP) z' = $y */
 rol \$$s, $dst /* dst <<< s */
 add $x, $dst /* dst += x */
EOF
}
64
65# round3_step() does:
66# dst = x + ((dst + H(x,y,z) + X[k] + T_i) <<< s)
67# %r10d = X[k_next]
68# %r11d = y' (copy of y for the next step)
69# Each round3_step() takes about 4.2 clocks (8 instructions, 1.9 IPC)
{
# Toggles between two equivalent instruction orderings on consecutive
# steps (presumably a scheduling tweak — the emitted instructions are the
# same either way, only the rol/mov order differs).
my $round3_alter = 0;

sub round3_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;

    # First step of round 3 (pos == -1): seed %r11d with a copy of y (%ecx).
    $code .= " mov %ecx, %r11d /* (NEXT STEP) y' = %ecx */\n" if ($pos == -1);

    # H(x,y,z) = x ^ y ^ z; %r11d enters holding y, so two xors finish it.
    # X[$k_next] is prefetched into %r10d for the following step.
    $code .= <<EOF;
 lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
 xor $z, %r11d /* z ^ ... */
 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
 xor $x, %r11d /* x ^ ... */
 add %r11d, $dst /* dst += ... */
EOF

    # Emit the rotate and the y'-copy in alternating order.
    if ($round3_alter) {
        $code .= <<EOF;
 rol \$$s, $dst /* dst <<< s */
 mov $x, %r11d /* (NEXT STEP) y' = $x */
EOF
    }
    else {
        $code .= <<EOF;
 mov $x, %r11d /* (NEXT STEP) y' = $x */
 rol \$$s, $dst /* dst <<< s */
EOF
    }

    $code .= <<EOF;
 add $x, $dst /* dst += x */
EOF

    $round3_alter ^= 1;
}
}
96
97# round4_step() does:
98# dst = x + ((dst + I(x,y,z) + X[k] + T_i) <<< s)
99# %r10d = X[k_next]
100# %r11d = not z' (copy of not z for the next step)
101# Each round4_step() takes about 5.2 clocks (9 instructions, 1.7 IPC)
sub round4_step
{
    my ($pos, $dst, $x, $y, $z, $k_next, $T_i, $s) = @_;

    # First step of round 4 (pos == -1): materialize ~z in %r11d by
    # loading all-ones and xor-ing with z (%edx).
    if ($pos == -1) {
        $code .= " mov \$0xffffffff, %r11d\n"
              .  " xor %edx, %r11d /* (NEXT STEP) not z' = not %edx*/\n";
    }

    # I(x,y,z) = y ^ (x | ~z); %r11d enters each step holding ~z, and is
    # rebuilt as ~y (the next step's ~z) on the way out.  X[$k_next] is
    # prefetched into %r10d.
    $code .= <<EOF;
 lea $T_i($dst,%r10d),$dst /* Const + dst + ... */
 or $x, %r11d /* x | ... */
 mov $k_next*4(%rsi),%r10d /* (NEXT STEP) X[$k_next] */
 xor $y, %r11d /* y ^ ... */
 add %r11d, $dst /* dst += ... */
 mov \$0xffffffff, %r11d
 rol \$$s, $dst /* dst <<< s */
 xor $y, %r11d /* (NEXT STEP) not z' = not $y */
 add $x, $dst /* dst += x */
EOF
}
120
# Command-line handling shared by the perlasm scripts: the first argument
# is the assembler "flavour" (elf, macosx, mingw64, nasm, ...), the second
# the output file name.  A single argument containing a dot is taken to be
# the output file itself.
no warnings qw(uninitialized);
my $flavour = shift;
my $output = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

# Windows targets additionally get the SEH handler emitted at the bottom
# of this file.
my $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

# Locate the x86_64-xlate.pl translator either next to this script or in
# the sibling perlasm directory.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; my $dir=$1; my $xlate;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Pipe our entire output through the translator and alias STDOUT to the
# pipe, so the plain "print $code" at the end of the script is filtered.
# BUGFIX: the open used to be unchecked, which silently produced an empty
# output file whenever the translator could not be started.
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;
135
# Function prologue for md5_block_asm_data_order(ctx, ptr, nbr) (SysV
# args in rdi/rsi/rdx).  Saves the five callee-saved registers this code
# clobbers (rbp, rbx, r12, r14, r15 -- a fixed 40-byte frame), computes
# the end-of-data pointer (ptr + nbr*64), loads the A..D chaining words
# from ctx, and snapshots them in r8d/r9d/r14d/r15d at the top of each
# 64-byte-block iteration.
136$code .= <<EOF;
137.text
138.align 16
139
140.globl md5_block_asm_data_order
141.type md5_block_asm_data_order,\@function,3
142md5_block_asm_data_order:
143.cfi_startproc
144 push %rbp
145.cfi_push %rbp
146 push %rbx
147.cfi_push %rbx
148 push %r12
149.cfi_push %r12
150 push %r14
151.cfi_push %r14
152 push %r15
153.cfi_push %r15
154.Lprologue:
155
156 # rdi = arg #1 (ctx, MD5_CTX pointer)
157 # rsi = arg #2 (ptr, data pointer)
158 # rdx = arg #3 (nbr, number of 16-word blocks to process)
159 mov %rdi, %rbp # rbp = ctx
160 shl \$6, %rdx # rdx = nbr in bytes
161 lea (%rsi,%rdx), %rdi # rdi = end
162 mov 0*4(%rbp), %eax # eax = ctx->A
163 mov 1*4(%rbp), %ebx # ebx = ctx->B
164 mov 2*4(%rbp), %ecx # ecx = ctx->C
165 mov 3*4(%rbp), %edx # edx = ctx->D
166 # end is 'rdi'
167 # ptr is 'rsi'
168 # A is 'eax'
169 # B is 'ebx'
170 # C is 'ecx'
171 # D is 'edx'
172
173 cmp %rdi, %rsi # cmp end with ptr
174 je .Lend # jmp if ptr == end
175
176 # BEGIN of loop over 16-word blocks
177.Lloop: # save old values of A, B, C, D
178 mov %eax, %r8d
179 mov %ebx, %r9d
180 mov %ecx, %r14d
181 mov %edx, %r15d
182EOF
# Round 1: 16 F() steps.  The per-step constants (0xd76aa478, ...) are the
# RFC 1321 T[i] table and the rotate amounts cycle 7,12,17,22.  pos=-1
# marks the first step of a round (emits the register-priming code) and
# pos=1 the last; each call's k_next argument prefetches the NEXT step's
# X[] word, so the final call of each round preloads the first index of
# the round that follows.
183round1_step(-1,'%eax','%ebx','%ecx','%edx', '1','0xd76aa478', '7');
184round1_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xe8c7b756','12');
185round1_step( 0,'%ecx','%edx','%eax','%ebx', '3','0x242070db','17');
186round1_step( 0,'%ebx','%ecx','%edx','%eax', '4','0xc1bdceee','22');
187round1_step( 0,'%eax','%ebx','%ecx','%edx', '5','0xf57c0faf', '7');
188round1_step( 0,'%edx','%eax','%ebx','%ecx', '6','0x4787c62a','12');
189round1_step( 0,'%ecx','%edx','%eax','%ebx', '7','0xa8304613','17');
190round1_step( 0,'%ebx','%ecx','%edx','%eax', '8','0xfd469501','22');
191round1_step( 0,'%eax','%ebx','%ecx','%edx', '9','0x698098d8', '7');
192round1_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8b44f7af','12');
193round1_step( 0,'%ecx','%edx','%eax','%ebx','11','0xffff5bb1','17');
194round1_step( 0,'%ebx','%ecx','%edx','%eax','12','0x895cd7be','22');
195round1_step( 0,'%eax','%ebx','%ecx','%edx','13','0x6b901122', '7');
196round1_step( 0,'%edx','%eax','%ebx','%ecx','14','0xfd987193','12');
197round1_step( 0,'%ecx','%edx','%eax','%ebx','15','0xa679438e','17');
198round1_step( 1,'%ebx','%ecx','%edx','%eax', '1','0x49b40821','22');
199
# Round 2: 16 G() steps; message order k = (1 + 5i) mod 16, rotates 5,9,14,20.
200round2_step(-1,'%eax','%ebx','%ecx','%edx', '6','0xf61e2562', '5');
201round2_step( 0,'%edx','%eax','%ebx','%ecx','11','0xc040b340', '9');
202round2_step( 0,'%ecx','%edx','%eax','%ebx', '0','0x265e5a51','14');
203round2_step( 0,'%ebx','%ecx','%edx','%eax', '5','0xe9b6c7aa','20');
204round2_step( 0,'%eax','%ebx','%ecx','%edx','10','0xd62f105d', '5');
205round2_step( 0,'%edx','%eax','%ebx','%ecx','15', '0x2441453', '9');
206round2_step( 0,'%ecx','%edx','%eax','%ebx', '4','0xd8a1e681','14');
207round2_step( 0,'%ebx','%ecx','%edx','%eax', '9','0xe7d3fbc8','20');
208round2_step( 0,'%eax','%ebx','%ecx','%edx','14','0x21e1cde6', '5');
209round2_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xc33707d6', '9');
210round2_step( 0,'%ecx','%edx','%eax','%ebx', '8','0xf4d50d87','14');
211round2_step( 0,'%ebx','%ecx','%edx','%eax','13','0x455a14ed','20');
212round2_step( 0,'%eax','%ebx','%ecx','%edx', '2','0xa9e3e905', '5');
213round2_step( 0,'%edx','%eax','%ebx','%ecx', '7','0xfcefa3f8', '9');
214round2_step( 0,'%ecx','%edx','%eax','%ebx','12','0x676f02d9','14');
215round2_step( 1,'%ebx','%ecx','%edx','%eax', '5','0x8d2a4c8a','20');
216
# Round 3: 16 H() steps; message order k = (5 + 3i) mod 16, rotates 4,11,16,23.
217round3_step(-1,'%eax','%ebx','%ecx','%edx', '8','0xfffa3942', '4');
218round3_step( 0,'%edx','%eax','%ebx','%ecx','11','0x8771f681','11');
219round3_step( 0,'%ecx','%edx','%eax','%ebx','14','0x6d9d6122','16');
220round3_step( 0,'%ebx','%ecx','%edx','%eax', '1','0xfde5380c','23');
221round3_step( 0,'%eax','%ebx','%ecx','%edx', '4','0xa4beea44', '4');
222round3_step( 0,'%edx','%eax','%ebx','%ecx', '7','0x4bdecfa9','11');
223round3_step( 0,'%ecx','%edx','%eax','%ebx','10','0xf6bb4b60','16');
224round3_step( 0,'%ebx','%ecx','%edx','%eax','13','0xbebfbc70','23');
225round3_step( 0,'%eax','%ebx','%ecx','%edx', '0','0x289b7ec6', '4');
226round3_step( 0,'%edx','%eax','%ebx','%ecx', '3','0xeaa127fa','11');
227round3_step( 0,'%ecx','%edx','%eax','%ebx', '6','0xd4ef3085','16');
228round3_step( 0,'%ebx','%ecx','%edx','%eax', '9', '0x4881d05','23');
229round3_step( 0,'%eax','%ebx','%ecx','%edx','12','0xd9d4d039', '4');
230round3_step( 0,'%edx','%eax','%ebx','%ecx','15','0xe6db99e5','11');
231round3_step( 0,'%ecx','%edx','%eax','%ebx', '2','0x1fa27cf8','16');
232round3_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xc4ac5665','23');
233
# Round 4: 16 I() steps; message order k = 7i mod 16, rotates 6,10,15,21.
234round4_step(-1,'%eax','%ebx','%ecx','%edx', '7','0xf4292244', '6');
235round4_step( 0,'%edx','%eax','%ebx','%ecx','14','0x432aff97','10');
236round4_step( 0,'%ecx','%edx','%eax','%ebx', '5','0xab9423a7','15');
237round4_step( 0,'%ebx','%ecx','%edx','%eax','12','0xfc93a039','21');
238round4_step( 0,'%eax','%ebx','%ecx','%edx', '3','0x655b59c3', '6');
239round4_step( 0,'%edx','%eax','%ebx','%ecx','10','0x8f0ccc92','10');
240round4_step( 0,'%ecx','%edx','%eax','%ebx', '1','0xffeff47d','15');
241round4_step( 0,'%ebx','%ecx','%edx','%eax', '8','0x85845dd1','21');
242round4_step( 0,'%eax','%ebx','%ecx','%edx','15','0x6fa87e4f', '6');
243round4_step( 0,'%edx','%eax','%ebx','%ecx', '6','0xfe2ce6e0','10');
244round4_step( 0,'%ecx','%edx','%eax','%ebx','13','0xa3014314','15');
245round4_step( 0,'%ebx','%ecx','%edx','%eax', '4','0x4e0811a1','21');
246round4_step( 0,'%eax','%ebx','%ecx','%edx','11','0xf7537e82', '6');
247round4_step( 0,'%edx','%eax','%ebx','%ecx', '2','0xbd3af235','10');
248round4_step( 0,'%ecx','%edx','%eax','%ebx', '9','0x2ad7d2bb','15');
249round4_step( 1,'%ebx','%ecx','%edx','%eax', '0','0xeb86d391','21');
# Per-block epilogue: fold the saved chaining values back in (the MD5
# feed-forward), advance ptr by 64 bytes and loop while ptr < end.  After
# the loop, store A..D back into ctx and restore the five callee-saved
# registers from the stack (offsets mirror the push order in the
# prologue: r15 pushed last is at (%rsp), rbp pushed first at 32(%rsp)).
250$code .= <<EOF;
251 # add old values of A, B, C, D
252 add %r8d, %eax
253 add %r9d, %ebx
254 add %r14d, %ecx
255 add %r15d, %edx
256
257 # loop control
258 add \$64, %rsi # ptr += 64
259 cmp %rdi, %rsi # cmp end with ptr
260 jb .Lloop # jmp if ptr < end
261 # END of loop over 16-word blocks
262
263.Lend:
264 mov %eax, 0*4(%rbp) # ctx->A = A
265 mov %ebx, 1*4(%rbp) # ctx->B = B
266 mov %ecx, 2*4(%rbp) # ctx->C = C
267 mov %edx, 3*4(%rbp) # ctx->D = D
268
269 mov (%rsp),%r15
270.cfi_restore %r15
271 mov 8(%rsp),%r14
272.cfi_restore %r14
273 mov 16(%rsp),%r12
274.cfi_restore %r12
275 mov 24(%rsp),%rbx
276.cfi_restore %rbx
277 mov 32(%rsp),%rbp
278.cfi_restore %rbp
279 add \$40,%rsp
280.cfi_adjust_cfa_offset -40
281.Lepilogue:
282 ret
283.cfi_endproc
284.size md5_block_asm_data_order,.-md5_block_asm_data_order
285EOF
286
287# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
288# CONTEXT *context,DISPATCHER_CONTEXT *disp)
# Windows-only: append a structured-exception handler plus .pdata/.xdata
# unwind tables.  se_handler() recovers the saved nonvolatile registers
# from the fixed 40-byte frame when the faulting RIP lies between
# .Lprologue and .Lepilogue, copies the fixed-up CONTEXT (the \$154 is
# its size in qwords for "rep movsq", per the inline comment), and then
# delegates the actual unwind to RtlVirtualUnwind.
289if ($win64) {
# Friendly names for the four EXCEPTION_DISPOSITION handler arguments
# (Win64 calling convention: rcx, rdx, r8, r9).
290my $rec="%rcx";
291my $frame="%rdx";
292my $context="%r8";
293my $disp="%r9";
294
295$code.=<<___;
296.extern __imp_RtlVirtualUnwind
297.type se_handler,\@abi-omnipotent
298.align 16
299se_handler:
300 push %rsi
301 push %rdi
302 push %rbx
303 push %rbp
304 push %r12
305 push %r13
306 push %r14
307 push %r15
308 pushfq
309 sub \$64,%rsp
310
311 mov 120($context),%rax # pull context->Rax
312 mov 248($context),%rbx # pull context->Rip
313
314 lea .Lprologue(%rip),%r10
315 cmp %r10,%rbx # context->Rip<.Lprologue
316 jb .Lin_prologue
317
318 mov 152($context),%rax # pull context->Rsp
319
320 lea .Lepilogue(%rip),%r10
321 cmp %r10,%rbx # context->Rip>=.Lepilogue
322 jae .Lin_prologue
323
324 lea 40(%rax),%rax
325
326 mov -8(%rax),%rbp
327 mov -16(%rax),%rbx
328 mov -24(%rax),%r12
329 mov -32(%rax),%r14
330 mov -40(%rax),%r15
331 mov %rbx,144($context) # restore context->Rbx
332 mov %rbp,160($context) # restore context->Rbp
333 mov %r12,216($context) # restore context->R12
334 mov %r14,232($context) # restore context->R14
335 mov %r15,240($context) # restore context->R15
336
337.Lin_prologue:
338 mov 8(%rax),%rdi
339 mov 16(%rax),%rsi
340 mov %rax,152($context) # restore context->Rsp
341 mov %rsi,168($context) # restore context->Rsi
342 mov %rdi,176($context) # restore context->Rdi
343
344 mov 40($disp),%rdi # disp->ContextRecord
345 mov $context,%rsi # context
346 mov \$154,%ecx # sizeof(CONTEXT)
347 .long 0xa548f3fc # cld; rep movsq
348
349 mov $disp,%rsi
350 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
351 mov 8(%rsi),%rdx # arg2, disp->ImageBase
352 mov 0(%rsi),%r8 # arg3, disp->ControlPc
353 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
354 mov 40(%rsi),%r10 # disp->ContextRecord
355 lea 56(%rsi),%r11 # &disp->HandlerData
356 lea 24(%rsi),%r12 # &disp->EstablisherFrame
357 mov %r10,32(%rsp) # arg5
358 mov %r11,40(%rsp) # arg6
359 mov %r12,48(%rsp) # arg7
360 mov %rcx,56(%rsp) # arg8, (NULL)
361 call *__imp_RtlVirtualUnwind(%rip)
362
363 mov \$1,%eax # ExceptionContinueSearch
364 add \$64,%rsp
365 popfq
366 pop %r15
367 pop %r14
368 pop %r13
369 pop %r12
370 pop %rbp
371 pop %rbx
372 pop %rdi
373 pop %rsi
374 ret
375.size se_handler,.-se_handler
376
377.section .pdata
378.align 4
379 .rva .LSEH_begin_md5_block_asm_data_order
380 .rva .LSEH_end_md5_block_asm_data_order
381 .rva .LSEH_info_md5_block_asm_data_order
382
383.section .xdata
384.align 8
385.LSEH_info_md5_block_asm_data_order:
386 .byte 9,0,0,0
387 .rva se_handler
388___
389}
390
# Emit the accumulated assembly.  STDOUT was aliased to the xlate filter
# pipe earlier, so this write goes through the translator; the checked
# close surfaces any buffered write or pipe error.
print STDOUT $code;

close STDOUT or die "error closing STDOUT: $!";
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette