VirtualBox

source: vbox/trunk/src/libs/openssl-1.1.1f/crypto/bn/asm/parisc-mont.pl@ 83531

Last change on this file since 83531 was 83531, checked in by vboxsync, 5 years ago

setting svn:sync-process=export for openssl-1.1.1f, all files except tests

File size: 26.9 KB
Line 
1#! /usr/bin/env perl
2# Copyright 2009-2020 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License"). You may not use
5# this file except in compliance with the License. You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9
10# ====================================================================
11# Written by Andy Polyakov <[email protected]> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15# ====================================================================
16
17# On PA-7100LC this module performs ~90-50% better, less for longer
18# keys, than code generated by gcc 3.2 for PA-RISC 1.1. Latter means
19# that compiler utilized xmpyu instruction to perform 32x32=64-bit
20# multiplication, which in turn means that "baseline" performance was
21# optimal in respect to instruction set capabilities. Fair comparison
22# with vendor compiler is problematic, because OpenSSL doesn't define
23# BN_LLONG [presumably] for historical reasons, which drives compiler
24# toward 4 times 16x16=32-bit multiplications [plus complementary
25# shifts and additions] instead. This means that you should observe
26# several times improvement over code generated by vendor compiler
27# for PA-RISC 1.1, but the "baseline" is far from optimal. The actual
28# improvement coefficient was never collected on PA-7100LC, or any
29# other 1.1 CPU, because I don't have access to such machine with
30# vendor compiler. But to give you a taste, PA-RISC 1.1 code path
31# reportedly outperformed code generated by cc +DA1.1 +O3 by factor
32# of ~5x on PA-8600.
33#
34# On PA-RISC 2.0 it has to compete with pa-risc2[W].s, which is
35# reportedly ~2x faster than vendor compiler generated code [according
36# to comment in pa-risc2[W].s]. Here comes a catch. Execution core of
37# this implementation is actually 32-bit one, in the sense that it
38# operates on 32-bit values. But pa-risc2[W].s operates on arrays of
39# 64-bit BN_LONGs... How do they interoperate then? No problem. This
40# module picks halves of 64-bit values in reverse order and pretends
41# they were 32-bit BN_LONGs. But can 32-bit core compete with "pure"
42# 64-bit code such as pa-risc2[W].s then? Well, the thing is that
43# 32x32=64-bit multiplication is the best even PA-RISC 2.0 can do,
44# i.e. there is no "wider" multiplication like on most other 64-bit
45# platforms. This means that even being effectively 32-bit, this
46# implementation performs "64-bit" computational task in same amount
47# of arithmetic operations, most notably multiplications. It requires
48# more memory references, most notably to tp[num], but this doesn't
49# seem to exhaust memory port capacity. And indeed, dedicated PA-RISC
50# 2.0 code path provides virtually same performance as pa-risc2[W].s:
51# it's ~10% better for shortest key length and ~10% worse for longest
52# one.
53#
54# In case it wasn't clear. The module has two distinct code paths:
55# PA-RISC 1.1 and PA-RISC 2.0 ones. Latter features carry-free 64-bit
56# additions and 64-bit integer loads, not to mention specific
57# instruction scheduling. In 64-bit build naturally only 2.0 code path
58# is assembled. In 32-bit application context both code paths are
59# assembled, PA-RISC 2.0 CPU is detected at run-time and proper path
60# is taken automatically. Also, in 32-bit build the module imposes
61# couple of limitations: vector lengths has to be even and vector
62# addresses has to be 64-bit aligned. Normally neither is a problem:
63# most common key lengths are even and vectors are commonly malloc-ed,
64# which ensures alignment.
65#
66# Special thanks to polarhome.com for providing HP-UX account on
67# PA-RISC 1.1 machine, and to correspondent who chose to remain
68# anonymous for testing the code on PA-RISC 2.0 machine.
69
70
# Figure out the directory this script lives in; used below to locate
# the generated opensslconf.h in 32-bit builds.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;

$flavour = shift;	# a flavour matching /64/ selects the 2.0W (64-bit) ABI
$output  = shift;	# output assembly file

# 3-arg open with error check: a silent open failure would otherwise
# produce an empty assembly file and a confusing downstream build error.
open STDOUT,'>',$output or die "can't open $output: $!";

if ($flavour =~ /64/) {
	$LEVEL		="2.0W";
	$SIZE_T		=8;
	$FRAME_MARKER	=80;
	$SAVED_RP	=16;
	$PUSH		="std";
	$PUSHMA		="std,ma";
	$POP		="ldd";
	$POPMB		="ldd,mb";
	$BN_SZ		=$SIZE_T;
} else {
	$LEVEL		="1.1";	#$LEVEL.="\n\t.ALLOW\t2.0";
	$SIZE_T		=4;
	$FRAME_MARKER	=48;
	$SAVED_RP	=20;
	$PUSH		="stw";
	$PUSHMA		="stwm";
	$POP		="ldw";
	$POPMB		="ldwm";
	$BN_SZ		=$SIZE_T;
	# A 32-bit build may still use 64-bit BN_LONGs (SIXTY_FOUR_BIT);
	# detect that from the configuration header.  Absence of the
	# header is tolerated: we just keep the 32-bit defaults.
	if (open my $conf,'<',"${dir}../../opensslconf.h") {
		while(<$conf>) {
			if (m/#\s*define\s+SIXTY_FOUR_BIT/) {
				$BN_SZ=8;
				$LEVEL="2.0";
				last;
			}
		}
		close $conf;
	}
}
109
# Frame layout: 8 callee-saved registers plus the ABI frame marker,
# with 32 extra bytes of local variables; $LOCALS is the offset of the
# local area within the frame.
$FRAME=8*$SIZE_T+$FRAME_MARKER;	# 8 saved regs + frame marker
				#	[+ argument transfer]
$LOCALS=$FRAME-$FRAME_MARKER;
$FRAME+=32;			# local variables

# Scratch/temporary registers.
$tp="%r31";	# walks the temporary vector tp[]
$ti1="%r29";
$ti0="%r28";

# Incoming arguments (PA-RISC argument registers %r26..%r23; the last
# two arguments arrive on the stack in 32-bit builds).
$rp="%r26";
$ap="%r25";
$bp="%r24";
$np="%r23";
$n0="%r22";	# passed through stack in 32-bit
$num="%r21";	# passed through stack in 32-bit
$idx="%r20";
$arrsz="%r19";

# Callee-saved registers holding 64-bit intermediate values.
$nm1="%r7";
$nm0="%r6";
$ab1="%r5";
$ab0="%r4";

$fp="%r3";
$hi1="%r2";
$hi0="%r1";

$xfer=$n0;	# accommodates [-16..15] offset in fld[dw]s

# Floating-point registers: products come from the FPU's xmpyu and are
# spilled to/reloaded from the $xfer area in memory.
$fm0="%fr4"; $fti=$fm0;
$fbi="%fr5L";
$fn0="%fr5R";
$fai="%fr6"; $fab0="%fr7"; $fab1="%fr8";
$fni="%fr9"; $fnm0="%fr10"; $fnm1="%fr11";
144
# Function prologue: save callee-saved registers, establish the frame
# pointer, and (32-bit only) fetch the stack-passed n0/num arguments.
$code=<<___;
	.LEVEL	$LEVEL
	.SPACE	\$TEXT\$
	.SUBSPA	\$CODE\$,QUAD=0,ALIGN=8,ACCESS=0x2C,CODE_ONLY

	.EXPORT	bn_mul_mont,ENTRY,ARGW0=GR,ARGW1=GR,ARGW2=GR,ARGW3=GR
	.ALIGN	64
bn_mul_mont
	.PROC
	.CALLINFO	FRAME=`$FRAME-8*$SIZE_T`,NO_CALLS,SAVE_RP,SAVE_SP,ENTRY_GR=6
	.ENTRY
	$PUSH	%r2,-$SAVED_RP(%sp)	; standard prologue
	$PUSHMA	%r3,$FRAME(%sp)
	$PUSH	%r4,`-$FRAME+1*$SIZE_T`(%sp)
	$PUSH	%r5,`-$FRAME+2*$SIZE_T`(%sp)
	$PUSH	%r6,`-$FRAME+3*$SIZE_T`(%sp)
	$PUSH	%r7,`-$FRAME+4*$SIZE_T`(%sp)
	$PUSH	%r8,`-$FRAME+5*$SIZE_T`(%sp)
	$PUSH	%r9,`-$FRAME+6*$SIZE_T`(%sp)
	$PUSH	%r10,`-$FRAME+7*$SIZE_T`(%sp)
	ldo	-$FRAME(%sp),$fp
___
# 32-bit ABI: the 5th and 6th arguments (n0, num) arrive on the stack.
$code.=<<___ if ($SIZE_T==4);
	ldw	`-$FRAME_MARKER-4`($fp),$n0
	ldw	`-$FRAME_MARKER-8`($fp),$num
	nop
	nop				; alignment
___
# 32-bit BN_LONG path: return 0 ("unhandled") unless num>=6, num is
# even, and ap/np are 64-bit aligned; then load n0 and bp[0].
$code.=<<___ if ($BN_SZ==4);
	comiclr,<=	6,$num,%r0	; are vectors long enough?
	b		L\$abort
	ldi		0,%r28		; signal "unhandled"
	add,ev		%r0,$num,$num	; is $num even?
	b		L\$abort
	nop
	or		$ap,$np,$ti1
	extru,=		$ti1,31,3,%r0	; are ap and np 64-bit aligned?
	b		L\$abort
	nop
	nop				; alignment
	nop

	fldws		0($n0),${fn0}
	fldws,ma	4($bp),${fbi}	; bp[0]
___
# 64-bit BN_LONG path: halves of 64-bit words are processed in flipped
# word order, so num is doubled and only low parts of n0/bp are used.
$code.=<<___ if ($BN_SZ==8);
	comib,>		3,$num,L\$abort	; are vectors long enough?
	ldi		0,%r28		; signal "unhandled"
	addl		$num,$num,$num	; I operate on 32-bit values

	fldws		4($n0),${fn0}	; only low part of n0
	fldws		4($bp),${fbi}	; bp[0] in flipped word order
___
# Common setup: carve 32-byte-aligned space for tp[num+1] on the stack,
# kick off ap[0,1]*bp[0] and m=ap[0]*bp[0]*n0 in the FPU, and spill the
# first products into the $xfer exchange area.
$code.=<<___;
	fldds		0($ap),${fai}	; ap[0,1]
	fldds		0($np),${fni}	; np[0,1]

	sh2addl		$num,%r0,$arrsz
	ldi		31,$hi0
	ldo		36($arrsz),$hi1	; space for tp[num+1]
	andcm		$hi1,$hi0,$hi1	; align
	addl		$hi1,%sp,%sp
	$PUSH		$fp,-$SIZE_T(%sp)

	ldo		`$LOCALS+16`($fp),$xfer
	ldo		`$LOCALS+32+4`($fp),$tp

	xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[0]
	xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[0]
	xmpyu		${fn0},${fab0}R,${fm0}

	addl		$arrsz,$ap,$ap	; point at the end
	addl		$arrsz,$np,$np
	subi		0,$arrsz,$idx	; j=0
	ldo		8($idx),$idx	; j++++

	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[0]*m
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[1]*m
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)
	fstds		${fab1},0($xfer)
	fstds		${fnm1},8($xfer)
	flddx		$idx($ap),${fai}	; ap[2,3]
	flddx		$idx($np),${fni}	; np[2,3]
___
# Run-time CPU detection for 32-bit builds: extrd is a PA-RISC 2.0
# instruction; on a pre-2.0 CPU the branch to L$parisc11 is taken.
$code.=<<___ if ($BN_SZ==4);
	mtctl		$hi0,%cr11	; $hi0 still holds 31
	extrd,u,*=	$hi0,%sar,1,$hi0	; executes on PA-RISC 1.0
	b		L\$parisc11
	nop
___
# PA-RISC 2.0 path, first pass (i==0): tp[j] = ap[j]*bp[0] + np[j]*m,
# two j's per iteration, with FPU multiplies pipelined against integer
# carry propagation through the $xfer area.
$code.=<<___;				# PA-RISC 2.0 code-path
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldd		-16($xfer),$ab0
	fstds		${fab0},-16($xfer)

	extrd,u		$ab0,31,32,$hi0
	extrd,u		$ab0,63,32,$ab0
	ldd		-8($xfer),$nm0
	fstds		${fnm0},-8($xfer)
	ldo		8($idx),$idx	; j++++
	addl		$ab0,$nm0,$nm0	; low part is discarded
	extrd,u		$nm0,31,32,$hi1


L\$1st
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[0]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	addl		$hi0,$ab1,$ab1
	extrd,u		$ab1,31,32,$hi0
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	extrd,u		$ab1,63,32,$ab1
	addl		$hi1,$nm1,$nm1
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	flddx		$idx($np),${fni}	; np[j,j+1]
	addl		$ab1,$nm1,$nm1
	extrd,u		$nm1,31,32,$hi1

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldd		-16($xfer),$ab0
	fstds		${fab0},-16($xfer)
	addl		$hi0,$ab0,$ab0
	extrd,u		$ab0,31,32,$hi0
	ldd		-8($xfer),$nm0
	fstds		${fnm0},-8($xfer)
	extrd,u		$ab0,63,32,$ab0
	addl		$hi1,$nm0,$nm0
	stw		$nm1,-4($tp)	; tp[j-1]
	addl		$ab0,$nm0,$nm0
	stw,ma		$nm0,8($tp)	; tp[j-1]
	addib,<>	8,$idx,L\$1st	; j++++
	extrd,u		$nm0,31,32,$hi1

	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[0]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	addl		$hi0,$ab1,$ab1
	extrd,u		$ab1,31,32,$hi0
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	extrd,u		$ab1,63,32,$ab1
	addl		$hi1,$nm1,$nm1
	ldd		-16($xfer),$ab0
	addl		$ab1,$nm1,$nm1
	ldd		-8($xfer),$nm0
	extrd,u		$nm1,31,32,$hi1

	addl		$hi0,$ab0,$ab0
	extrd,u		$ab0,31,32,$hi0
	stw		$nm1,-4($tp)	; tp[j-1]
	extrd,u		$ab0,63,32,$ab0
	addl		$hi1,$nm0,$nm0
	ldd		0($xfer),$ab1
	addl		$ab0,$nm0,$nm0
	ldd,mb		8($xfer),$nm1
	extrd,u		$nm0,31,32,$hi1
	stw,ma		$nm0,8($tp)	; tp[j-1]

	ldo		-1($num),$num	; i--
	subi		0,$arrsz,$idx	; j=0
___
# Load bp[1] for the second outer iteration (64-bit mode reads words in
# flipped order, so no auto-increment there).
$code.=<<___ if ($BN_SZ==4);
	fldws,ma	4($bp),${fbi}	; bp[1]
___
$code.=<<___ if ($BN_SZ==8);
	fldws		0($bp),${fbi}	; bp[1] in flipped word order
___
# Finish first pass: fold remaining carries into tp[num]/tp[num+1] and
# compute the next m.  tp[0]+ap[0]*bp[i] can be a 33-bit value, so the
# addition is carried out in double-precision FP and converted back.
$code.=<<___;
	flddx		$idx($ap),${fai}	; ap[0,1]
	flddx		$idx($np),${fni}	; np[0,1]
	fldws		8($xfer),${fti}R	; tp[0]
	addl		$hi0,$ab1,$ab1
	extrd,u		$ab1,31,32,$hi0
	extrd,u		$ab1,63,32,$ab1
	ldo		8($idx),$idx	; j++++
	xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[1]
	xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[1]
	addl		$hi1,$nm1,$nm1
	addl		$ab1,$nm1,$nm1
	extrd,u		$nm1,31,32,$hi1
	fstws,mb	${fab0}L,-8($xfer)	; save high part
	stw		$nm1,-4($tp)	; tp[j-1]

	fcpy,sgl	%fr0,${fti}L	; zero high part
	fcpy,sgl	%fr0,${fab0}L
	addl		$hi1,$hi0,$hi0
	extrd,u		$hi0,31,32,$hi1
	fcnvxf,dbl,dbl	${fti},${fti}	; 32-bit unsigned int -> double
	fcnvxf,dbl,dbl	${fab0},${fab0}
	stw		$hi0,0($tp)
	stw		$hi1,4($tp)

	fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
	fcnvfx,dbl,dbl	${fab0},${fab0}	; double -> 33-bit unsigned int
	xmpyu		${fn0},${fab0}R,${fm0}
	ldo		`$LOCALS+32+4`($fp),$tp
L\$outer
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[0]*m
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[1]*m
	fstds		${fab0},-16($xfer)	; 33-bit value
	fstds		${fnm0},-8($xfer)
	flddx		$idx($ap),${fai}	; ap[2]
	flddx		$idx($np),${fni}	; np[2]
	ldo		8($idx),$idx	; j++++
	ldd		-16($xfer),$ab0	; 33-bit value
	ldd		-8($xfer),$nm0
	ldw		0($xfer),$hi0	; high part

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	extrd,u		$ab0,31,32,$ti0	; carry bit
	extrd,u		$ab0,63,32,$ab0
	fstds		${fab1},0($xfer)
	addl		$ti0,$hi0,$hi0	; account carry bit
	fstds		${fnm1},8($xfer)
	addl		$ab0,$nm0,$nm0	; low part is discarded
	ldw		0($tp),$ti1	; tp[1]
	extrd,u		$nm0,31,32,$hi1
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)


L\$inner
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[i]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	addl		$hi0,$ti1,$ti1
	addl		$ti1,$ab1,$ab1
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	extrd,u		$ab1,31,32,$hi0
	extrd,u		$ab1,63,32,$ab1
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	flddx		$idx($np),${fni}	; np[j,j+1]
	addl		$hi1,$nm1,$nm1
	addl		$ab1,$nm1,$nm1
	ldw		4($tp),$ti0	; tp[j]
	stw		$nm1,-4($tp)	; tp[j-1]

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldd		-16($xfer),$ab0
	fstds		${fab0},-16($xfer)
	addl		$hi0,$ti0,$ti0
	addl		$ti0,$ab0,$ab0
	ldd		-8($xfer),$nm0
	fstds		${fnm0},-8($xfer)
	extrd,u		$ab0,31,32,$hi0
	extrd,u		$nm1,31,32,$hi1
	ldw		8($tp),$ti1	; tp[j]
	extrd,u		$ab0,63,32,$ab0
	addl		$hi1,$nm0,$nm0
	addl		$ab0,$nm0,$nm0
	stw,ma		$nm0,8($tp)	; tp[j-1]
	addib,<>	8,$idx,L\$inner	; j++++
	extrd,u		$nm0,31,32,$hi1

	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[i]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	ldd		0($xfer),$ab1
	fstds		${fab1},0($xfer)
	addl		$hi0,$ti1,$ti1
	addl		$ti1,$ab1,$ab1
	ldd		8($xfer),$nm1
	fstds		${fnm1},8($xfer)
	extrd,u		$ab1,31,32,$hi0
	extrd,u		$ab1,63,32,$ab1
	ldw		4($tp),$ti0	; tp[j]
	addl		$hi1,$nm1,$nm1
	addl		$ab1,$nm1,$nm1
	ldd		-16($xfer),$ab0
	ldd		-8($xfer),$nm0
	extrd,u		$nm1,31,32,$hi1

	addl		$hi0,$ab0,$ab0
	addl		$ti0,$ab0,$ab0
	stw		$nm1,-4($tp)	; tp[j-1]
	extrd,u		$ab0,31,32,$hi0
	ldw		8($tp),$ti1	; tp[j]
	extrd,u		$ab0,63,32,$ab0
	addl		$hi1,$nm0,$nm0
	ldd		0($xfer),$ab1
	addl		$ab0,$nm0,$nm0
	ldd,mb		8($xfer),$nm1
	extrd,u		$nm0,31,32,$hi1
	stw,ma		$nm0,8($tp)	; tp[j-1]

	addib,=		-1,$num,L\$outerdone	; i--
	subi		0,$arrsz,$idx	; j=0
___
# Advance to the next bp word; in 64-bit mode the stride alternates
# (+12/-4, selected by the addl,ev nullification) to keep the flipped
# word order.
$code.=<<___ if ($BN_SZ==4);
	fldws,ma	4($bp),${fbi}	; bp[i]
___
$code.=<<___ if ($BN_SZ==8);
	ldi		12,$ti0		; bp[i] in flipped word order
	addl,ev		%r0,$num,$num
	ldi		-4,$ti0
	addl		$ti0,$bp,$bp
	fldws		0($bp),${fbi}
___
# Outer-iteration wrap-up: fold carries into tp[num]/tp[num+1], compute
# the next m (again via double-precision FP, as tp[0]+product may be
# 33 bits), and loop back to L$outer.
$code.=<<___;
	flddx		$idx($ap),${fai}	; ap[0]
	addl		$hi0,$ab1,$ab1
	flddx		$idx($np),${fni}	; np[0]
	fldws		8($xfer),${fti}R	; tp[0]
	addl		$ti1,$ab1,$ab1
	extrd,u		$ab1,31,32,$hi0
	extrd,u		$ab1,63,32,$ab1

	ldo		8($idx),$idx	; j++++
	xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[i]
	xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[i]
	ldw		4($tp),$ti0	; tp[j]

	addl		$hi1,$nm1,$nm1
	fstws,mb	${fab0}L,-8($xfer)	; save high part
	addl		$ab1,$nm1,$nm1
	extrd,u		$nm1,31,32,$hi1
	fcpy,sgl	%fr0,${fti}L	; zero high part
	fcpy,sgl	%fr0,${fab0}L
	stw		$nm1,-4($tp)	; tp[j-1]

	fcnvxf,dbl,dbl	${fti},${fti}	; 32-bit unsigned int -> double
	fcnvxf,dbl,dbl	${fab0},${fab0}
	addl		$hi1,$hi0,$hi0
	fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
	addl		$ti0,$hi0,$hi0
	extrd,u		$hi0,31,32,$hi1
	fcnvfx,dbl,dbl	${fab0},${fab0}	; double -> 33-bit unsigned int
	stw		$hi0,0($tp)
	stw		$hi1,4($tp)
	xmpyu		${fn0},${fab0}R,${fm0}

	b		L\$outer
	ldo		`$LOCALS+32+4`($fp),$tp


L\$outerdone
	addl		$hi0,$ab1,$ab1
	addl		$ti1,$ab1,$ab1
	extrd,u		$ab1,31,32,$hi0
	extrd,u		$ab1,63,32,$ab1

	ldw		4($tp),$ti0	; tp[j]

	addl		$hi1,$nm1,$nm1
	addl		$ab1,$nm1,$nm1
	extrd,u		$nm1,31,32,$hi1
	stw		$nm1,-4($tp)	; tp[j-1]

	addl		$hi1,$hi0,$hi0
	addl		$ti0,$hi0,$hi0
	extrd,u		$hi0,31,32,$hi1
	stw		$hi0,0($tp)
	stw		$hi1,4($tp)

	ldo		`$LOCALS+32`($fp),$tp
	sub		%r0,%r0,%r0	; clear borrow
___
# Conditional final subtraction tp-np; the 32-bit path defers to
# L$sub_pa11 when rp is not 64-bit aligned.
$code.=<<___ if ($BN_SZ==4);
	ldws,ma		4($tp),$ti0
	extru,=		$rp,31,3,%r0	; is rp 64-bit aligned?
	b		L\$sub_pa11
	addl		$tp,$arrsz,$tp
L\$sub
	ldwx		$idx($np),$hi0
	subb		$ti0,$hi0,$hi1
	ldwx		$idx($tp),$ti0
	addib,<>	4,$idx,L\$sub
	stws,ma		$hi1,4($rp)

	subb		$ti0,%r0,$hi1
___
$code.=<<___ if ($BN_SZ==8);
	ldd,ma		8($tp),$ti0
L\$sub
	ldd		$idx($np),$hi0
	shrpd		$ti0,$ti0,32,$ti0	; flip word order
	std		$ti0,-8($tp)	; save flipped value
	sub,db		$ti0,$hi0,$hi1
	ldd,ma		8($tp),$ti0
	addib,<>	8,$idx,L\$sub
	std,ma		$hi1,8($rp)

	extrd,u		$ti0,31,32,$ti0	; carry in flipped word order
	sub,db		$ti0,%r0,$hi1
___
# Copy result into rp: keep tp-np if the subtraction did not borrow,
# otherwise the already-stored tp; wipe tp as we go.
$code.=<<___;
	ldo		`$LOCALS+32`($fp),$tp
	sub		$rp,$arrsz,$rp	; rewind rp
	subi		0,$arrsz,$idx
L\$copy
	ldd		0($tp),$ti0
	ldd		0($rp),$hi0
	std,ma		%r0,8($tp)
	comiclr,=	0,$hi1,%r0
	copy		$ti0,$hi0
	addib,<>	8,$idx,L\$copy
	std,ma		$hi0,8($rp)
___
552
# PA-RISC 1.1 code path (assembled only in 32-bit builds).  Same
# algorithm as the 2.0 path above, but 64-bit products are handled as
# explicit hi/lo 32-bit register pairs with add/addc carry chains
# instead of 64-bit ldd/extrd.
if ($BN_SZ==4) {				# PA-RISC 1.1 code-path
# Register aliases for the hi/lo halves of the products.
$ablo=$ab0;
$abhi=$ab1;
$nmlo0=$nm0;
$nmhi0=$nm1;
$nmlo1="%r9";
$nmhi1="%r8";

$code.=<<___;
	b		L\$done
	nop

	.ALIGN	8
L\$parisc11
	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldw		-12($xfer),$ablo
	ldw		-16($xfer),$hi0
	ldw		-4($xfer),$nmlo0
	ldw		-8($xfer),$nmhi0
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)

	ldo		8($idx),$idx	; j++++
	add		$ablo,$nmlo0,$nmlo0	; discarded
	addc		%r0,$nmhi0,$hi1
	ldw		4($xfer),$ablo
	ldw		0($xfer),$abhi
	nop


L\$1st_pa11
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[0]
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	flddx		$idx($np),${fni}	; np[j,j+1]
	add		$hi0,$ablo,$ablo
	ldw		12($xfer),$nmlo1
	addc		%r0,$abhi,$hi0
	ldw		8($xfer),$nmhi1
	add		$ablo,$nmlo1,$nmlo1
	fstds		${fab1},0($xfer)
	addc		%r0,$nmhi1,$nmhi1
	fstds		${fnm1},8($xfer)
	add		$hi1,$nmlo1,$nmlo1
	ldw		-12($xfer),$ablo
	addc		%r0,$nmhi1,$hi1
	ldw		-16($xfer),$abhi

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[0]
	ldw		-4($xfer),$nmlo0
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldw		-8($xfer),$nmhi0
	add		$hi0,$ablo,$ablo
	stw		$nmlo1,-4($tp)	; tp[j-1]
	addc		%r0,$abhi,$hi0
	fstds		${fab0},-16($xfer)
	add		$ablo,$nmlo0,$nmlo0
	fstds		${fnm0},-8($xfer)
	addc		%r0,$nmhi0,$nmhi0
	ldw		0($xfer),$abhi
	add		$hi1,$nmlo0,$nmlo0
	ldw		4($xfer),$ablo
	stws,ma		$nmlo0,8($tp)	; tp[j-1]
	addib,<>	8,$idx,L\$1st_pa11	; j++++
	addc		%r0,$nmhi0,$hi1

	ldw		8($xfer),$nmhi1
	ldw		12($xfer),$nmlo1
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[0]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	add		$hi0,$ablo,$ablo
	fstds		${fab1},0($xfer)
	addc		%r0,$abhi,$hi0
	fstds		${fnm1},8($xfer)
	add		$ablo,$nmlo1,$nmlo1
	ldw		-16($xfer),$abhi
	addc		%r0,$nmhi1,$nmhi1
	ldw		-12($xfer),$ablo
	add		$hi1,$nmlo1,$nmlo1
	ldw		-8($xfer),$nmhi0
	addc		%r0,$nmhi1,$hi1
	ldw		-4($xfer),$nmlo0

	add		$hi0,$ablo,$ablo
	stw		$nmlo1,-4($tp)	; tp[j-1]
	addc		%r0,$abhi,$hi0
	ldw		0($xfer),$abhi
	add		$ablo,$nmlo0,$nmlo0
	ldw		4($xfer),$ablo
	addc		%r0,$nmhi0,$nmhi0
	ldws,mb		8($xfer),$nmhi1
	add		$hi1,$nmlo0,$nmlo0
	ldw		4($xfer),$nmlo1
	addc		%r0,$nmhi0,$hi1
	stws,ma		$nmlo0,8($tp)	; tp[j-1]

	ldo		-1($num),$num	; i--
	subi		0,$arrsz,$idx	; j=0

	fldws,ma	4($bp),${fbi}	; bp[1]
	flddx		$idx($ap),${fai}	; ap[0,1]
	flddx		$idx($np),${fni}	; np[0,1]
	fldws		8($xfer),${fti}R	; tp[0]
	add		$hi0,$ablo,$ablo
	addc		%r0,$abhi,$hi0
	ldo		8($idx),$idx	; j++++
	xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[1]
	xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[1]
	add		$hi1,$nmlo1,$nmlo1
	addc		%r0,$nmhi1,$nmhi1
	add		$ablo,$nmlo1,$nmlo1
	addc		%r0,$nmhi1,$hi1
	fstws,mb	${fab0}L,-8($xfer)	; save high part
	stw		$nmlo1,-4($tp)	; tp[j-1]

	fcpy,sgl	%fr0,${fti}L	; zero high part
	fcpy,sgl	%fr0,${fab0}L
	add		$hi1,$hi0,$hi0
	addc		%r0,%r0,$hi1
	fcnvxf,dbl,dbl	${fti},${fti}	; 32-bit unsigned int -> double
	fcnvxf,dbl,dbl	${fab0},${fab0}
	stw		$hi0,0($tp)
	stw		$hi1,4($tp)

	fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
	fcnvfx,dbl,dbl	${fab0},${fab0}	; double -> 33-bit unsigned int
	xmpyu		${fn0},${fab0}R,${fm0}
	ldo		`$LOCALS+32+4`($fp),$tp
L\$outer_pa11
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[0]*m
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[1]*m
	fstds		${fab0},-16($xfer)	; 33-bit value
	fstds		${fnm0},-8($xfer)
	flddx		$idx($ap),${fai}	; ap[2,3]
	flddx		$idx($np),${fni}	; np[2,3]
	ldw		-16($xfer),$abhi	; carry bit actually
	ldo		8($idx),$idx	; j++++
	ldw		-12($xfer),$ablo
	ldw		-8($xfer),$nmhi0
	ldw		-4($xfer),$nmlo0
	ldw		0($xfer),$hi0	; high part

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	fstds		${fab1},0($xfer)
	addl		$abhi,$hi0,$hi0	; account carry bit
	fstds		${fnm1},8($xfer)
	add		$ablo,$nmlo0,$nmlo0	; discarded
	ldw		0($tp),$ti1	; tp[1]
	addc		%r0,$nmhi0,$hi1
	fstds		${fab0},-16($xfer)
	fstds		${fnm0},-8($xfer)
	ldw		4($xfer),$ablo
	ldw		0($xfer),$abhi


L\$inner_pa11
	xmpyu		${fai}R,${fbi},${fab1}	; ap[j+1]*bp[i]
	flddx		$idx($ap),${fai}	; ap[j,j+1]
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j+1]*m
	flddx		$idx($np),${fni}	; np[j,j+1]
	add		$hi0,$ablo,$ablo
	ldw		4($tp),$ti0	; tp[j]
	addc		%r0,$abhi,$abhi
	ldw		12($xfer),$nmlo1
	add		$ti1,$ablo,$ablo
	ldw		8($xfer),$nmhi1
	addc		%r0,$abhi,$hi0
	fstds		${fab1},0($xfer)
	add		$ablo,$nmlo1,$nmlo1
	fstds		${fnm1},8($xfer)
	addc		%r0,$nmhi1,$nmhi1
	ldw		-12($xfer),$ablo
	add		$hi1,$nmlo1,$nmlo1
	ldw		-16($xfer),$abhi
	addc		%r0,$nmhi1,$hi1

	xmpyu		${fai}L,${fbi},${fab0}	; ap[j]*bp[i]
	ldw		8($tp),$ti1	; tp[j]
	xmpyu		${fni}L,${fm0}R,${fnm0}	; np[j]*m
	ldw		-4($xfer),$nmlo0
	add		$hi0,$ablo,$ablo
	ldw		-8($xfer),$nmhi0
	addc		%r0,$abhi,$abhi
	stw		$nmlo1,-4($tp)	; tp[j-1]
	add		$ti0,$ablo,$ablo
	fstds		${fab0},-16($xfer)
	addc		%r0,$abhi,$hi0
	fstds		${fnm0},-8($xfer)
	add		$ablo,$nmlo0,$nmlo0
	ldw		4($xfer),$ablo
	addc		%r0,$nmhi0,$nmhi0
	ldw		0($xfer),$abhi
	add		$hi1,$nmlo0,$nmlo0
	stws,ma		$nmlo0,8($tp)	; tp[j-1]
	addib,<>	8,$idx,L\$inner_pa11	; j++++
	addc		%r0,$nmhi0,$hi1

	xmpyu		${fai}R,${fbi},${fab1}	; ap[j]*bp[i]
	ldw		12($xfer),$nmlo1
	xmpyu		${fni}R,${fm0}R,${fnm1}	; np[j]*m
	ldw		8($xfer),$nmhi1
	add		$hi0,$ablo,$ablo
	ldw		4($tp),$ti0	; tp[j]
	addc		%r0,$abhi,$abhi
	fstds		${fab1},0($xfer)
	add		$ti1,$ablo,$ablo
	fstds		${fnm1},8($xfer)
	addc		%r0,$abhi,$hi0
	ldw		-16($xfer),$abhi
	add		$ablo,$nmlo1,$nmlo1
	ldw		-12($xfer),$ablo
	addc		%r0,$nmhi1,$nmhi1
	ldw		-8($xfer),$nmhi0
	add		$hi1,$nmlo1,$nmlo1
	ldw		-4($xfer),$nmlo0
	addc		%r0,$nmhi1,$hi1

	add		$hi0,$ablo,$ablo
	stw		$nmlo1,-4($tp)	; tp[j-1]
	addc		%r0,$abhi,$abhi
	add		$ti0,$ablo,$ablo
	ldw		8($tp),$ti1	; tp[j]
	addc		%r0,$abhi,$hi0
	ldw		0($xfer),$abhi
	add		$ablo,$nmlo0,$nmlo0
	ldw		4($xfer),$ablo
	addc		%r0,$nmhi0,$nmhi0
	ldws,mb		8($xfer),$nmhi1
	add		$hi1,$nmlo0,$nmlo0
	ldw		4($xfer),$nmlo1
	addc		%r0,$nmhi0,$hi1
	stws,ma		$nmlo0,8($tp)	; tp[j-1]

	addib,=		-1,$num,L\$outerdone_pa11	; i--
	subi		0,$arrsz,$idx	; j=0

	fldws,ma	4($bp),${fbi}	; bp[i]
	flddx		$idx($ap),${fai}	; ap[0]
	add		$hi0,$ablo,$ablo
	addc		%r0,$abhi,$abhi
	flddx		$idx($np),${fni}	; np[0]
	fldws		8($xfer),${fti}R	; tp[0]
	add		$ti1,$ablo,$ablo
	addc		%r0,$abhi,$hi0

	ldo		8($idx),$idx	; j++++
	xmpyu		${fai}L,${fbi},${fab0}	; ap[0]*bp[i]
	xmpyu		${fai}R,${fbi},${fab1}	; ap[1]*bp[i]
	ldw		4($tp),$ti0	; tp[j]

	add		$hi1,$nmlo1,$nmlo1
	addc		%r0,$nmhi1,$nmhi1
	fstws,mb	${fab0}L,-8($xfer)	; save high part
	add		$ablo,$nmlo1,$nmlo1
	addc		%r0,$nmhi1,$hi1
	fcpy,sgl	%fr0,${fti}L	; zero high part
	fcpy,sgl	%fr0,${fab0}L
	stw		$nmlo1,-4($tp)	; tp[j-1]

	fcnvxf,dbl,dbl	${fti},${fti}	; 32-bit unsigned int -> double
	fcnvxf,dbl,dbl	${fab0},${fab0}
	add		$hi1,$hi0,$hi0
	addc		%r0,%r0,$hi1
	fadd,dbl	${fti},${fab0},${fab0}	; add tp[0]
	add		$ti0,$hi0,$hi0
	addc		%r0,$hi1,$hi1
	fcnvfx,dbl,dbl	${fab0},${fab0}	; double -> 33-bit unsigned int
	stw		$hi0,0($tp)
	stw		$hi1,4($tp)
	xmpyu		${fn0},${fab0}R,${fm0}

	b		L\$outer_pa11
	ldo		`$LOCALS+32+4`($fp),$tp


L\$outerdone_pa11
	add		$hi0,$ablo,$ablo
	addc		%r0,$abhi,$abhi
	add		$ti1,$ablo,$ablo
	addc		%r0,$abhi,$hi0

	ldw		4($tp),$ti0	; tp[j]

	add		$hi1,$nmlo1,$nmlo1
	addc		%r0,$nmhi1,$nmhi1
	add		$ablo,$nmlo1,$nmlo1
	addc		%r0,$nmhi1,$hi1
	stw		$nmlo1,-4($tp)	; tp[j-1]

	add		$hi1,$hi0,$hi0
	addc		%r0,%r0,$hi1
	add		$ti0,$hi0,$hi0
	addc		%r0,$hi1,$hi1
	stw		$hi0,0($tp)
	stw		$hi1,4($tp)

	ldo		`$LOCALS+32+4`($fp),$tp
	sub		%r0,%r0,%r0	; clear borrow
	ldw		-4($tp),$ti0
	addl		$tp,$arrsz,$tp
L\$sub_pa11
	ldwx		$idx($np),$hi0
	subb		$ti0,$hi0,$hi1
	ldwx		$idx($tp),$ti0
	addib,<>	4,$idx,L\$sub_pa11
	stws,ma		$hi1,4($rp)

	subb		$ti0,%r0,$hi1

	ldo		`$LOCALS+32`($fp),$tp
	sub		$rp,$arrsz,$rp	; rewind rp
	subi		0,$arrsz,$idx
L\$copy_pa11
	ldw		0($tp),$ti0
	ldw		0($rp),$hi0
	stws,ma		%r0,4($tp)
	comiclr,=	0,$hi1,%r0
	copy		$ti0,$hi0
	addib,<>	4,$idx,L\$copy_pa11
	stws,ma		$hi0,4($rp)

	nop				; alignment
L\$done
___
}
880
881
# Common epilogue: signal success (%r28=1), release the tp[] area,
# restore callee-saved registers and return.  The L$abort label sits
# past the "handled" marker and register pops, so the rejection paths
# (which set %r28=0 before modifying anything but %r3/%sp) return here.
$code.=<<___;
	ldi		1,%r28		; signal "handled"
	ldo		$FRAME($fp),%sp	; destroy tp[num+1]

	$POP	`-$FRAME-$SAVED_RP`(%sp),%r2	; standard epilogue
	$POP	`-$FRAME+1*$SIZE_T`(%sp),%r4
	$POP	`-$FRAME+2*$SIZE_T`(%sp),%r5
	$POP	`-$FRAME+3*$SIZE_T`(%sp),%r6
	$POP	`-$FRAME+4*$SIZE_T`(%sp),%r7
	$POP	`-$FRAME+5*$SIZE_T`(%sp),%r8
	$POP	`-$FRAME+6*$SIZE_T`(%sp),%r9
	$POP	`-$FRAME+7*$SIZE_T`(%sp),%r10
L\$abort
	bv	(%r2)
	.EXIT
	$POPMB	-$FRAME(%sp),%r3
	.PROCEND
	.STRINGZ "Montgomery Multiplication for PA-RISC, CRYPTOGAMS by <appro\@openssl.org>"
___
901
902
903# Explicitly encode PA-RISC 2.0 instructions used in this module, so
904# that it can be compiled with .LEVEL 1.0. It should be noted that I
905# wouldn't have to do this, if GNU assembler understood .ALLOW 2.0
906# directive...
907
# Encoder for "ldd" (64-bit load, a PA-RISC 2.0 instruction): emits a
# hand-assembled .WORD so the file can be assembled at .LEVEL 1.1.
my $ldd = sub {
	my ($mod,$args) = @_;
	my $orig = "ldd$mod\t$args";

	# format 4: indexed, ldd %rX(%rB),%rT
	if ($args =~ /%r([0-9]+)\(%r([0-9]+)\),%r([0-9]+)/)
	{	my $opcode=(0x03<<26)|($2<<21)|($1<<16)|(3<<6)|$3;
		sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
	}
	# format 5: short displacement, ldd disp(%rB),%rT
	elsif ($args =~ /(\-?[0-9]+)\(%r([0-9]+)\),%r([0-9]+)/)
	{	my $opcode=(0x03<<26)|($2<<21)|(1<<12)|(3<<6)|$3;
		$opcode|=(($1&0xF)<<17)|(($1&0x10)<<12);	# encode offset
		$opcode|=(1<<5)  if ($mod =~ /^,m/);		# post-modify
		$opcode|=(1<<13) if ($mod =~ /^,mb/);		# modify-before
		sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
	}
	# anything unrecognized passes through to the assembler verbatim
	else { "\t".$orig; }
};
925
# Encoder for "std" (64-bit store): emitted as a .WORD for .LEVEL 1.1.
my $std = sub {
	my ($mod,$args) = @_;
	my $orig = "std$mod\t$args";

	# format 6: short displacement, std %rS,disp(%rB)
	if ($args =~ /%r([0-9]+),(\-?[0-9]+)\(%r([0-9]+)\)/)
	{	my $opcode=(0x03<<26)|($3<<21)|($1<<16)|(1<<12)|(0xB<<6);
		$opcode|=(($2&0xF)<<1)|(($2&0x10)>>4);	# encode offset
		$opcode|=(1<<5)  if ($mod =~ /^,m/);	# post-modify
		$opcode|=(1<<13) if ($mod =~ /^,mb/);	# modify-before
		sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
	}
	else { "\t".$orig; }	# pass through unrecognized forms
};
939
# Encoder for "extrd" (64-bit bit-field extract).  Only the ",u"
# completer is ever used in this module; it is implicit in the encoding.
my $extrd = sub {
	my ($mod,$args) = @_;
	my $orig = "extrd$mod\t$args";

	# format 15: fixed position, extrd,u %rS,pos,len,%rT
	if ($args =~ /%r([0-9]+),([0-9]+),([0-9]+),%r([0-9]+)/)
	{	my $opcode=(0x36<<26)|($1<<21)|($4<<16);
		my $len=32-$3;
		$opcode |= (($2&0x20)<<6)|(($2&0x1f)<<5);	# encode pos
		$opcode |= (($len&0x20)<<7)|($len&0x1f);	# encode len
		sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
	}
	# format 12: variable position taken from %sar
	elsif ($args =~ /%r([0-9]+),%sar,([0-9]+),%r([0-9]+)/)
	{	my $opcode=(0x34<<26)|($1<<21)|($3<<16)|(2<<11)|(1<<9);
		my $len=32-$2;
		$opcode |= (($len&0x20)<<3)|($len&0x1f);	# encode len
		$opcode |= (1<<13) if ($mod =~ /,\**=/);	# "=" condition
		sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
	}
	else { "\t".$orig; }	# pass through unrecognized forms
};
961
# Encoder for "shrpd" (64-bit shift-right register pair), emitted as a
# .WORD for .LEVEL 1.1.
my $shrpd = sub {
	my ($mod,$args) = @_;
	my $orig = "shrpd$mod\t$args";

	# format 14: fixed shift amount, shrpd %r1,%r2,sa,%rT
	if ($args =~ /%r([0-9]+),%r([0-9]+),([0-9]+),%r([0-9]+)/)
	{	my $opcode=(0x34<<26)|($2<<21)|($1<<16)|(1<<10)|$4;
		my $cpos=63-$3;
		$opcode |= (($cpos&0x20)<<6)|(($cpos&0x1f)<<5);	# encode sa
		sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig;
	}
	else { "\t".$orig; }	# pass through unrecognized forms
};
974
# Encoder for "sub,db" (subtract with borrow on 64 bits); other sub
# variants pass through to the assembler unchanged.
my $sub = sub {
	my ($mod,$args) = @_;
	my $orig = "sub$mod\t$args";

	if ($mod eq ",db" && $args =~ /%r([0-9]+),%r([0-9]+),%r([0-9]+)/) {
	    my $opcode=(0x02<<26)|($2<<21)|($1<<16)|$3;
	    $opcode|=(1<<10);	# e1
	    $opcode|=(1<<8);	# e2
	    $opcode|=(1<<5);	# d
	    sprintf "\t.WORD\t0x%08x\t; %s",$opcode,$orig
	}
	else { "\t".$orig; }
};
988
# Translate one instruction into its final textual form.  Mnemonics
# that have a hand-encoder (a lexical closure named after the
# mnemonic, e.g. $ldd above) are routed through it; everything else
# is emitted verbatim for the assembler to handle.
sub assemble {
	my ($mnemonic,$mod,$args)=@_;

	# String eval resolves "$<mnemonic>" against the encoder
	# closures in lexical scope; yields undef when there is none.
	my $encoder = eval("\$$mnemonic");
	if (ref($encoder) eq 'CODE') {
		return &$encoder($mod,$args);
	}
	return "\t$mnemonic$mod\t$args";
}
995
# Probe which assembler sits behind $CC: GNU as needs a few syntax
# tweaks (lowercase "2.0w" suffix, .text instead of .SPACE/.SUBSPA)
# applied in the output loop below.  Guard against an unset/empty CC,
# which would otherwise run a malformed shell command and emit
# uninitialized-value warnings; $gnuas simply stays false in that case,
# matching the old behavior.
if (defined $ENV{CC} && $ENV{CC} ne ''
    && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler/) {
    $gnuas = 1;
}
1000
# Post-process $code line by line and print the final assembly.
# Substitution order matters: backtick expressions are expanded first,
# then mode-specific rewrites, then assembler-compatibility tweaks.
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;	# expand `...` compile-time arithmetic
	# flip word order in 64-bit mode...
	s/(xmpyu\s+)($fai|$fni)([LR])/$1.$2.($3 eq "L"?"R":"L")/e if ($BN_SZ==8);
	# assemble 2.0 instructions in 32-bit mode...
	s/^\s+([a-z]+)([\S]*)\s+([\S]*)/&assemble($1,$2,$3)/e if ($BN_SZ==4);

	# GNU assembler compatibility tweaks (64-bit builds only).
	s/(\.LEVEL\s+2\.0)W/$1w/		if ($gnuas && $SIZE_T==8);
	s/\.SPACE\s+\$TEXT\$/.text/		if ($gnuas && $SIZE_T==8);
	s/\.SUBSPA.*//				if ($gnuas && $SIZE_T==8);
	s/\bbv\b/bve/				if ($SIZE_T==8);	# 64-bit return

	print $_,"\n";
}
# Buffered write errors surface at close; fail loudly so the build
# doesn't proceed with a truncated assembly file.
close STDOUT or die "error closing STDOUT: $!";
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette