VirtualBox

source: vbox/trunk/src/libs/openssl-1.1.1l/crypto/bn/asm/armv4-mont.pl@ 91772

Last change on this file since 91772 was 91772, checked in by vboxsync, 3 years ago

openssl-1.1.1l: Applied and adjusted our OpenSSL changes to 1.1.1l. bugref:10126

#! /usr/bin/env perl
# Copyright 2007-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# January 2007.

# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code
# base and compiler generated code with in-lined umull and even umlal
# instructions. The latter means that this code didn't really have an
# "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with Thumb ISA and is rather compact, less
# than 1/2KB. Windows CE port would be trivial, as it's exclusively
# about decorations, ABI and instruction syntax are identical.

# November 2013
#
# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 improvement was measured to vary from ~70% to
# incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
# rather because original integer-only code seems to perform
# suboptimally on S4. Situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually couple
# of percent worse than for integer-only code. The code is chosen
# for execution on all NEON-capable processors, because gain on
# others outweighs the marginal loss on Cortex-A9.

# September 2015
#
# Align Cortex-A9 performance with November 2013 improvements, i.e.
# NEON code is now ~20-105% faster than integer-only one on this
# processor. But this optimization further improved performance even
# on other processors: NEON code path is ~45-180% faster than original
# integer-only on Cortex-A8, ~10-210% on Cortex-A15, ~70-450% on
# Snapdragon S4.

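# For orientation, a minimal big-number sketch of what bn_mul_mont computes
# (result = ap*bp*R^-1 mod np, with R = 2^(32*num)), using Math::BigInt for
# clarity. The assembly below interleaves the reduction word by word with a
# single-word n0 = -np[0]^-1 mod 2^32 rather than the full-width factor used
# here; this helper is illustrative only and is never called.
sub mont_mul_reference {
	my ($a, $b, $n, $num) = @_;	# Math::BigInt values, $num = word count of $n
	require Math::BigInt;
	my $R  = Math::BigInt->bone()->blsft(32*$num);
	my $n0 = $n->copy()->bmodinv($R)->bneg()->bmod($R);	# -n^-1 mod R
	my $t  = $a->copy()->bmul($b);				# t = a*b
	my $m  = $t->copy()->bmul($n0)->bmod($R);		# m = t*n0 mod R
	my $u  = $t->badd($m->bmul($n))->brsft(32*$num);	# (t + m*n)/R, exact division
	$u->bsub($n) if $u->bcmp($n) >= 0;			# conditional final subtraction
	return $u;						# == a*b*R^-1 mod n
}
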
$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
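# Typical build-time invocation (illustrative; the exact arguments come from
# the OpenSSL build system): the first argument selects the perlasm "flavour"
# handled by arm-xlate.pl and the last one names the output file, e.g.
#   perl armv4-mont.pl linux32 armv4-mont.S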

$num="r0";	# starts as num argument, but holds &tp[num-1]
$ap="r1";
$bp="r2"; $bi="r2"; $rp="r2";
$np="r3";
$tp="r4";
$aj="r5";
$nj="r6";
$tj="r7";
$n0="r8";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
$ahi="r11";	# fp
$nlo="r12";	# ip
###########	# r13 is stack pointer
$nhi="r14";	# lr
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
$_rp="$num,#12*4";
# ap permanently resides in r1
$_bp="$num,#13*4";
# np permanently resides in r3
$_n0="$num,#14*4";
$_num="$num,#15*4";	$_bpend=$_num;
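# For reference, the full frame built by the prologue below, with offsets
# relative to $num == &tp[num-1]:
#   [sp] .. [$num]         tp[0] .. tp[num-1]   scratch vector
#   [$num,#4]              tp[num]              top word
#   [$num,#2*4 .. #11*4]   saved r4-r12,lr      from stmdb
#   [$num,#12*4]           rp                   pushed r0
#   [$num,#13*4]           bp                   pushed r2
#   [$num,#14*4]           n0                   caller's 5th argument
#   [$num,#15*4]           num                  caller's 6th argument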

$code=<<___;
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax unified
.thumb
#else
.code 32
#endif

#if __ARM_MAX_ARCH__>=7
.align 5
.LOPENSSL_armcap:
.word OPENSSL_armcap_P-.Lbn_mul_mont
#endif

.global bn_mul_mont
.type bn_mul_mont,%function

.align 5
bn_mul_mont:
.Lbn_mul_mont:
	ldr ip,[sp,#4] @ load num
	stmdb sp!,{r0,r2} @ sp points at argument block
#if __ARM_MAX_ARCH__>=7
	tst ip,#7
	bne .Lialu
	adr r0,.Lbn_mul_mont
	ldr r2,.LOPENSSL_armcap
	ldr r0,[r0,r2]
#ifdef __APPLE__
	ldr r0,[r0]
#endif
	tst r0,#ARMV7_NEON @ NEON available?
	ldmia sp, {r0,r2}
	beq .Lialu
	add sp,sp,#8
	b bn_mul8x_mont_neon
.align 4
.Lialu:
#endif
	cmp ip,#2
	mov $num,ip @ load num
#ifdef __thumb2__
	ittt lt
#endif
	movlt r0,#0
	addlt sp,sp,#2*4
	blt .Labrt

	stmdb sp!,{r4-r12,lr} @ save 10 registers

	mov $num,$num,lsl#2 @ rescale $num for byte count
	sub sp,sp,$num @ alloca(4*num)
	sub sp,sp,#4 @ +extra dword
	sub $num,$num,#4 @ "num=num-1"
	add $tp,$bp,$num @ &bp[num-1]

	add $num,sp,$num @ $num to point at &tp[num-1]
	ldr $n0,[$_n0] @ &n0
	ldr $bi,[$bp] @ bp[0]
	ldr $aj,[$ap],#4 @ ap[0],ap++
	ldr $nj,[$np],#4 @ np[0],np++
	ldr $n0,[$n0] @ *n0
	str $tp,[$_bpend] @ save &bp[num]

	umull $alo,$ahi,$aj,$bi @ ap[0]*bp[0]
	str $n0,[$_n0] @ save n0 value
	mul $n0,$alo,$n0 @ "tp[0]"*n0
	mov $nlo,#0
	umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"t[0]"
	mov $tp,sp

.L1st:
	ldr $aj,[$ap],#4 @ ap[j],ap++
	mov $alo,$ahi
	ldr $nj,[$np],#4 @ np[j],np++
	mov $ahi,#0
	umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[0]
	mov $nhi,#0
	umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
	adds $nlo,$nlo,$alo
	str $nlo,[$tp],#4 @ tp[j-1]=,tp++
	adc $nlo,$nhi,#0
	cmp $tp,$num
	bne .L1st

	adds $nlo,$nlo,$ahi
	ldr $tp,[$_bp] @ restore bp
	mov $nhi,#0
	ldr $n0,[$_n0] @ restore n0
	adc $nhi,$nhi,#0
	str $nlo,[$num] @ tp[num-1]=
	mov $tj,sp
	str $nhi,[$num,#4] @ tp[num]=


.Louter:
	sub $tj,$num,$tj @ "original" $num-1 value
	sub $ap,$ap,$tj @ "rewind" ap to &ap[1]
	ldr $bi,[$tp,#4]! @ *(++bp)
	sub $np,$np,$tj @ "rewind" np to &np[1]
	ldr $aj,[$ap,#-4] @ ap[0]
	ldr $alo,[sp] @ tp[0]
	ldr $nj,[$np,#-4] @ np[0]
	ldr $tj,[sp,#4] @ tp[1]

	mov $ahi,#0
	umlal $alo,$ahi,$aj,$bi @ ap[0]*bp[i]+tp[0]
	str $tp,[$_bp] @ save bp
	mul $n0,$alo,$n0
	mov $nlo,#0
	umlal $alo,$nlo,$nj,$n0 @ np[0]*n0+"tp[0]"
	mov $tp,sp

.Linner:
	ldr $aj,[$ap],#4 @ ap[j],ap++
	adds $alo,$ahi,$tj @ +=tp[j]
	ldr $nj,[$np],#4 @ np[j],np++
	mov $ahi,#0
	umlal $alo,$ahi,$aj,$bi @ ap[j]*bp[i]
	mov $nhi,#0
	umlal $nlo,$nhi,$nj,$n0 @ np[j]*n0
	adc $ahi,$ahi,#0
	ldr $tj,[$tp,#8] @ tp[j+1]
	adds $nlo,$nlo,$alo
	str $nlo,[$tp],#4 @ tp[j-1]=,tp++
	adc $nlo,$nhi,#0
	cmp $tp,$num
	bne .Linner

	adds $nlo,$nlo,$ahi
	mov $nhi,#0
	ldr $tp,[$_bp] @ restore bp
	adc $nhi,$nhi,#0
	ldr $n0,[$_n0] @ restore n0
	adds $nlo,$nlo,$tj
	ldr $tj,[$_bpend] @ restore &bp[num]
	adc $nhi,$nhi,#0
	str $nlo,[$num] @ tp[num-1]=
	str $nhi,[$num,#4] @ tp[num]=

	cmp $tp,$tj
#ifdef __thumb2__
	itt ne
#endif
	movne $tj,sp
	bne .Louter

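@ Final reduction: subtract np from tp into rp (.Lsub), then use the
@ resulting borrow to select, without branching on the data, either the
@ difference already in rp or the original tp (.Lcopy), zapping tp as it
@ is copied out.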
	ldr $rp,[$_rp] @ pull rp
	mov $aj,sp
	add $num,$num,#4 @ $num to point at &tp[num]
	sub $aj,$num,$aj @ "original" num value
	mov $tp,sp @ "rewind" $tp
	mov $ap,$tp @ "borrow" $ap
	sub $np,$np,$aj @ "rewind" $np to &np[0]

	subs $tj,$tj,$tj @ "clear" carry flag
.Lsub:	ldr $tj,[$tp],#4
	ldr $nj,[$np],#4
	sbcs $tj,$tj,$nj @ tp[j]-np[j]
	str $tj,[$rp],#4 @ rp[j]=
	teq $tp,$num @ preserve carry
	bne .Lsub
	sbcs $nhi,$nhi,#0 @ upmost carry
	mov $tp,sp @ "rewind" $tp
	sub $rp,$rp,$aj @ "rewind" $rp

.Lcopy:	ldr $tj,[$tp] @ conditional copy
	ldr $aj,[$rp]
	str sp,[$tp],#4 @ zap tp
#ifdef __thumb2__
	it cc
#endif
	movcc $aj,$tj
	str $aj,[$rp],#4
	teq $tp,$num @ preserve carry
	bne .Lcopy

	mov sp,$num
	add sp,sp,#4 @ skip over tp[num+1]
	ldmia sp!,{r4-r12,lr} @ restore registers
	add sp,sp,#2*4 @ skip over {r0,r2}
	mov r0,#1
.Labrt:
#if __ARM_ARCH__>=5
	ret @ bx lr
#else
	tst lr,#1
	moveq pc,lr @ be binary compatible with V4, yet
	bx lr @ interoperable with Thumb ISA:-)
#endif
.size bn_mul_mont,.-bn_mul_mont
___
{
my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my @ACC=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));
my $zero="$Z#lo";
my $temp="$Temp#lo";

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer,$bnptr)=map("r$_",(6..11));

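# A rough sketch of the NEON strategy below: each b[i] word and each
# per-iteration Montgomery factor is "smashed" into 16-bit halves
# (vzip.16 against a zeroed register), so the 64-bit vmull/vmlal
# accumulator lanes have ample headroom before the explicit 16-bit carry
# propagation steps (the vshl.i64/vshr.u64 #16 sequences). Operands are
# processed eight 32-bit words per pass, which is why the dispatcher only
# takes this entry point when num is divisible by 8.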
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch armv7-a
.fpu neon

.type bn_mul8x_mont_neon,%function
.align 5
bn_mul8x_mont_neon:
	mov ip,sp
	stmdb sp!,{r4-r11}
	vstmdb sp!,{d8-d15} @ ABI specification says so
	ldmia ip,{r4-r5} @ load rest of parameter block
	mov ip,sp

	cmp $num,#8
	bhi .LNEON_8n

	@ special case for $num==8, everything is in register bank...

	vld1.32 {${Bi}[0]}, [$bptr,:32]!
	veor $zero,$zero,$zero
	sub $toutptr,sp,$num,lsl#4
	vld1.32 {$A0-$A3}, [$aptr]! @ can't specify :32 :-(
	and $toutptr,$toutptr,#-64
	vld1.32 {${M0}[0]}, [$n0,:32]
	mov sp,$toutptr @ alloca
	vzip.16 $Bi,$zero

	vmull.u32 @ACC[0],$Bi,${A0}[0]
	vmull.u32 @ACC[1],$Bi,${A0}[1]
	vmull.u32 @ACC[2],$Bi,${A1}[0]
	vshl.i64 $Ni,@ACC[0]#hi,#16
	vmull.u32 @ACC[3],$Bi,${A1}[1]

	vadd.u64 $Ni,$Ni,@ACC[0]#lo
	veor $zero,$zero,$zero
	vmul.u32 $Ni,$Ni,$M0

	vmull.u32 @ACC[4],$Bi,${A2}[0]
	vld1.32 {$N0-$N3}, [$nptr]!
	vmull.u32 @ACC[5],$Bi,${A2}[1]
	vmull.u32 @ACC[6],$Bi,${A3}[0]
	vzip.16 $Ni,$zero
	vmull.u32 @ACC[7],$Bi,${A3}[1]

	vmlal.u32 @ACC[0],$Ni,${N0}[0]
	sub $outer,$num,#1
	vmlal.u32 @ACC[1],$Ni,${N0}[1]
	vmlal.u32 @ACC[2],$Ni,${N1}[0]
	vmlal.u32 @ACC[3],$Ni,${N1}[1]

	vmlal.u32 @ACC[4],$Ni,${N2}[0]
	vmov $Temp,@ACC[0]
	vmlal.u32 @ACC[5],$Ni,${N2}[1]
	vmov @ACC[0],@ACC[1]
	vmlal.u32 @ACC[6],$Ni,${N3}[0]
	vmov @ACC[1],@ACC[2]
	vmlal.u32 @ACC[7],$Ni,${N3}[1]
	vmov @ACC[2],@ACC[3]
	vmov @ACC[3],@ACC[4]
	vshr.u64 $temp,$temp,#16
	vmov @ACC[4],@ACC[5]
	vmov @ACC[5],@ACC[6]
	vadd.u64 $temp,$temp,$Temp#hi
	vmov @ACC[6],@ACC[7]
	veor @ACC[7],@ACC[7]
	vshr.u64 $temp,$temp,#16

	b .LNEON_outer8

.align 4
.LNEON_outer8:
	vld1.32 {${Bi}[0]}, [$bptr,:32]!
	veor $zero,$zero,$zero
	vzip.16 $Bi,$zero
	vadd.u64 @ACC[0]#lo,@ACC[0]#lo,$temp

	vmlal.u32 @ACC[0],$Bi,${A0}[0]
	vmlal.u32 @ACC[1],$Bi,${A0}[1]
	vmlal.u32 @ACC[2],$Bi,${A1}[0]
	vshl.i64 $Ni,@ACC[0]#hi,#16
	vmlal.u32 @ACC[3],$Bi,${A1}[1]

	vadd.u64 $Ni,$Ni,@ACC[0]#lo
	veor $zero,$zero,$zero
	subs $outer,$outer,#1
	vmul.u32 $Ni,$Ni,$M0

	vmlal.u32 @ACC[4],$Bi,${A2}[0]
	vmlal.u32 @ACC[5],$Bi,${A2}[1]
	vmlal.u32 @ACC[6],$Bi,${A3}[0]
	vzip.16 $Ni,$zero
	vmlal.u32 @ACC[7],$Bi,${A3}[1]

	vmlal.u32 @ACC[0],$Ni,${N0}[0]
	vmlal.u32 @ACC[1],$Ni,${N0}[1]
	vmlal.u32 @ACC[2],$Ni,${N1}[0]
	vmlal.u32 @ACC[3],$Ni,${N1}[1]

	vmlal.u32 @ACC[4],$Ni,${N2}[0]
	vmov $Temp,@ACC[0]
	vmlal.u32 @ACC[5],$Ni,${N2}[1]
	vmov @ACC[0],@ACC[1]
	vmlal.u32 @ACC[6],$Ni,${N3}[0]
	vmov @ACC[1],@ACC[2]
	vmlal.u32 @ACC[7],$Ni,${N3}[1]
	vmov @ACC[2],@ACC[3]
	vmov @ACC[3],@ACC[4]
	vshr.u64 $temp,$temp,#16
	vmov @ACC[4],@ACC[5]
	vmov @ACC[5],@ACC[6]
	vadd.u64 $temp,$temp,$Temp#hi
	vmov @ACC[6],@ACC[7]
	veor @ACC[7],@ACC[7]
	vshr.u64 $temp,$temp,#16

	bne .LNEON_outer8

	vadd.u64 @ACC[0]#lo,@ACC[0]#lo,$temp
	mov $toutptr,sp
	vshr.u64 $temp,@ACC[0]#lo,#16
	mov $inner,$num
	vadd.u64 @ACC[0]#hi,@ACC[0]#hi,$temp
	add $tinptr,sp,#96
	vshr.u64 $temp,@ACC[0]#hi,#16
	vzip.16 @ACC[0]#lo,@ACC[0]#hi

	b .LNEON_tail_entry

.align 4
.LNEON_8n:
	veor @ACC[0],@ACC[0],@ACC[0]
	sub $toutptr,sp,#128
	veor @ACC[1],@ACC[1],@ACC[1]
	sub $toutptr,$toutptr,$num,lsl#4
	veor @ACC[2],@ACC[2],@ACC[2]
	and $toutptr,$toutptr,#-64
	veor @ACC[3],@ACC[3],@ACC[3]
	mov sp,$toutptr @ alloca
	veor @ACC[4],@ACC[4],@ACC[4]
	add $toutptr,$toutptr,#256
	veor @ACC[5],@ACC[5],@ACC[5]
	sub $inner,$num,#8
	veor @ACC[6],@ACC[6],@ACC[6]
	veor @ACC[7],@ACC[7],@ACC[7]

.LNEON_8n_init:
	vst1.64 {@ACC[0]-@ACC[1]},[$toutptr,:256]!
	subs $inner,$inner,#8
	vst1.64 {@ACC[2]-@ACC[3]},[$toutptr,:256]!
	vst1.64 {@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64 {@ACC[6]-@ACC[7]},[$toutptr,:256]!
	bne .LNEON_8n_init

	add $tinptr,sp,#256
	vld1.32 {$A0-$A3},[$aptr]!
	add $bnptr,sp,#8
	vld1.32 {${M0}[0]},[$n0,:32]
	mov $outer,$num
	b .LNEON_8n_outer

.align 4
.LNEON_8n_outer:
	vld1.32 {${Bi}[0]},[$bptr,:32]! @ *b++
	veor $zero,$zero,$zero
	vzip.16 $Bi,$zero
	add $toutptr,sp,#128
	vld1.32 {$N0-$N3},[$nptr]!

	vmlal.u32 @ACC[0],$Bi,${A0}[0]
	vmlal.u32 @ACC[1],$Bi,${A0}[1]
	veor $zero,$zero,$zero
	vmlal.u32 @ACC[2],$Bi,${A1}[0]
	vshl.i64 $Ni,@ACC[0]#hi,#16
	vmlal.u32 @ACC[3],$Bi,${A1}[1]
	vadd.u64 $Ni,$Ni,@ACC[0]#lo
	vmlal.u32 @ACC[4],$Bi,${A2}[0]
	vmul.u32 $Ni,$Ni,$M0
	vmlal.u32 @ACC[5],$Bi,${A2}[1]
	vst1.32 {$Bi},[sp,:64] @ put aside smashed b[8*i+0]
	vmlal.u32 @ACC[6],$Bi,${A3}[0]
	vzip.16 $Ni,$zero
	vmlal.u32 @ACC[7],$Bi,${A3}[1]
___
for ($i=0; $i<7;) {
$code.=<<___;
	vld1.32 {${Bi}[0]},[$bptr,:32]! @ *b++
	vmlal.u32 @ACC[0],$Ni,${N0}[0]
	veor $temp,$temp,$temp
	vmlal.u32 @ACC[1],$Ni,${N0}[1]
	vzip.16 $Bi,$temp
	vmlal.u32 @ACC[2],$Ni,${N1}[0]
	vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32 @ACC[3],$Ni,${N1}[1]
	vmlal.u32 @ACC[4],$Ni,${N2}[0]
	vadd.u64 @ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32 @ACC[5],$Ni,${N2}[1]
	vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32 @ACC[6],$Ni,${N3}[0]
	vmlal.u32 @ACC[7],$Ni,${N3}[1]
	vadd.u64 @ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32 {$Ni},[$bnptr,:64]! @ put aside smashed m[8*i+$i]
___
	push(@ACC,shift(@ACC)); $i++;
$code.=<<___;
	vmlal.u32 @ACC[0],$Bi,${A0}[0]
	vld1.64 {@ACC[7]},[$tinptr,:128]!
	vmlal.u32 @ACC[1],$Bi,${A0}[1]
	veor $zero,$zero,$zero
	vmlal.u32 @ACC[2],$Bi,${A1}[0]
	vshl.i64 $Ni,@ACC[0]#hi,#16
	vmlal.u32 @ACC[3],$Bi,${A1}[1]
	vadd.u64 $Ni,$Ni,@ACC[0]#lo
	vmlal.u32 @ACC[4],$Bi,${A2}[0]
	vmul.u32 $Ni,$Ni,$M0
	vmlal.u32 @ACC[5],$Bi,${A2}[1]
	vst1.32 {$Bi},[$bnptr,:64]! @ put aside smashed b[8*i+$i]
	vmlal.u32 @ACC[6],$Bi,${A3}[0]
	vzip.16 $Ni,$zero
	vmlal.u32 @ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	vld1.32 {$Bi},[sp,:64] @ pull smashed b[8*i+0]
	vmlal.u32 @ACC[0],$Ni,${N0}[0]
	vld1.32 {$A0-$A3},[$aptr]!
	vmlal.u32 @ACC[1],$Ni,${N0}[1]
	vmlal.u32 @ACC[2],$Ni,${N1}[0]
	vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32 @ACC[3],$Ni,${N1}[1]
	vmlal.u32 @ACC[4],$Ni,${N2}[0]
	vadd.u64 @ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32 @ACC[5],$Ni,${N2}[1]
	vshr.u64 @ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32 @ACC[6],$Ni,${N3}[0]
	vmlal.u32 @ACC[7],$Ni,${N3}[1]
	vadd.u64 @ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32 {$Ni},[$bnptr,:64] @ put aside smashed m[8*i+$i]
	add $bnptr,sp,#8 @ rewind
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	sub $inner,$num,#8
	b .LNEON_8n_inner

.align 4
.LNEON_8n_inner:
	subs $inner,$inner,#8
	vmlal.u32 @ACC[0],$Bi,${A0}[0]
	vld1.64 {@ACC[7]},[$tinptr,:128]
	vmlal.u32 @ACC[1],$Bi,${A0}[1]
	vld1.32 {$Ni},[$bnptr,:64]! @ pull smashed m[8*i+0]
	vmlal.u32 @ACC[2],$Bi,${A1}[0]
	vld1.32 {$N0-$N3},[$nptr]!
	vmlal.u32 @ACC[3],$Bi,${A1}[1]
	it ne
	addne $tinptr,$tinptr,#16 @ don't advance in last iteration
	vmlal.u32 @ACC[4],$Bi,${A2}[0]
	vmlal.u32 @ACC[5],$Bi,${A2}[1]
	vmlal.u32 @ACC[6],$Bi,${A3}[0]
	vmlal.u32 @ACC[7],$Bi,${A3}[1]
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vld1.32 {$Bi},[$bnptr,:64]! @ pull smashed b[8*i+$i]
	vmlal.u32 @ACC[0],$Ni,${N0}[0]
	vmlal.u32 @ACC[1],$Ni,${N0}[1]
	vmlal.u32 @ACC[2],$Ni,${N1}[0]
	vmlal.u32 @ACC[3],$Ni,${N1}[1]
	vmlal.u32 @ACC[4],$Ni,${N2}[0]
	vmlal.u32 @ACC[5],$Ni,${N2}[1]
	vmlal.u32 @ACC[6],$Ni,${N3}[0]
	vmlal.u32 @ACC[7],$Ni,${N3}[1]
	vst1.64 {@ACC[0]},[$toutptr,:128]!
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	vmlal.u32 @ACC[0],$Bi,${A0}[0]
	vld1.64 {@ACC[7]},[$tinptr,:128]
	vmlal.u32 @ACC[1],$Bi,${A0}[1]
	vld1.32 {$Ni},[$bnptr,:64]! @ pull smashed m[8*i+$i]
	vmlal.u32 @ACC[2],$Bi,${A1}[0]
	it ne
	addne $tinptr,$tinptr,#16 @ don't advance in last iteration
	vmlal.u32 @ACC[3],$Bi,${A1}[1]
	vmlal.u32 @ACC[4],$Bi,${A2}[0]
	vmlal.u32 @ACC[5],$Bi,${A2}[1]
	vmlal.u32 @ACC[6],$Bi,${A3}[0]
	vmlal.u32 @ACC[7],$Bi,${A3}[1]
___
}
$code.=<<___;
	it eq
	subeq $aptr,$aptr,$num,lsl#2 @ rewind
	vmlal.u32 @ACC[0],$Ni,${N0}[0]
	vld1.32 {$Bi},[sp,:64] @ pull smashed b[8*i+0]
	vmlal.u32 @ACC[1],$Ni,${N0}[1]
	vld1.32 {$A0-$A3},[$aptr]!
	vmlal.u32 @ACC[2],$Ni,${N1}[0]
	add $bnptr,sp,#8 @ rewind
	vmlal.u32 @ACC[3],$Ni,${N1}[1]
	vmlal.u32 @ACC[4],$Ni,${N2}[0]
	vmlal.u32 @ACC[5],$Ni,${N2}[1]
	vmlal.u32 @ACC[6],$Ni,${N3}[0]
	vst1.64 {@ACC[0]},[$toutptr,:128]!
	vmlal.u32 @ACC[7],$Ni,${N3}[1]

	bne .LNEON_8n_inner
___
	push(@ACC,shift(@ACC));
$code.=<<___;
	add $tinptr,sp,#128
	vst1.64 {@ACC[0]-@ACC[1]},[$toutptr,:256]!
	veor q2,q2,q2 @ $N0-$N1
	vst1.64 {@ACC[2]-@ACC[3]},[$toutptr,:256]!
	veor q3,q3,q3 @ $N2-$N3
	vst1.64 {@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64 {@ACC[6]},[$toutptr,:128]

	subs $outer,$outer,#8
	vld1.64 {@ACC[0]-@ACC[1]},[$tinptr,:256]!
	vld1.64 {@ACC[2]-@ACC[3]},[$tinptr,:256]!
	vld1.64 {@ACC[4]-@ACC[5]},[$tinptr,:256]!
	vld1.64 {@ACC[6]-@ACC[7]},[$tinptr,:256]!

	itt ne
	subne $nptr,$nptr,$num,lsl#2 @ rewind
	bne .LNEON_8n_outer

	add $toutptr,sp,#128
	vst1.64 {q2-q3}, [sp,:256]! @ start wiping stack frame
	vshr.u64 $temp,@ACC[0]#lo,#16
	vst1.64 {q2-q3},[sp,:256]!
	vadd.u64 @ACC[0]#hi,@ACC[0]#hi,$temp
	vst1.64 {q2-q3}, [sp,:256]!
	vshr.u64 $temp,@ACC[0]#hi,#16
	vst1.64 {q2-q3}, [sp,:256]!
	vzip.16 @ACC[0]#lo,@ACC[0]#hi

	mov $inner,$num
	b .LNEON_tail_entry

.align 4
.LNEON_tail:
	vadd.u64 @ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64 $temp,@ACC[0]#lo,#16
	vld1.64 {@ACC[2]-@ACC[3]}, [$tinptr, :256]!
	vadd.u64 @ACC[0]#hi,@ACC[0]#hi,$temp
	vld1.64 {@ACC[4]-@ACC[5]}, [$tinptr, :256]!
	vshr.u64 $temp,@ACC[0]#hi,#16
	vld1.64 {@ACC[6]-@ACC[7]}, [$tinptr, :256]!
	vzip.16 @ACC[0]#lo,@ACC[0]#hi

.LNEON_tail_entry:
___
for ($i=1; $i<8; $i++) {
$code.=<<___;
	vadd.u64 @ACC[1]#lo,@ACC[1]#lo,$temp
	vst1.32 {@ACC[0]#lo[0]}, [$toutptr, :32]!
	vshr.u64 $temp,@ACC[1]#lo,#16
	vadd.u64 @ACC[1]#hi,@ACC[1]#hi,$temp
	vshr.u64 $temp,@ACC[1]#hi,#16
	vzip.16 @ACC[1]#lo,@ACC[1]#hi
___
	push(@ACC,shift(@ACC));
}
	push(@ACC,shift(@ACC));
$code.=<<___;
	vld1.64 {@ACC[0]-@ACC[1]}, [$tinptr, :256]!
	subs $inner,$inner,#8
	vst1.32 {@ACC[7]#lo[0]}, [$toutptr, :32]!
	bne .LNEON_tail

	vst1.32 {${temp}[0]}, [$toutptr, :32] @ top-most bit
	sub $nptr,$nptr,$num,lsl#2 @ rewind $nptr
	subs $aptr,sp,#0 @ clear carry flag
	add $bptr,sp,$num,lsl#2

.LNEON_sub:
	ldmia $aptr!, {r4-r7}
	ldmia $nptr!, {r8-r11}
	sbcs r8, r4,r8
	sbcs r9, r5,r9
	sbcs r10,r6,r10
	sbcs r11,r7,r11
	teq $aptr,$bptr @ preserves carry
	stmia $rptr!, {r8-r11}
	bne .LNEON_sub

	ldr r10, [$aptr] @ load top-most bit
	mov r11,sp
	veor q0,q0,q0
	sub r11,$bptr,r11 @ this is num*4
	veor q1,q1,q1
	mov $aptr,sp
	sub $rptr,$rptr,r11 @ rewind $rptr
	mov $nptr,$bptr @ second 3/4th of frame
	sbcs r10,r10,#0 @ result is carry flag

.LNEON_copy_n_zap:
	ldmia $aptr!, {r4-r7}
	ldmia $rptr, {r8-r11}
	it cc
	movcc r8, r4
	vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
	itt cc
	movcc r9, r5
	movcc r10,r6
	vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
	it cc
	movcc r11,r7
	ldmia $aptr, {r4-r7}
	stmia $rptr!, {r8-r11}
	sub $aptr,$aptr,#16
	ldmia $rptr, {r8-r11}
	it cc
	movcc r8, r4
	vst1.64 {q0-q1}, [$aptr,:256]! @ wipe
	itt cc
	movcc r9, r5
	movcc r10,r6
	vst1.64 {q0-q1}, [$nptr,:256]! @ wipe
	it cc
	movcc r11,r7
	teq $aptr,$bptr @ preserves carry
	stmia $rptr!, {r8-r11}
	bne .LNEON_copy_n_zap

	mov sp,ip
	vldmia sp!,{d8-d15}
	ldmia sp!,{r4-r11}
	ret @ bx lr
.size bn_mul8x_mont_neon,.-bn_mul8x_mont_neon
#endif
___
}
$code.=<<___;
.asciz "Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align 2
#if __ARM_MAX_ARCH__>=7
.comm OPENSSL_armcap_P,4,4
#endif
___

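# Post-process the accumulated code: evaluate `...` expressions, expand the
# q<N>#lo/#hi shorthand into the underlying d registers, rewrite "ret" as
# "bx lr", and encode any literal "bx lr" as a raw .word so the result still
# assembles with -march=armv4.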
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/ge	or
	s/\bret\b/bx lr/g						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/g;	# make it possible to compile with -march=armv4

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";