VirtualBox

source: vbox/trunk/src/libs/openssl-1.1.1l/crypto/poly1305/asm/poly1305-armv4.pl@91772

Last change on this file since 91772 was 91772, checked in by vboxsync, 3 years ago

openssl-1.1.1l: Applied and adjusted our OpenSSL changes to 1.1.1l. bugref:10126

  • Property svn:executable set to *
File size: 29.2 KB
#! /usr/bin/env perl
# Copyright 2016-2021 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
#			IALU(*)/gcc-4.4	NEON
#
# ARM11xx(ARMv6)	7.78/+100%	-
# Cortex-A5		6.35/+130%	3.00
# Cortex-A8		6.25/+115%	2.36
# Cortex-A9		5.10/+95%	2.55
# Cortex-A15		3.85/+85%	1.25(**)
# Snapdragon S4		5.70/+100%	1.48(**)
#
# (*)	this is for -march=armv6, i.e. with a bunch of ldrb loading data;
# (**)	these are trade-off results, they can be improved by ~8% but at
#	the cost of 15/12% regression on Cortex-A5/A7, it's even possible
#	to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;
$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
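
# A recognized flavour (e.g. "linux32") routes the output through the
# perlasm translator arm-xlate.pl, which adapts the generic syntax used
# below to the target assembler; with no flavour the code is written out
# verbatim.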

($ctx,$inp,$len,$padbit)=map("r$_",(0..3));
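# (AAPCS passes the first four arguments in r0-r3; $ctx/$inp/$len/$padbit
# are just readable names for those registers.)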

$code.=<<___;
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

.globl	poly1305_emit
.globl	poly1305_blocks
.globl	poly1305_init
.type	poly1305_init,%function
.align	5
poly1305_init:
.Lpoly1305_init:
	stmdb	sp!,{r4-r11}

	eor	r3,r3,r3
	cmp	$inp,#0
	str	r3,[$ctx,#0]		@ zero hash value
	str	r3,[$ctx,#4]
	str	r3,[$ctx,#8]
	str	r3,[$ctx,#12]
	str	r3,[$ctx,#16]
	str	r3,[$ctx,#36]		@ is_base2_26
	add	$ctx,$ctx,#20

#ifdef __thumb2__
	it	eq
#endif
	moveq	r0,#0
	beq	.Lno_key

#if __ARM_MAX_ARCH__>=7
	adr	r11,.Lpoly1305_init
	ldr	r12,.LOPENSSL_armcap
#endif
	ldrb	r4,[$inp,#0]
	mov	r10,#0x0fffffff
	ldrb	r5,[$inp,#1]
	and	r3,r10,#-4		@ 0x0ffffffc
	ldrb	r6,[$inp,#2]
	ldrb	r7,[$inp,#3]
	orr	r4,r4,r5,lsl#8
	ldrb	r5,[$inp,#4]
	orr	r4,r4,r6,lsl#16
	ldrb	r6,[$inp,#5]
	orr	r4,r4,r7,lsl#24
	ldrb	r7,[$inp,#6]
	and	r4,r4,r10

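	@ The byte loads interleave with the clamping of the key r required
	@ by the Poly1305 spec (RFC 7539):
	@ r &= 0x0ffffffc0ffffffc0ffffffc0fffffff,
	@ i.e. r10 (0x0fffffff) masks the first 32-bit word and r3
	@ (0x0ffffffc) masks the remaining three below.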
#if __ARM_MAX_ARCH__>=7
	ldr	r12,[r11,r12]		@ OPENSSL_armcap_P
# ifdef	__APPLE__
	ldr	r12,[r12]
# endif
#endif
	ldrb	r8,[$inp,#7]
	orr	r5,r5,r6,lsl#8
	ldrb	r6,[$inp,#8]
	orr	r5,r5,r7,lsl#16
	ldrb	r7,[$inp,#9]
	orr	r5,r5,r8,lsl#24
	ldrb	r8,[$inp,#10]
	and	r5,r5,r3

#if __ARM_MAX_ARCH__>=7
	tst	r12,#ARMV7_NEON		@ check for NEON
# ifdef	__APPLE__
	adr	r9,poly1305_blocks_neon
	adr	r11,poly1305_blocks
# ifdef	__thumb2__
	it	ne
# endif
	movne	r11,r9
	adr	r12,poly1305_emit
	adr	r10,poly1305_emit_neon
# ifdef	__thumb2__
	it	ne
# endif
	movne	r12,r10
# else
# ifdef	__thumb2__
	itete	eq
# endif
	addeq	r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
	addne	r12,r11,#(.Lpoly1305_emit_neon-.Lpoly1305_init)
	addeq	r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
	addne	r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
# endif
# ifdef	__thumb2__
	orr	r12,r12,#1		@ thumb-ify address
	orr	r11,r11,#1
# endif
#endif
	ldrb	r9,[$inp,#11]
	orr	r6,r6,r7,lsl#8
	ldrb	r7,[$inp,#12]
	orr	r6,r6,r8,lsl#16
	ldrb	r8,[$inp,#13]
	orr	r6,r6,r9,lsl#24
	ldrb	r9,[$inp,#14]
	and	r6,r6,r3

	ldrb	r10,[$inp,#15]
	orr	r7,r7,r8,lsl#8
	str	r4,[$ctx,#0]
	orr	r7,r7,r9,lsl#16
	str	r5,[$ctx,#4]
	orr	r7,r7,r10,lsl#24
	str	r6,[$ctx,#8]
	and	r7,r7,r3
	str	r7,[$ctx,#12]
#if __ARM_MAX_ARCH__>=7
	stmia	r2,{r11,r12}		@ fill functions table
	mov	r0,#1
#else
	mov	r0,#0
#endif
.Lno_key:
	ldmia	sp!,{r4-r11}
#if __ARM_ARCH__>=5
	ret				@ bx	lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	poly1305_init,.-poly1305_init
___
{
my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12));
my ($s1,$s2,$s3)=($r1,$r2,$r3);

$code.=<<___;
.type	poly1305_blocks,%function
.align	5
poly1305_blocks:
.Lpoly1305_blocks:
	stmdb	sp!,{r3-r11,lr}

	ands	$len,$len,#-16
	beq	.Lno_data

	cmp	$padbit,#0
	add	$len,$len,$inp		@ end pointer
	sub	sp,sp,#32

	ldmia	$ctx,{$h0-$r3}		@ load context

	str	$ctx,[sp,#12]		@ offload stuff
	mov	lr,$inp
	str	$len,[sp,#16]
	str	$r1,[sp,#20]
	str	$r2,[sp,#24]
	str	$r3,[sp,#28]
	b	.Loop

.Loop:
#if __ARM_ARCH__<7
	ldrb	r0,[lr],#16		@ load input
# ifdef	__thumb2__
	it	hi
# endif
	addhi	$h4,$h4,#1		@ 1<<128
	ldrb	r1,[lr,#-15]
	ldrb	r2,[lr,#-14]
	ldrb	r3,[lr,#-13]
	orr	r1,r0,r1,lsl#8
	ldrb	r0,[lr,#-12]
	orr	r2,r1,r2,lsl#16
	ldrb	r1,[lr,#-11]
	orr	r3,r2,r3,lsl#24
	ldrb	r2,[lr,#-10]
	adds	$h0,$h0,r3		@ accumulate input

	ldrb	r3,[lr,#-9]
	orr	r1,r0,r1,lsl#8
	ldrb	r0,[lr,#-8]
	orr	r2,r1,r2,lsl#16
	ldrb	r1,[lr,#-7]
	orr	r3,r2,r3,lsl#24
	ldrb	r2,[lr,#-6]
	adcs	$h1,$h1,r3

	ldrb	r3,[lr,#-5]
	orr	r1,r0,r1,lsl#8
	ldrb	r0,[lr,#-4]
	orr	r2,r1,r2,lsl#16
	ldrb	r1,[lr,#-3]
	orr	r3,r2,r3,lsl#24
	ldrb	r2,[lr,#-2]
	adcs	$h2,$h2,r3

	ldrb	r3,[lr,#-1]
	orr	r1,r0,r1,lsl#8
	str	lr,[sp,#8]		@ offload input pointer
	orr	r2,r1,r2,lsl#16
	add	$s1,$r1,$r1,lsr#2
	orr	r3,r2,r3,lsl#24
#else
	ldr	r0,[lr],#16		@ load input
# ifdef	__thumb2__
	it	hi
# endif
	addhi	$h4,$h4,#1		@ padbit
	ldr	r1,[lr,#-12]
	ldr	r2,[lr,#-8]
	ldr	r3,[lr,#-4]
# ifdef	__ARMEB__
	rev	r0,r0
	rev	r1,r1
	rev	r2,r2
	rev	r3,r3
# endif
	adds	$h0,$h0,r0		@ accumulate input
	str	lr,[sp,#8]		@ offload input pointer
	adcs	$h1,$h1,r1
	add	$s1,$r1,$r1,lsr#2
	adcs	$h2,$h2,r2
#endif
	add	$s2,$r2,$r2,lsr#2
	adcs	$h3,$h3,r3
	add	$s3,$r3,$r3,lsr#2

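	@ The s-values just computed satisfy s_i = r_i + (r_i>>2) = (5*r_i)/4
	@ exactly, because clamping zeroed the low two bits of r1-r3; using
	@ s_i for the wrapped-around partial products folds the
	@ 2^130 = 5 (mod 2^130-5) reduction into the multiplication below.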
	umull	r2,r3,$h1,$r0
	adc	$h4,$h4,#0
	umull	r0,r1,$h0,$r0
	umlal	r2,r3,$h4,$s1
	umlal	r0,r1,$h3,$s1
	ldr	$r1,[sp,#20]		@ reload $r1
	umlal	r2,r3,$h2,$s3
	umlal	r0,r1,$h1,$s3
	umlal	r2,r3,$h3,$s2
	umlal	r0,r1,$h2,$s2
	umlal	r2,r3,$h0,$r1
	str	r0,[sp,#0]		@ future $h0
	mul	r0,$s2,$h4
	ldr	$r2,[sp,#24]		@ reload $r2
	adds	r2,r2,r1		@ d1+=d0>>32
	eor	r1,r1,r1
	adc	lr,r3,#0		@ future $h2
	str	r2,[sp,#4]		@ future $h1

	mul	r2,$s3,$h4
	eor	r3,r3,r3
	umlal	r0,r1,$h3,$s3
	ldr	$r3,[sp,#28]		@ reload $r3
	umlal	r2,r3,$h3,$r0
	umlal	r0,r1,$h2,$r0
	umlal	r2,r3,$h2,$r1
	umlal	r0,r1,$h1,$r1
	umlal	r2,r3,$h1,$r2
	umlal	r0,r1,$h0,$r2
	umlal	r2,r3,$h0,$r3
	ldr	$h0,[sp,#0]
	mul	$h4,$r0,$h4
	ldr	$h1,[sp,#4]

	adds	$h2,lr,r0		@ d2+=d1>>32
	ldr	lr,[sp,#8]		@ reload input pointer
	adc	r1,r1,#0
	adds	$h3,r2,r1		@ d3+=d2>>32
	ldr	r0,[sp,#16]		@ reload end pointer
	adc	r3,r3,#0
	add	$h4,$h4,r3		@ h4+=d3>>32

	and	r1,$h4,#-4
	and	$h4,$h4,#3
	add	r1,r1,r1,lsr#2		@ *=5
	adds	$h0,$h0,r1
	adcs	$h1,$h1,#0
	adcs	$h2,$h2,#0
	adcs	$h3,$h3,#0
	adc	$h4,$h4,#0

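	@ Worked example of the folding above: r1 = h4&~3 holds the bits of h
	@ at and above 2^130; since 2^130 = 5 (mod 2^130-5), they are added
	@ back in as r1 + (r1>>2) = 4*(h4>>2) + (h4>>2) = 5*(h4>>2), leaving
	@ only the low 2 bits in h4.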
	cmp	r0,lr			@ done yet?
	bhi	.Loop

	ldr	$ctx,[sp,#12]
	add	sp,sp,#32
	stmia	$ctx,{$h0-$h4}		@ store the result

.Lno_data:
#if __ARM_ARCH__>=5
	ldmia	sp!,{r3-r11,pc}
#else
	ldmia	sp!,{r3-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	poly1305_blocks,.-poly1305_blocks
___
}
{
my ($ctx,$mac,$nonce)=map("r$_",(0..2));
my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11));
my $g4=$h4;

$code.=<<___;
.type	poly1305_emit,%function
.align	5
poly1305_emit:
.Lpoly1305_emit:
	stmdb	sp!,{r4-r11}
.Lpoly1305_emit_enter:

	ldmia	$ctx,{$h0-$h4}
	adds	$g0,$h0,#5		@ compare to modulus
	adcs	$g1,$h1,#0
	adcs	$g2,$h2,#0
	adcs	$g3,$h3,#0
	adc	$g4,$h4,#0
	tst	$g4,#4			@ did it carry/borrow?

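	@ g = h+5 overflows into bit 130 (bit 2 of g4) precisely when
	@ h >= 2^130-5 = p, i.e. when a final subtraction of p is needed;
	@ the conditional moves below then select g = h-p without branching.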
#ifdef __thumb2__
	it	ne
#endif
	movne	$h0,$g0
	ldr	$g0,[$nonce,#0]
#ifdef __thumb2__
	it	ne
#endif
	movne	$h1,$g1
	ldr	$g1,[$nonce,#4]
#ifdef __thumb2__
	it	ne
#endif
	movne	$h2,$g2
	ldr	$g2,[$nonce,#8]
#ifdef __thumb2__
	it	ne
#endif
	movne	$h3,$g3
	ldr	$g3,[$nonce,#12]

	adds	$h0,$h0,$g0
	adcs	$h1,$h1,$g1
	adcs	$h2,$h2,$g2
	adc	$h3,$h3,$g3

#if __ARM_ARCH__>=7
# ifdef	__ARMEB__
	rev	$h0,$h0
	rev	$h1,$h1
	rev	$h2,$h2
	rev	$h3,$h3
# endif
	str	$h0,[$mac,#0]
	str	$h1,[$mac,#4]
	str	$h2,[$mac,#8]
	str	$h3,[$mac,#12]
#else
	strb	$h0,[$mac,#0]
	mov	$h0,$h0,lsr#8
	strb	$h1,[$mac,#4]
	mov	$h1,$h1,lsr#8
	strb	$h2,[$mac,#8]
	mov	$h2,$h2,lsr#8
	strb	$h3,[$mac,#12]
	mov	$h3,$h3,lsr#8

	strb	$h0,[$mac,#1]
	mov	$h0,$h0,lsr#8
	strb	$h1,[$mac,#5]
	mov	$h1,$h1,lsr#8
	strb	$h2,[$mac,#9]
	mov	$h2,$h2,lsr#8
	strb	$h3,[$mac,#13]
	mov	$h3,$h3,lsr#8

	strb	$h0,[$mac,#2]
	mov	$h0,$h0,lsr#8
	strb	$h1,[$mac,#6]
	mov	$h1,$h1,lsr#8
	strb	$h2,[$mac,#10]
	mov	$h2,$h2,lsr#8
	strb	$h3,[$mac,#14]
	mov	$h3,$h3,lsr#8

	strb	$h0,[$mac,#3]
	strb	$h1,[$mac,#7]
	strb	$h2,[$mac,#11]
	strb	$h3,[$mac,#15]
#endif
	ldmia	sp!,{r4-r11}
#if __ARM_ARCH__>=5
	ret				@ bx	lr
#else
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	poly1305_emit,.-poly1305_emit
___
{
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9));
my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
my ($T0,$T1,$MASK) = map("q$_",(15,4,0));

my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7));

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.fpu	neon

.type	poly1305_init_neon,%function
.align	5
poly1305_init_neon:
	ldr	r4,[$ctx,#20]		@ load key base 2^32
	ldr	r5,[$ctx,#24]
	ldr	r6,[$ctx,#28]
	ldr	r7,[$ctx,#32]

	and	r2,r4,#0x03ffffff	@ base 2^32 -> base 2^26
	mov	r3,r4,lsr#26
	mov	r4,r5,lsr#20
	orr	r3,r3,r5,lsl#6
	mov	r5,r6,lsr#14
	orr	r4,r4,r6,lsl#12
	mov	r6,r7,lsr#8
	orr	r5,r5,r7,lsl#18
	and	r3,r3,#0x03ffffff
	and	r4,r4,#0x03ffffff
	and	r5,r5,#0x03ffffff

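	@ The key now sits in five 26-bit limbs: r0=key[25:0], r1=key[51:26],
	@ r2=key[77:52], r3=key[103:78], r4=key[127:104]; the top limb needs
	@ no masking, since clamping already cleared the key's top bits.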
	vdup.32	$R0,r2			@ r^1 in both lanes
	add	r2,r3,r3,lsl#2		@ *5
	vdup.32	$R1,r3
	add	r3,r4,r4,lsl#2
	vdup.32	$S1,r2
	vdup.32	$R2,r4
	add	r4,r5,r5,lsl#2
	vdup.32	$S2,r3
	vdup.32	$R3,r5
	add	r5,r6,r6,lsl#2
	vdup.32	$S3,r4
	vdup.32	$R4,r6
	vdup.32	$S4,r5

	mov	$zeros,#2		@ counter

.Lsquare_neon:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	@ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
	@ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
	@ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
	@ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4

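	@ (the 5*r_j factors fold the 2^130 = 5 (mod 2^130-5) wrap-around of
	@ the out-of-range limb products back into 26-bit limb positions,
	@ exactly as s1-s3 do in the integer-only path)
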
	vmull.u32	$D0,$R0,${R0}[1]
	vmull.u32	$D1,$R1,${R0}[1]
	vmull.u32	$D2,$R2,${R0}[1]
	vmull.u32	$D3,$R3,${R0}[1]
	vmull.u32	$D4,$R4,${R0}[1]

	vmlal.u32	$D0,$R4,${S1}[1]
	vmlal.u32	$D1,$R0,${R1}[1]
	vmlal.u32	$D2,$R1,${R1}[1]
	vmlal.u32	$D3,$R2,${R1}[1]
	vmlal.u32	$D4,$R3,${R1}[1]

	vmlal.u32	$D0,$R3,${S2}[1]
	vmlal.u32	$D1,$R4,${S2}[1]
	vmlal.u32	$D3,$R1,${R2}[1]
	vmlal.u32	$D2,$R0,${R2}[1]
	vmlal.u32	$D4,$R2,${R2}[1]

	vmlal.u32	$D0,$R2,${S3}[1]
	vmlal.u32	$D3,$R0,${R3}[1]
	vmlal.u32	$D1,$R3,${S3}[1]
	vmlal.u32	$D2,$R4,${S3}[1]
	vmlal.u32	$D4,$R1,${R3}[1]

	vmlal.u32	$D3,$R4,${S4}[1]
	vmlal.u32	$D0,$R1,${S4}[1]
	vmlal.u32	$D1,$R2,${S4}[1]
	vmlal.u32	$D2,$R3,${S4}[1]
	vmlal.u32	$D4,$R0,${R4}[1]

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
	@ and P. Schwabe
	@
	@ H0>>+H1>>+H2>>+H3>>+H4
	@ H3>>+H4>>*5+H0>>+H1
	@
	@ Trivia.
	@
	@ Result of multiplication of n-bit number by m-bit number is
	@ n+m bits wide. However! Even though 2^n is an n+1-bit number,
	@ an m-bit number multiplied by 2^n is still n+m bits wide.
	@
	@ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2,
	@ and so is sum of four. Sum of 2^m n-m-bit numbers and an n-bit
	@ one is n+1 bits wide.
	@
	@ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
	@ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
	@ can be 27. However! In cases when their width exceeds 26 bits
	@ they are limited by 2^26+2^6. This in turn means that *sum*
	@ of the products with these values can still be viewed as sum
	@ of 52-bit numbers as long as the amount of addends is not a
	@ power of 2. For example,
	@
	@ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
	@
	@ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
	@ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
	@ 8 * (2^52) or 2^55. However, the value is then multiplied
	@ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
	@ which is less than 32 * (2^52) or 2^57. And when processing
	@ data we are looking at triple as many addends...
	@
	@ In key setup procedure pre-reduced H0 is limited by 5*4+1 and
	@ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
	@ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while
	@ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? The vmlal.u32
	@ instruction accepts 2x32-bit input and writes 2x64-bit result.
	@ This means that the result of reduction has to be compressed upon
	@ loop wrap-around. This can be done in the process of reduction
	@ to minimize the amount of instructions [as well as the amount of
	@ 128-bit instructions, which benefits low-end processors], but
	@ one has to watch for H2 (which is narrower than H0) and 5*H4
	@ not being wider than 58 bits, so that the result of the right
	@ shift by 26 bits fits in 32 bits. This is also useful on x86,
	@ because it allows one to use paddd in place of paddq, which
	@ benefits Atom, where paddq is ridiculously slow.

	vshr.u64	$T0,$D3,#26
	vmovn.i64	$D3#lo,$D3
	vshr.u64	$T1,$D0,#26
	vmovn.i64	$D0#lo,$D0
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vbic.i32	$D3#lo,#0xfc000000	@ &=0x03ffffff
	vadd.i64	$D1,$D1,$T1		@ h0 -> h1
	vbic.i32	$D0#lo,#0xfc000000

	vshrn.u64	$T0#lo,$D4,#26
	vmovn.i64	$D4#lo,$D4
	vshr.u64	$T1,$D1,#26
	vmovn.i64	$D1#lo,$D1
	vadd.i64	$D2,$D2,$T1		@ h1 -> h2
	vbic.i32	$D4#lo,#0xfc000000
	vbic.i32	$D1#lo,#0xfc000000

	vadd.i32	$D0#lo,$D0#lo,$T0#lo
	vshl.u32	$T0#lo,$T0#lo,#2
	vshrn.u64	$T1#lo,$D2,#26
	vmovn.i64	$D2#lo,$D2
	vadd.i32	$D0#lo,$D0#lo,$T0#lo	@ h4 -> h0
	vadd.i32	$D3#lo,$D3#lo,$T1#lo	@ h2 -> h3
	vbic.i32	$D2#lo,#0xfc000000

	vshr.u32	$T0#lo,$D0#lo,#26
	vbic.i32	$D0#lo,#0xfc000000
	vshr.u32	$T1#lo,$D3#lo,#26
	vbic.i32	$D3#lo,#0xfc000000
	vadd.i32	$D1#lo,$D1#lo,$T0#lo	@ h0 -> h1
	vadd.i32	$D4#lo,$D4#lo,$T1#lo	@ h3 -> h4

	subs	$zeros,$zeros,#1
	beq	.Lsquare_break_neon

	add	$tbl0,$ctx,#(48+0*9*4)
	add	$tbl1,$ctx,#(48+1*9*4)

	vtrn.32	$R0,$D0#lo		@ r^2:r^1
	vtrn.32	$R2,$D2#lo
	vtrn.32	$R3,$D3#lo
	vtrn.32	$R1,$D1#lo
	vtrn.32	$R4,$D4#lo

	vshl.u32	$S2,$R2,#2		@ *5
	vshl.u32	$S3,$R3,#2
	vshl.u32	$S1,$R1,#2
	vshl.u32	$S4,$R4,#2
	vadd.i32	$S2,$S2,$R2
	vadd.i32	$S1,$S1,$R1
	vadd.i32	$S3,$S3,$R3
	vadd.i32	$S4,$S4,$R4

	vst4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
	vst4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
	vst4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vst4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vst1.32	{${S4}[0]},[$tbl0,:32]
	vst1.32	{${S4}[1]},[$tbl1,:32]

	b	.Lsquare_neon

.align	4
.Lsquare_break_neon:
	add	$tbl0,$ctx,#(48+2*4*9)
	add	$tbl1,$ctx,#(48+3*4*9)

	vmov	$R0,$D0#lo		@ r^4:r^3
	vshl.u32	$S1,$D1#lo,#2	@ *5
	vmov	$R1,$D1#lo
	vshl.u32	$S2,$D2#lo,#2
	vmov	$R2,$D2#lo
	vshl.u32	$S3,$D3#lo,#2
	vmov	$R3,$D3#lo
	vshl.u32	$S4,$D4#lo,#2
	vmov	$R4,$D4#lo
	vadd.i32	$S1,$S1,$D1#lo
	vadd.i32	$S2,$S2,$D2#lo
	vadd.i32	$S3,$S3,$D3#lo
	vadd.i32	$S4,$S4,$D4#lo

	vst4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
	vst4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
	vst4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vst4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vst1.32	{${S4}[0]},[$tbl0]
	vst1.32	{${S4}[1]},[$tbl1]

	ret				@ bx	lr
.size	poly1305_init_neon,.-poly1305_init_neon

.type	poly1305_blocks_neon,%function
.align	5
poly1305_blocks_neon:
.Lpoly1305_blocks_neon:
	ldr	ip,[$ctx,#36]		@ is_base2_26
	ands	$len,$len,#-16
	beq	.Lno_data_neon

	cmp	$len,#64
	bhs	.Lenter_neon
	tst	ip,ip			@ is_base2_26?
	beq	.Lpoly1305_blocks

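	@ Inputs shorter than 64 bytes with the hash still in base 2^32 are
	@ not worth the NEON setup cost and are delegated to the integer-only
	@ poly1305_blocks above; once the hash is in base 2^26, however, the
	@ NEON path must be taken to keep the representation consistent.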
.Lenter_neon:
	stmdb	sp!,{r4-r7}
	vstmdb	sp!,{d8-d15}		@ ABI specification says so

	tst	ip,ip			@ is_base2_26?
	bne	.Lbase2_26_neon

	stmdb	sp!,{r1-r3,lr}
	bl	poly1305_init_neon

	ldr	r4,[$ctx,#0]		@ load hash value base 2^32
	ldr	r5,[$ctx,#4]
	ldr	r6,[$ctx,#8]
	ldr	r7,[$ctx,#12]
	ldr	ip,[$ctx,#16]

	and	r2,r4,#0x03ffffff	@ base 2^32 -> base 2^26
	mov	r3,r4,lsr#26
	veor	$D0#lo,$D0#lo,$D0#lo
	mov	r4,r5,lsr#20
	orr	r3,r3,r5,lsl#6
	veor	$D1#lo,$D1#lo,$D1#lo
	mov	r5,r6,lsr#14
	orr	r4,r4,r6,lsl#12
	veor	$D2#lo,$D2#lo,$D2#lo
	mov	r6,r7,lsr#8
	orr	r5,r5,r7,lsl#18
	veor	$D3#lo,$D3#lo,$D3#lo
	and	r3,r3,#0x03ffffff
	orr	r6,r6,ip,lsl#24
	veor	$D4#lo,$D4#lo,$D4#lo
	and	r4,r4,#0x03ffffff
	mov	r1,#1
	and	r5,r5,#0x03ffffff
	str	r1,[$ctx,#36]		@ is_base2_26

	vmov.32	$D0#lo[0],r2
	vmov.32	$D1#lo[0],r3
	vmov.32	$D2#lo[0],r4
	vmov.32	$D3#lo[0],r5
	vmov.32	$D4#lo[0],r6
	adr	$zeros,.Lzeros

	ldmia	sp!,{r1-r3,lr}
	b	.Lbase2_32_neon

.align	4
.Lbase2_26_neon:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ load hash value

	veor	$D0#lo,$D0#lo,$D0#lo
	veor	$D1#lo,$D1#lo,$D1#lo
	veor	$D2#lo,$D2#lo,$D2#lo
	veor	$D3#lo,$D3#lo,$D3#lo
	veor	$D4#lo,$D4#lo,$D4#lo
	vld4.32	{$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
	adr	$zeros,.Lzeros
	vld1.32	{$D4#lo[0]},[$ctx]
	sub	$ctx,$ctx,#16		@ rewind

.Lbase2_32_neon:
	add	$in2,$inp,#32
	mov	$padbit,$padbit,lsl#24
	tst	$len,#31
	beq	.Leven

	vld4.32	{$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]!
	vmov.32	$H4#lo[0],$padbit
	sub	$len,$len,#16
	add	$in2,$inp,#32

# ifdef	__ARMEB__
	vrev32.8	$H0,$H0
	vrev32.8	$H3,$H3
	vrev32.8	$H1,$H1
	vrev32.8	$H2,$H2
# endif
	vsri.u32	$H4#lo,$H3#lo,#8	@ base 2^32 -> base 2^26
	vshl.u32	$H3#lo,$H3#lo,#18

	vsri.u32	$H3#lo,$H2#lo,#14
	vshl.u32	$H2#lo,$H2#lo,#12
	vadd.i32	$H4#hi,$H4#lo,$D4#lo	@ add hash value and move to #hi

	vbic.i32	$H3#lo,#0xfc000000
	vsri.u32	$H2#lo,$H1#lo,#20
	vshl.u32	$H1#lo,$H1#lo,#6

	vbic.i32	$H2#lo,#0xfc000000
	vsri.u32	$H1#lo,$H0#lo,#26
	vadd.i32	$H3#hi,$H3#lo,$D3#lo

	vbic.i32	$H0#lo,#0xfc000000
	vbic.i32	$H1#lo,#0xfc000000
	vadd.i32	$H2#hi,$H2#lo,$D2#lo

	vadd.i32	$H0#hi,$H0#lo,$D0#lo
	vadd.i32	$H1#hi,$H1#lo,$D1#lo

	mov	$tbl1,$zeros
	add	$tbl0,$ctx,#48

	cmp	$len,$len
	b	.Long_tail

.align	4
.Leven:
	subs	$len,$len,#64
	it	lo
	movlo	$in2,$zeros

	vmov.i32	$H4,#1<<24		@ padbit, yes, always
	vld4.32	{$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]	@ inp[0:1]
	add	$inp,$inp,#64
	vld4.32	{$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]	@ inp[2:3] (or 0)
	add	$in2,$in2,#64
	itt	hi
	addhi	$tbl1,$ctx,#(48+1*9*4)
	addhi	$tbl0,$ctx,#(48+3*9*4)

# ifdef	__ARMEB__
	vrev32.8	$H0,$H0
	vrev32.8	$H3,$H3
	vrev32.8	$H1,$H1
	vrev32.8	$H2,$H2
# endif
	vsri.u32	$H4,$H3,#8		@ base 2^32 -> base 2^26
	vshl.u32	$H3,$H3,#18

	vsri.u32	$H3,$H2,#14
	vshl.u32	$H2,$H2,#12

	vbic.i32	$H3,#0xfc000000
	vsri.u32	$H2,$H1,#20
	vshl.u32	$H1,$H1,#6

	vbic.i32	$H2,#0xfc000000
	vsri.u32	$H1,$H0,#26

	vbic.i32	$H0,#0xfc000000
	vbic.i32	$H1,#0xfc000000

	bls	.Lskip_loop

	vld4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^2
	vld4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^4
	vld4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vld4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	b	.Loop_neon

.align	5
.Loop_neon:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	@   \___________________/
	@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	@   \___________________/ \____________________/
	@
	@ Note that we start with inp[2:3]*r^2. This is because it
	@ doesn't depend on reduction in previous iteration.
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
	@ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
	@ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
	@ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
	@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ inp[2:3]*r^2

	vadd.i32	$H2#lo,$H2#lo,$D2#lo	@ accumulate inp[0:1]
	vmull.u32	$D2,$H2#hi,${R0}[1]
	vadd.i32	$H0#lo,$H0#lo,$D0#lo
	vmull.u32	$D0,$H0#hi,${R0}[1]
	vadd.i32	$H3#lo,$H3#lo,$D3#lo
	vmull.u32	$D3,$H3#hi,${R0}[1]
	vmlal.u32	$D2,$H1#hi,${R1}[1]
	vadd.i32	$H1#lo,$H1#lo,$D1#lo
	vmull.u32	$D1,$H1#hi,${R0}[1]

	vadd.i32	$H4#lo,$H4#lo,$D4#lo
	vmull.u32	$D4,$H4#hi,${R0}[1]
	subs	$len,$len,#64
	vmlal.u32	$D0,$H4#hi,${S1}[1]
	it	lo
	movlo	$in2,$zeros
	vmlal.u32	$D3,$H2#hi,${R1}[1]
	vld1.32	${S4}[1],[$tbl1,:32]
	vmlal.u32	$D1,$H0#hi,${R1}[1]
	vmlal.u32	$D4,$H3#hi,${R1}[1]

	vmlal.u32	$D0,$H3#hi,${S2}[1]
	vmlal.u32	$D3,$H1#hi,${R2}[1]
	vmlal.u32	$D4,$H2#hi,${R2}[1]
	vmlal.u32	$D1,$H4#hi,${S2}[1]
	vmlal.u32	$D2,$H0#hi,${R2}[1]

	vmlal.u32	$D3,$H0#hi,${R3}[1]
	vmlal.u32	$D0,$H2#hi,${S3}[1]
	vmlal.u32	$D4,$H1#hi,${R3}[1]
	vmlal.u32	$D1,$H3#hi,${S3}[1]
	vmlal.u32	$D2,$H4#hi,${S3}[1]

	vmlal.u32	$D3,$H4#hi,${S4}[1]
	vmlal.u32	$D0,$H1#hi,${S4}[1]
	vmlal.u32	$D4,$H0#hi,${R4}[1]
	vmlal.u32	$D1,$H2#hi,${S4}[1]
	vmlal.u32	$D2,$H3#hi,${S4}[1]

	vld4.32	{$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]	@ inp[2:3] (or 0)
	add	$in2,$in2,#64

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ (hash+inp[0:1])*r^4 and accumulate

	vmlal.u32	$D3,$H3#lo,${R0}[0]
	vmlal.u32	$D0,$H0#lo,${R0}[0]
	vmlal.u32	$D4,$H4#lo,${R0}[0]
	vmlal.u32	$D1,$H1#lo,${R0}[0]
	vmlal.u32	$D2,$H2#lo,${R0}[0]
	vld1.32	${S4}[0],[$tbl0,:32]

	vmlal.u32	$D3,$H2#lo,${R1}[0]
	vmlal.u32	$D0,$H4#lo,${S1}[0]
	vmlal.u32	$D4,$H3#lo,${R1}[0]
	vmlal.u32	$D1,$H0#lo,${R1}[0]
	vmlal.u32	$D2,$H1#lo,${R1}[0]

	vmlal.u32	$D3,$H1#lo,${R2}[0]
	vmlal.u32	$D0,$H3#lo,${S2}[0]
	vmlal.u32	$D4,$H2#lo,${R2}[0]
	vmlal.u32	$D1,$H4#lo,${S2}[0]
	vmlal.u32	$D2,$H0#lo,${R2}[0]

	vmlal.u32	$D3,$H0#lo,${R3}[0]
	vmlal.u32	$D0,$H2#lo,${S3}[0]
	vmlal.u32	$D4,$H1#lo,${R3}[0]
	vmlal.u32	$D1,$H3#lo,${S3}[0]
	vmlal.u32	$D3,$H4#lo,${S4}[0]

	vmlal.u32	$D2,$H4#lo,${S3}[0]
	vmlal.u32	$D0,$H1#lo,${S4}[0]
	vmlal.u32	$D4,$H0#lo,${R4}[0]
	vmov.i32	$H4,#1<<24		@ padbit, yes, always
	vmlal.u32	$D1,$H2#lo,${S4}[0]
	vmlal.u32	$D2,$H3#lo,${S4}[0]

	vld4.32	{$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]	@ inp[0:1]
	add	$inp,$inp,#64
# ifdef	__ARMEB__
	vrev32.8	$H0,$H0
	vrev32.8	$H1,$H1
	vrev32.8	$H2,$H2
	vrev32.8	$H3,$H3
# endif

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction interleaved with base 2^32 -> base 2^26 of
	@ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4.

	vshr.u64	$T0,$D3,#26
	vmovn.i64	$D3#lo,$D3
	vshr.u64	$T1,$D0,#26
	vmovn.i64	$D0#lo,$D0
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vbic.i32	$D3#lo,#0xfc000000
	vsri.u32	$H4,$H3,#8		@ base 2^32 -> base 2^26
	vadd.i64	$D1,$D1,$T1		@ h0 -> h1
	vshl.u32	$H3,$H3,#18
	vbic.i32	$D0#lo,#0xfc000000

	vshrn.u64	$T0#lo,$D4,#26
	vmovn.i64	$D4#lo,$D4
	vshr.u64	$T1,$D1,#26
	vmovn.i64	$D1#lo,$D1
	vadd.i64	$D2,$D2,$T1		@ h1 -> h2
	vsri.u32	$H3,$H2,#14
	vbic.i32	$D4#lo,#0xfc000000
	vshl.u32	$H2,$H2,#12
	vbic.i32	$D1#lo,#0xfc000000

	vadd.i32	$D0#lo,$D0#lo,$T0#lo
	vshl.u32	$T0#lo,$T0#lo,#2
	vbic.i32	$H3,#0xfc000000
	vshrn.u64	$T1#lo,$D2,#26
	vmovn.i64	$D2#lo,$D2
	vaddl.u32	$D0,$D0#lo,$T0#lo	@ h4 -> h0 [widen for a sec]
	vsri.u32	$H2,$H1,#20
	vadd.i32	$D3#lo,$D3#lo,$T1#lo	@ h2 -> h3
	vshl.u32	$H1,$H1,#6
	vbic.i32	$D2#lo,#0xfc000000
	vbic.i32	$H2,#0xfc000000

	vshrn.u64	$T0#lo,$D0,#26		@ re-narrow
	vmovn.i64	$D0#lo,$D0
	vsri.u32	$H1,$H0,#26
	vbic.i32	$H0,#0xfc000000
	vshr.u32	$T1#lo,$D3#lo,#26
	vbic.i32	$D3#lo,#0xfc000000
	vbic.i32	$D0#lo,#0xfc000000
	vadd.i32	$D1#lo,$D1#lo,$T0#lo	@ h0 -> h1
	vadd.i32	$D4#lo,$D4#lo,$T1#lo	@ h3 -> h4
	vbic.i32	$H1,#0xfc000000

	bhi	.Loop_neon

.Lskip_loop:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	add	$tbl1,$ctx,#(48+0*9*4)
	add	$tbl0,$ctx,#(48+1*9*4)
	adds	$len,$len,#32
	it	ne
	movne	$len,#0
	bne	.Long_tail

	vadd.i32	$H2#hi,$H2#lo,$D2#lo	@ add hash value and move to #hi
	vadd.i32	$H0#hi,$H0#lo,$D0#lo
	vadd.i32	$H3#hi,$H3#lo,$D3#lo
	vadd.i32	$H1#hi,$H1#lo,$D1#lo
	vadd.i32	$H4#hi,$H4#lo,$D4#lo

.Long_tail:
	vld4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^1
	vld4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^2

	vadd.i32	$H2#lo,$H2#lo,$D2#lo	@ can be redundant
	vmull.u32	$D2,$H2#hi,$R0
	vadd.i32	$H0#lo,$H0#lo,$D0#lo
	vmull.u32	$D0,$H0#hi,$R0
	vadd.i32	$H3#lo,$H3#lo,$D3#lo
	vmull.u32	$D3,$H3#hi,$R0
	vadd.i32	$H1#lo,$H1#lo,$D1#lo
	vmull.u32	$D1,$H1#hi,$R0
	vadd.i32	$H4#lo,$H4#lo,$D4#lo
	vmull.u32	$D4,$H4#hi,$R0

	vmlal.u32	$D0,$H4#hi,$S1
	vld4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vmlal.u32	$D3,$H2#hi,$R1
	vld4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vmlal.u32	$D1,$H0#hi,$R1
	vmlal.u32	$D4,$H3#hi,$R1
	vmlal.u32	$D2,$H1#hi,$R1

	vmlal.u32	$D3,$H1#hi,$R2
	vld1.32	${S4}[1],[$tbl1,:32]
	vmlal.u32	$D0,$H3#hi,$S2
	vld1.32	${S4}[0],[$tbl0,:32]
	vmlal.u32	$D4,$H2#hi,$R2
	vmlal.u32	$D1,$H4#hi,$S2
	vmlal.u32	$D2,$H0#hi,$R2

	vmlal.u32	$D3,$H0#hi,$R3
	it	ne
	addne	$tbl1,$ctx,#(48+2*9*4)
	vmlal.u32	$D0,$H2#hi,$S3
	it	ne
	addne	$tbl0,$ctx,#(48+3*9*4)
	vmlal.u32	$D4,$H1#hi,$R3
	vmlal.u32	$D1,$H3#hi,$S3
	vmlal.u32	$D2,$H4#hi,$S3

	vmlal.u32	$D3,$H4#hi,$S4
	vorn	$MASK,$MASK,$MASK	@ all-ones, can be redundant
	vmlal.u32	$D0,$H1#hi,$S4
	vshr.u64	$MASK,$MASK,#38
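	@ (vorn of a register with itself yields all-ones; shifting each
	@ 64-bit lane right by 38 leaves 64-38 = 26 set bits, i.e. the
	@ 0x03ffffff limb mask used by the lazy reduction further down)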
	vmlal.u32	$D4,$H0#hi,$R4
	vmlal.u32	$D1,$H2#hi,$S4
	vmlal.u32	$D2,$H3#hi,$S4

	beq	.Lshort_tail

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ (hash+inp[0:1])*r^4:r^3 and accumulate

	vld4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^3
	vld4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^4

	vmlal.u32	$D2,$H2#lo,$R0
	vmlal.u32	$D0,$H0#lo,$R0
	vmlal.u32	$D3,$H3#lo,$R0
	vmlal.u32	$D1,$H1#lo,$R0
	vmlal.u32	$D4,$H4#lo,$R0

	vmlal.u32	$D0,$H4#lo,$S1
	vld4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vmlal.u32	$D3,$H2#lo,$R1
	vld4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vmlal.u32	$D1,$H0#lo,$R1
	vmlal.u32	$D4,$H3#lo,$R1
	vmlal.u32	$D2,$H1#lo,$R1

	vmlal.u32	$D3,$H1#lo,$R2
	vld1.32	${S4}[1],[$tbl1,:32]
	vmlal.u32	$D0,$H3#lo,$S2
	vld1.32	${S4}[0],[$tbl0,:32]
	vmlal.u32	$D4,$H2#lo,$R2
	vmlal.u32	$D1,$H4#lo,$S2
	vmlal.u32	$D2,$H0#lo,$R2

	vmlal.u32	$D3,$H0#lo,$R3
	vmlal.u32	$D0,$H2#lo,$S3
	vmlal.u32	$D4,$H1#lo,$R3
	vmlal.u32	$D1,$H3#lo,$S3
	vmlal.u32	$D2,$H4#lo,$S3

	vmlal.u32	$D3,$H4#lo,$S4
	vorn	$MASK,$MASK,$MASK	@ all-ones
	vmlal.u32	$D0,$H1#lo,$S4
	vshr.u64	$MASK,$MASK,#38
	vmlal.u32	$D4,$H0#lo,$R4
	vmlal.u32	$D1,$H2#lo,$S4
	vmlal.u32	$D2,$H3#lo,$S4

.Lshort_tail:
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ horizontal addition

	vadd.i64	$D3#lo,$D3#lo,$D3#hi
	vadd.i64	$D0#lo,$D0#lo,$D0#hi
	vadd.i64	$D4#lo,$D4#lo,$D4#hi
	vadd.i64	$D1#lo,$D1#lo,$D1#hi
	vadd.i64	$D2#lo,$D2#lo,$D2#hi

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction, but without narrowing

	vshr.u64	$T0,$D3,#26
	vand.i64	$D3,$D3,$MASK
	vshr.u64	$T1,$D0,#26
	vand.i64	$D0,$D0,$MASK
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vadd.i64	$D1,$D1,$T1		@ h0 -> h1

	vshr.u64	$T0,$D4,#26
	vand.i64	$D4,$D4,$MASK
	vshr.u64	$T1,$D1,#26
	vand.i64	$D1,$D1,$MASK
	vadd.i64	$D2,$D2,$T1		@ h1 -> h2

	vadd.i64	$D0,$D0,$T0
	vshl.u64	$T0,$T0,#2
	vshr.u64	$T1,$D2,#26
	vand.i64	$D2,$D2,$MASK
	vadd.i64	$D0,$D0,$T0		@ h4 -> h0
	vadd.i64	$D3,$D3,$T1		@ h2 -> h3

	vshr.u64	$T0,$D0,#26
	vand.i64	$D0,$D0,$MASK
	vshr.u64	$T1,$D3,#26
	vand.i64	$D3,$D3,$MASK
	vadd.i64	$D1,$D1,$T0		@ h0 -> h1
	vadd.i64	$D4,$D4,$T1		@ h3 -> h4

	cmp	$len,#0
	bne	.Leven

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ store hash value

	vst4.32	{$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
	vst1.32	{$D4#lo[0]},[$ctx]

	vldmia	sp!,{d8-d15}		@ epilogue
	ldmia	sp!,{r4-r7}
.Lno_data_neon:
	ret				@ bx	lr
.size	poly1305_blocks_neon,.-poly1305_blocks_neon

.type	poly1305_emit_neon,%function
.align	5
poly1305_emit_neon:
.Lpoly1305_emit_neon:
	ldr	ip,[$ctx,#36]		@ is_base2_26

	stmdb	sp!,{r4-r11}

	tst	ip,ip
	beq	.Lpoly1305_emit_enter

	ldmia	$ctx,{$h0-$h4}
	eor	$g0,$g0,$g0

	adds	$h0,$h0,$h1,lsl#26	@ base 2^26 -> base 2^32
	mov	$h1,$h1,lsr#6
	adcs	$h1,$h1,$h2,lsl#20
	mov	$h2,$h2,lsr#12
	adcs	$h2,$h2,$h3,lsl#14
	mov	$h3,$h3,lsr#18
	adcs	$h3,$h3,$h4,lsl#8
	adc	$h4,$g0,$h4,lsr#24	@ can be partially reduced ...

	and	$g0,$h4,#-4		@ ... so reduce
	and	$h4,$h4,#3
	add	$g0,$g0,$g0,lsr#2	@ *= 5
	adds	$h0,$h0,$g0
	adcs	$h1,$h1,#0
	adcs	$h2,$h2,#0
	adcs	$h3,$h3,#0
	adc	$h4,$h4,#0

	adds	$g0,$h0,#5		@ compare to modulus
	adcs	$g1,$h1,#0
	adcs	$g2,$h2,#0
	adcs	$g3,$h3,#0
	adc	$g4,$h4,#0
	tst	$g4,#4			@ did it carry/borrow?

	it	ne
	movne	$h0,$g0
	ldr	$g0,[$nonce,#0]
	it	ne
	movne	$h1,$g1
	ldr	$g1,[$nonce,#4]
	it	ne
	movne	$h2,$g2
	ldr	$g2,[$nonce,#8]
	it	ne
	movne	$h3,$g3
	ldr	$g3,[$nonce,#12]

	adds	$h0,$h0,$g0		@ accumulate nonce
	adcs	$h1,$h1,$g1
	adcs	$h2,$h2,$g2
	adc	$h3,$h3,$g3

# ifdef	__ARMEB__
	rev	$h0,$h0
	rev	$h1,$h1
	rev	$h2,$h2
	rev	$h3,$h3
# endif
	str	$h0,[$mac,#0]		@ store the result
	str	$h1,[$mac,#4]
	str	$h2,[$mac,#8]
	str	$h3,[$mac,#12]

	ldmia	sp!,{r4-r11}
	ret				@ bx	lr
.size	poly1305_emit_neon,.-poly1305_emit_neon

.align	5
.Lzeros:
.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.Lpoly1305_init
#endif
___
} }
$code.=<<___;
.asciz	"Poly1305 for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
#endif
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4
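
	# e.g. "q5#lo" becomes "d10" and "q5#hi" becomes "d11": the NEON code
	# above names the halves of 128-bit q-registers, and this maps them
	# onto the corresponding 64-bit d-registers. "ret" becomes "bx lr",
	# while a literal "bx lr" (reached only on pre-ARMv5 paths) is emitted
	# as its raw encoding; the chained "or" makes the rewrites mutually
	# exclusive per line.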

	print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!"; # enforce flush