VirtualBox

source: vbox/trunk/src/libs/openssl-3.1.3/crypto/aes/asm/vpaes-armv8.pl@ 102427

Last change on this file since 102427 was 101211, checked in by vboxsync, 17 months ago

openssl-3.1.3: Applied and adjusted our OpenSSL changes to 3.1.2. bugref:10527

1#! /usr/bin/env perl
2# Copyright 2015-2022 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the Apache License 2.0 (the "License"). You may not use
5# this file except in compliance with the License. You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9
10######################################################################
11## Constant-time SSSE3 AES core implementation.
12## version 0.1
13##
14## By Mike Hamburg (Stanford University), 2009
15## Public domain.
16##
17## For details see http://shiftleft.org/papers/vector_aes/ and
18## http://crypto.stanford.edu/vpaes/.
19##
20######################################################################
21# ARMv8 NEON adaptation by <appro@openssl.org>
22#
23# The reason for undertaking this effort is that there is at least one
24# popular SoC based on Cortex-A53 that doesn't have the crypto extensions.
25#
26#                  CBC enc      ECB enc/dec(*)    [bit-sliced enc/dec]
27# Cortex-A53       21.5         18.1/20.6         [17.5/19.8      ]
28# Cortex-A57       36.0(**)     20.4/24.9(**)     [14.4/16.6      ]
29# X-Gene           45.9(**)     45.8/57.7(**)     [33.1/37.6(**)  ]
30# Denver(***)      16.6(**)     15.1/17.8(**)     [8.80/9.93      ]
31# Apple A7(***)    22.7(**)     10.9/14.3         [8.45/10.0      ]
32# Mongoose(***)    26.3(**)     21.0/25.0(**)     [13.3/16.8      ]
33# ThunderX2(***)   39.4(**)     33.8/48.6(**)
34#
35# (*) ECB denotes approximate result for parallelizable modes
36# such as CBC decrypt, CTR, etc.;
37# (**) these results are worse than scalar compiler-generated
38# code, but it's constant-time and therefore preferred;
39# (***) presented for reference/comparison purposes;
40
41# $output is the last argument if it looks like a file (it has an extension)
42# $flavour is the first argument if it doesn't look like a file
43$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
44$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
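# (Illustrative, hypothetical invocation: the build system normally supplies
# these arguments, e.g. "perl vpaes-armv8.pl linux64 vpaes-armv8.S"; the
# flavour string is simply forwarded to arm-xlate.pl below.)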
45
46$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
47( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
48( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
49die "can't locate arm-xlate.pl";
50
51open OUT,"| \"$^X\" $xlate $flavour \"$output\""
52 or die "can't call $xlate: $!";
53*STDOUT=*OUT;
54
55$code.=<<___;
56#include "arm_arch.h"
57
58.text
59
60.type _vpaes_consts,%object
61.align 7 // totally strategic alignment
62_vpaes_consts:
63.Lk_mc_forward: // mc_forward
64 .quad 0x0407060500030201, 0x0C0F0E0D080B0A09
65 .quad 0x080B0A0904070605, 0x000302010C0F0E0D
66 .quad 0x0C0F0E0D080B0A09, 0x0407060500030201
67 .quad 0x000302010C0F0E0D, 0x080B0A0904070605
68.Lk_mc_backward:// mc_backward
69 .quad 0x0605040702010003, 0x0E0D0C0F0A09080B
70 .quad 0x020100030E0D0C0F, 0x0A09080B06050407
71 .quad 0x0E0D0C0F0A09080B, 0x0605040702010003
72 .quad 0x0A09080B06050407, 0x020100030E0D0C0F
73.Lk_sr: // sr
74 .quad 0x0706050403020100, 0x0F0E0D0C0B0A0908
75 .quad 0x030E09040F0A0500, 0x0B06010C07020D08
76 .quad 0x0F060D040B020900, 0x070E050C030A0108
77 .quad 0x0B0E0104070A0D00, 0x0306090C0F020508
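// .Lk_sr holds the byte permutations applied in the final round and by the
// key-schedule mangler: entry 0 is the identity, entry 1 is the AES ShiftRows
// permutation, and the entry is selected by an index the code keeps reduced
// mod 4 (x8 in the key schedule, x10/x11 in the cipher cores).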
78
79//
80// "Hot" constants
81//
82.Lk_inv: // inv, inva
83 .quad 0x0E05060F0D080180, 0x040703090A0B0C02
84 .quad 0x01040A060F0B0780, 0x030D0E0C02050809
85.Lk_ipt: // input transform (lo, hi)
86 .quad 0xC2B2E8985A2A7000, 0xCABAE09052227808
87 .quad 0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
88.Lk_sbo: // sbou, sbot
89 .quad 0xD0D26D176FBDC700, 0x15AABF7AC502A878
90 .quad 0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
91.Lk_sb1: // sb1u, sb1t
92 .quad 0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
93 .quad 0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
94.Lk_sb2: // sb2u, sb2t
95 .quad 0x69EB88400AE12900, 0xC2A163C8AB82234A
96 .quad 0xE27A93C60B712400, 0x5EB7E955BC982FCD
97
98//
99// Decryption stuff
100//
101.Lk_dipt: // decryption input transform
102 .quad 0x0F505B040B545F00, 0x154A411E114E451A
103 .quad 0x86E383E660056500, 0x12771772F491F194
104.Lk_dsbo: // decryption sbox final output
105 .quad 0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
106 .quad 0x12D7560F93441D00, 0xCA4B8159D8C58E9C
107.Lk_dsb9: // decryption sbox output *9*u, *9*t
108 .quad 0x851C03539A86D600, 0xCAD51F504F994CC9
109 .quad 0xC03B1789ECD74900, 0x725E2C9EB2FBA565
110.Lk_dsbd: // decryption sbox output *D*u, *D*t
111 .quad 0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
112 .quad 0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
113.Lk_dsbb: // decryption sbox output *B*u, *B*t
114 .quad 0xD022649296B44200, 0x602646F6B0F2D404
115 .quad 0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
116.Lk_dsbe: // decryption sbox output *E*u, *E*t
117 .quad 0x46F2929626D4D000, 0x2242600464B4F6B0
118 .quad 0x0C55A6CDFFAAC100, 0x9467F36B98593E32
119
120//
121// Key schedule constants
122//
123.Lk_dksd: // decryption key schedule: invskew x*D
124 .quad 0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
125 .quad 0x41C277F4B5368300, 0x5FDC69EAAB289D1E
126.Lk_dksb: // decryption key schedule: invskew x*B
127 .quad 0x9A4FCA1F8550D500, 0x03D653861CC94C99
128 .quad 0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
129.Lk_dkse: // decryption key schedule: invskew x*E + 0x63
130 .quad 0xD5031CCA1FC9D600, 0x53859A4C994F5086
131 .quad 0xA23196054FDC7BE8, 0xCD5EF96A20B31487
132.Lk_dks9: // decryption key schedule: invskew x*9
133 .quad 0xB6116FC87ED9A700, 0x4AED933482255BFC
134 .quad 0x4576516227143300, 0x8BB89FACE9DAFDCE
135
136.Lk_rcon: // rcon
137 .quad 0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81
138
139.Lk_opt: // output transform
140 .quad 0xFF9F4929D6B66000, 0xF7974121DEBE6808
141 .quad 0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
142.Lk_deskew: // deskew tables: inverts the sbox's "skew"
143 .quad 0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
144 .quad 0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77
145
146.asciz "Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
147.size _vpaes_consts,.-_vpaes_consts
148.align 6
149___
150
151
152{
153my ($inp,$out,$key) = map("x$_",(0..2));
154
155my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
156my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
157my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));
158
159$code.=<<___;
160//
161// _aes_preheat
162//
163// Fills register %r10 -> .aes_consts (so you can -fPIC)
164// and %xmm9-%xmm15 as specified below.
165//
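// In this ARMv8 port the equivalents are: x10 is pointed at the constants
// with adr, v17 holds the 0x0f nibble mask, v18-v19 = .Lk_inv,
// v20-v23 = .Lk_ipt/.Lk_sbo and v24-v27 = .Lk_sb1/.Lk_sb2 (see the loads below).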
166.type _vpaes_encrypt_preheat,%function
167.align 4
168_vpaes_encrypt_preheat:
169 adr x10, .Lk_inv
170 movi v17.16b, #0x0f
171 ld1 {v18.2d-v19.2d}, [x10],#32 // .Lk_inv
172 ld1 {v20.2d-v23.2d}, [x10],#64 // .Lk_ipt, .Lk_sbo
173 ld1 {v24.2d-v27.2d}, [x10] // .Lk_sb1, .Lk_sb2
174 ret
175.size _vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat
176
177//
178// _aes_encrypt_core
179//
180// AES-encrypt %xmm0.
181//
182// Inputs:
183// %xmm0 = input
184// %xmm9-%xmm15 as in _vpaes_preheat
185// (%rdx) = scheduled keys
186//
187// Output in %xmm0
188// Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
189// Preserves %xmm6 - %xmm8 so you get some local vectors
190//
191//
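// ARMv8 mapping: the block to encrypt is expected in v7 and the result is
// returned in v0; x9 walks the scheduled keys, w8 holds the round count,
// x10/x11 address the constant tables, and v1-v5/v16 are used as scratch.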
192.type _vpaes_encrypt_core,%function
193.align 4
194_vpaes_encrypt_core:
195 mov x9, $key
196 ldr w8, [$key,#240] // pull rounds
197 adr x11, .Lk_mc_forward+16
198 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
199 ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
200 and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
201 ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
202 tbl v1.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
203 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
204 tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
205 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
206 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
207 b .Lenc_entry
208
209.align 4
210.Lenc_loop:
211 // middle of middle round
212 add x10, x11, #0x40
213 tbl v4.16b, {$sb1t}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
214 ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
215 tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
216 eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
217 tbl v5.16b, {$sb2t}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
218 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
219 tbl v2.16b, {$sb2u}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
220 ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
221 tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
222 eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
223 tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
224 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
225 tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
226 eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
227 and x11, x11, #~(1<<6) // and \$0x30, %r11 # ... mod 4
228 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
229 sub w8, w8, #1 // nr--
230
231.Lenc_entry:
232 // top of round
233 and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
234 ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
235 tbl v5.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
236 eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
237 tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
238 tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
239 eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
240 eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
241 tbl v2.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
242 tbl v3.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
243 eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
244 eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
245 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
246 cbnz w8, .Lenc_loop
247
248 // middle of last round
249 add x10, x11, #0x80
250 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
251 // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
252 tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
253 ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
254 tbl v0.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
255 eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
256 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
257 tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0
258 ret
259.size _vpaes_encrypt_core,.-_vpaes_encrypt_core
260
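// Single-block entry point; the assumed C-level prototype (the same shape as
// AES_encrypt) is:
//     void vpaes_encrypt(const unsigned char *in, unsigned char *out,
//                        const AES_KEY *key);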
261.globl vpaes_encrypt
262.type vpaes_encrypt,%function
263.align 4
264vpaes_encrypt:
265 AARCH64_SIGN_LINK_REGISTER
266 stp x29,x30,[sp,#-16]!
267 add x29,sp,#0
268
269 ld1 {v7.16b}, [$inp]
270 bl _vpaes_encrypt_preheat
271 bl _vpaes_encrypt_core
272 st1 {v0.16b}, [$out]
273
274 ldp x29,x30,[sp],#16
275 AARCH64_VALIDATE_LINK_REGISTER
276 ret
277.size vpaes_encrypt,.-vpaes_encrypt
278
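// Two-block variant of _vpaes_encrypt_core: v14-v15 input, v0-v1 output,
// with the second block's intermediate state carried in v8-v13.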
279.type _vpaes_encrypt_2x,%function
280.align 4
281_vpaes_encrypt_2x:
282 mov x9, $key
283 ldr w8, [$key,#240] // pull rounds
284 adr x11, .Lk_mc_forward+16
285 // vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
286 ld1 {v16.2d}, [x9], #16 // vmovdqu (%r9), %xmm5 # round0 key
287 and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
288 ushr v0.16b, v14.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
289 and v9.16b, v15.16b, v17.16b
290 ushr v8.16b, v15.16b, #4
291 tbl v1.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm1
292 tbl v9.16b, {$iptlo}, v9.16b
293 // vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
294 tbl v2.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm3, %xmm2
295 tbl v10.16b, {$ipthi}, v8.16b
296 eor v0.16b, v1.16b, v16.16b // vpxor %xmm5, %xmm1, %xmm0
297 eor v8.16b, v9.16b, v16.16b
298 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
299 eor v8.16b, v8.16b, v10.16b
300 b .Lenc_2x_entry
301
302.align 4
303.Lenc_2x_loop:
304 // middle of middle round
305 add x10, x11, #0x40
306 tbl v4.16b, {$sb1t}, v2.16b // vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
307 tbl v12.16b, {$sb1t}, v10.16b
308 ld1 {v1.2d}, [x11], #16 // vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
309 tbl v0.16b, {$sb1u}, v3.16b // vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
310 tbl v8.16b, {$sb1u}, v11.16b
311 eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
312 eor v12.16b, v12.16b, v16.16b
313 tbl v5.16b, {$sb2t}, v2.16b // vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
314 tbl v13.16b, {$sb2t}, v10.16b
315 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
316 eor v8.16b, v8.16b, v12.16b
317 tbl v2.16b, {$sb2u}, v3.16b // vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
318 tbl v10.16b, {$sb2u}, v11.16b
319 ld1 {v4.2d}, [x10] // vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
320 tbl v3.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
321 tbl v11.16b, {v8.16b}, v1.16b
322 eor v2.16b, v2.16b, v5.16b // vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
323 eor v10.16b, v10.16b, v13.16b
324 tbl v0.16b, {v0.16b}, v4.16b // vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
325 tbl v8.16b, {v8.16b}, v4.16b
326 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
327 eor v11.16b, v11.16b, v10.16b
328 tbl v4.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
329 tbl v12.16b, {v11.16b},v1.16b
330 eor v0.16b, v0.16b, v3.16b // vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
331 eor v8.16b, v8.16b, v11.16b
332 and x11, x11, #~(1<<6) // and \$0x30, %r11 # ... mod 4
333 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
334 eor v8.16b, v8.16b, v12.16b
335 sub w8, w8, #1 // nr--
336
337.Lenc_2x_entry:
338 // top of round
339 and v1.16b, v0.16b, v17.16b // vpand %xmm0, %xmm9, %xmm1 # 0 = k
340 ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
341 and v9.16b, v8.16b, v17.16b
342 ushr v8.16b, v8.16b, #4
343 tbl v5.16b, {$invhi},v1.16b // vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
344 tbl v13.16b, {$invhi},v9.16b
345 eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
346 eor v9.16b, v9.16b, v8.16b
347 tbl v3.16b, {$invlo},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
348 tbl v11.16b, {$invlo},v8.16b
349 tbl v4.16b, {$invlo},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
350 tbl v12.16b, {$invlo},v9.16b
351 eor v3.16b, v3.16b, v5.16b // vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
352 eor v11.16b, v11.16b, v13.16b
353 eor v4.16b, v4.16b, v5.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
354 eor v12.16b, v12.16b, v13.16b
355 tbl v2.16b, {$invlo},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
356 tbl v10.16b, {$invlo},v11.16b
357 tbl v3.16b, {$invlo},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
358 tbl v11.16b, {$invlo},v12.16b
359 eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
360 eor v10.16b, v10.16b, v9.16b
361 eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
362 eor v11.16b, v11.16b, v8.16b
363 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm5
364 cbnz w8, .Lenc_2x_loop
365
366 // middle of last round
367 add x10, x11, #0x80
368 // vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
369 // vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
370 tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
371 tbl v12.16b, {$sbou}, v10.16b
372 ld1 {v1.2d}, [x10] // vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
373 tbl v0.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
374 tbl v8.16b, {$sbot}, v11.16b
375 eor v4.16b, v4.16b, v16.16b // vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
376 eor v12.16b, v12.16b, v16.16b
377 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 0 = A
378 eor v8.16b, v8.16b, v12.16b
379 tbl v0.16b, {v0.16b},v1.16b // vpshufb %xmm1, %xmm0, %xmm0
380 tbl v1.16b, {v8.16b},v1.16b
381 ret
382.size _vpaes_encrypt_2x,.-_vpaes_encrypt_2x
383
384.type _vpaes_decrypt_preheat,%function
385.align 4
386_vpaes_decrypt_preheat:
387 adr x10, .Lk_inv
388 movi v17.16b, #0x0f
389 adr x11, .Lk_dipt
390 ld1 {v18.2d-v19.2d}, [x10],#32 // .Lk_inv
391 ld1 {v20.2d-v23.2d}, [x11],#64 // .Lk_dipt, .Lk_dsbo
392 ld1 {v24.2d-v27.2d}, [x11],#64 // .Lk_dsb9, .Lk_dsbd
393 ld1 {v28.2d-v31.2d}, [x11] // .Lk_dsbb, .Lk_dsbe
394 ret
395.size _vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat
396
397//
398// Decryption core
399//
400// Same API as encryption core.
401//
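// In this port: input block in v7, result in v0, key schedule walked via x9,
// round count in w8, with x10/x11 addressing the constant tables.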
402.type _vpaes_decrypt_core,%function
403.align 4
404_vpaes_decrypt_core:
405 mov x9, $key
406 ldr w8, [$key,#240] // pull rounds
407
408 // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
409 lsl x11, x8, #4 // mov %rax, %r11; shl \$4, %r11
410 eor x11, x11, #0x30 // xor \$0x30, %r11
411 adr x10, .Lk_sr
412 and x11, x11, #0x30 // and \$0x30, %r11
413 add x11, x11, x10
414 adr x10, .Lk_mc_forward+48
415
416 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
417 and v1.16b, v7.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
418 ushr v0.16b, v7.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
419 tbl v2.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
420 ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
421 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
422 tbl v0.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
423 eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
424 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
425 b .Ldec_entry
426
427.align 4
428.Ldec_loop:
429//
430// Inverse mix columns
431//
432 // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
433 // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
434 tbl v4.16b, {$sb9u}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
435 tbl v1.16b, {$sb9t}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
436 eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
437 // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
438 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
439 // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
440
441 tbl v4.16b, {$sbdu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
442 tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
443 tbl v1.16b, {$sbdt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
444 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
445 // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
446 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
447 // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
448
449 tbl v4.16b, {$sbbu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
450 tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
451 tbl v1.16b, {$sbbt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
452 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
453 // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
454 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
455 // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
456
457 tbl v4.16b, {$sbeu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
458 tbl v0.16b, {v0.16b}, v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
459 tbl v1.16b, {$sbet}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
460 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
461 ext v5.16b, v5.16b, v5.16b, #12 // vpalignr \$12, %xmm5, %xmm5, %xmm5
462 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
463 sub w8, w8, #1 // sub \$1,%rax # nr--
464
465.Ldec_entry:
466 // top of round
467 and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
468 ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
469 tbl v2.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
470 eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
471 tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
472 tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
473 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
474 eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
475 tbl v2.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
476 tbl v3.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
477 eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
478 eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
479 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
480 cbnz w8, .Ldec_loop
481
482 // middle of last round
483 // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
484 tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
485 // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
486 ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
487 tbl v1.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
488 eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
489 eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
490 tbl v0.16b, {v0.16b}, v2.16b // vpshufb %xmm2, %xmm0, %xmm0
491 ret
492.size _vpaes_decrypt_core,.-_vpaes_decrypt_core
493
494.globl vpaes_decrypt
495.type vpaes_decrypt,%function
496.align 4
497vpaes_decrypt:
498 AARCH64_SIGN_LINK_REGISTER
499 stp x29,x30,[sp,#-16]!
500 add x29,sp,#0
501
502 ld1 {v7.16b}, [$inp]
503 bl _vpaes_decrypt_preheat
504 bl _vpaes_decrypt_core
505 st1 {v0.16b}, [$out]
506
507 ldp x29,x30,[sp],#16
508 AARCH64_VALIDATE_LINK_REGISTER
509 ret
510.size vpaes_decrypt,.-vpaes_decrypt
511
512// v14-v15 input, v0-v1 output
513.type _vpaes_decrypt_2x,%function
514.align 4
515_vpaes_decrypt_2x:
516 mov x9, $key
517 ldr w8, [$key,#240] // pull rounds
518
519 // vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
520 lsl x11, x8, #4 // mov %rax, %r11; shl \$4, %r11
521 eor x11, x11, #0x30 // xor \$0x30, %r11
522 adr x10, .Lk_sr
523 and x11, x11, #0x30 // and \$0x30, %r11
524 add x11, x11, x10
525 adr x10, .Lk_mc_forward+48
526
527 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm4 # round0 key
528 and v1.16b, v14.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
529 ushr v0.16b, v14.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
530 and v9.16b, v15.16b, v17.16b
531 ushr v8.16b, v15.16b, #4
532 tbl v2.16b, {$iptlo},v1.16b // vpshufb %xmm1, %xmm2, %xmm2
533 tbl v10.16b, {$iptlo},v9.16b
534 ld1 {v5.2d}, [x10] // vmovdqa .Lk_mc_forward+48(%rip), %xmm5
535 // vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
536 tbl v0.16b, {$ipthi},v0.16b // vpshufb %xmm0, %xmm1, %xmm0
537 tbl v8.16b, {$ipthi},v8.16b
538 eor v2.16b, v2.16b, v16.16b // vpxor %xmm4, %xmm2, %xmm2
539 eor v10.16b, v10.16b, v16.16b
540 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
541 eor v8.16b, v8.16b, v10.16b
542 b .Ldec_2x_entry
543
544.align 4
545.Ldec_2x_loop:
546//
547// Inverse mix columns
548//
549 // vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
550 // vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
551 tbl v4.16b, {$sb9u}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
552 tbl v12.16b, {$sb9u}, v10.16b
553 tbl v1.16b, {$sb9t}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
554 tbl v9.16b, {$sb9t}, v11.16b
555 eor v0.16b, v4.16b, v16.16b // vpxor %xmm4, %xmm0, %xmm0
556 eor v8.16b, v12.16b, v16.16b
557 // vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
558 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
559 eor v8.16b, v8.16b, v9.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
560 // vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt
561
562 tbl v4.16b, {$sbdu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
563 tbl v12.16b, {$sbdu}, v10.16b
564 tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
565 tbl v8.16b, {v8.16b},v5.16b
566 tbl v1.16b, {$sbdt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
567 tbl v9.16b, {$sbdt}, v11.16b
568 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
569 eor v8.16b, v8.16b, v12.16b
570 // vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
571 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
572 eor v8.16b, v8.16b, v9.16b
573 // vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt
574
575 tbl v4.16b, {$sbbu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
576 tbl v12.16b, {$sbbu}, v10.16b
577 tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
578 tbl v8.16b, {v8.16b},v5.16b
579 tbl v1.16b, {$sbbt}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
580 tbl v9.16b, {$sbbt}, v11.16b
581 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
582 eor v8.16b, v8.16b, v12.16b
583 // vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
584 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
585 eor v8.16b, v8.16b, v9.16b
586 // vmovdqa 0x50(%r10), %xmm1 # 0 : sbet
587
588 tbl v4.16b, {$sbeu}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
589 tbl v12.16b, {$sbeu}, v10.16b
590 tbl v0.16b, {v0.16b},v5.16b // vpshufb %xmm5, %xmm0, %xmm0 # MC ch
591 tbl v8.16b, {v8.16b},v5.16b
592 tbl v1.16b, {$sbet}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
593 tbl v9.16b, {$sbet}, v11.16b
594 eor v0.16b, v0.16b, v4.16b // vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
595 eor v8.16b, v8.16b, v12.16b
596 ext v5.16b, v5.16b, v5.16b, #12 // vpalignr \$12, %xmm5, %xmm5, %xmm5
597 eor v0.16b, v0.16b, v1.16b // vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
598 eor v8.16b, v8.16b, v9.16b
599 sub w8, w8, #1 // sub \$1,%rax # nr--
600
601.Ldec_2x_entry:
602 // top of round
603 and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
604 ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
605 and v9.16b, v8.16b, v17.16b
606 ushr v8.16b, v8.16b, #4
607 tbl v2.16b, {$invhi},v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
608 tbl v10.16b, {$invhi},v9.16b
609 eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
610 eor v9.16b, v9.16b, v8.16b
611 tbl v3.16b, {$invlo},v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
612 tbl v11.16b, {$invlo},v8.16b
613 tbl v4.16b, {$invlo},v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
614 tbl v12.16b, {$invlo},v9.16b
615 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
616 eor v11.16b, v11.16b, v10.16b
617 eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
618 eor v12.16b, v12.16b, v10.16b
619 tbl v2.16b, {$invlo},v3.16b // vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
620 tbl v10.16b, {$invlo},v11.16b
621 tbl v3.16b, {$invlo},v4.16b // vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
622 tbl v11.16b, {$invlo},v12.16b
623 eor v2.16b, v2.16b, v1.16b // vpxor %xmm1, %xmm2, %xmm2 # 2 = io
624 eor v10.16b, v10.16b, v9.16b
625 eor v3.16b, v3.16b, v0.16b // vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
626 eor v11.16b, v11.16b, v8.16b
627 ld1 {v16.2d}, [x9],#16 // vmovdqu (%r9), %xmm0
628 cbnz w8, .Ldec_2x_loop
629
630 // middle of last round
631 // vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
632 tbl v4.16b, {$sbou}, v2.16b // vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
633 tbl v12.16b, {$sbou}, v10.16b
634 // vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
635 tbl v1.16b, {$sbot}, v3.16b // vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
636 tbl v9.16b, {$sbot}, v11.16b
637 ld1 {v2.2d}, [x11] // vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
638 eor v4.16b, v4.16b, v16.16b // vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
639 eor v12.16b, v12.16b, v16.16b
640 eor v0.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm0 # 0 = A
641 eor v8.16b, v9.16b, v12.16b
642 tbl v0.16b, {v0.16b},v2.16b // vpshufb %xmm2, %xmm0, %xmm0
643 tbl v1.16b, {v8.16b},v2.16b
644 ret
645.size _vpaes_decrypt_2x,.-_vpaes_decrypt_2x
646___
647}
648
649{
650my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
651my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));
652
653$code.=<<___;
654////////////////////////////////////////////////////////
655// //
656// AES key schedule //
657// //
658////////////////////////////////////////////////////////
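// Overview: vpaes_set_encrypt_key/vpaes_set_decrypt_key store the round count
// (nbits/32 + 5) at key+240 and call _vpaes_schedule_core, which dispatches on
// the 128/192/256-bit cases and emits one mangled (basis-transformed,
// ShiftRows-adjusted) round key per call to _vpaes_schedule_mangle; forward
// for encryption, backward from the end of the schedule for decryption.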
659.type _vpaes_key_preheat,%function
660.align 4
661_vpaes_key_preheat:
662 adr x10, .Lk_inv
663 movi v16.16b, #0x5b // .Lk_s63
664 adr x11, .Lk_sb1
665 movi v17.16b, #0x0f // .Lk_s0F
666 ld1 {v18.2d-v21.2d}, [x10] // .Lk_inv, .Lk_ipt
667 adr x10, .Lk_dksd
668 ld1 {v22.2d-v23.2d}, [x11] // .Lk_sb1
669 adr x11, .Lk_mc_forward
670 ld1 {v24.2d-v27.2d}, [x10],#64 // .Lk_dksd, .Lk_dksb
671 ld1 {v28.2d-v31.2d}, [x10],#64 // .Lk_dkse, .Lk_dks9
672 ld1 {v8.2d}, [x10] // .Lk_rcon
673 ld1 {v9.2d}, [x11] // .Lk_mc_forward[0]
674 ret
675.size _vpaes_key_preheat,.-_vpaes_key_preheat
676
677.type _vpaes_schedule_core,%function
678.align 4
679_vpaes_schedule_core:
680 AARCH64_SIGN_LINK_REGISTER
681 stp x29, x30, [sp,#-16]!
682 add x29,sp,#0
683
684 bl _vpaes_key_preheat // load the tables
685
686 ld1 {v0.16b}, [$inp],#16 // vmovdqu (%rdi), %xmm0 # load key (unaligned)
687
688 // input transform
689 mov v3.16b, v0.16b // vmovdqa %xmm0, %xmm3
690 bl _vpaes_schedule_transform
691 mov v7.16b, v0.16b // vmovdqa %xmm0, %xmm7
692
693 adr x10, .Lk_sr // lea .Lk_sr(%rip),%r10
694 add x8, x8, x10
695 cbnz $dir, .Lschedule_am_decrypting
696
697 // encrypting, output zeroth round key after transform
698 st1 {v0.2d}, [$out] // vmovdqu %xmm0, (%rdx)
699 b .Lschedule_go
700
701.Lschedule_am_decrypting:
702 // decrypting, output zeroth round key after shiftrows
703 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
704 tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
705 st1 {v3.2d}, [$out] // vmovdqu %xmm3, (%rdx)
706 eor x8, x8, #0x30 // xor \$0x30, %r8
707
708.Lschedule_go:
709 cmp $bits, #192 // cmp \$192, %esi
710 b.hi .Lschedule_256
711 b.eq .Lschedule_192
712// 128: fall through
713
714//
715// .schedule_128
716//
717// 128-bit specific part of key schedule.
718//
719// This schedule is really simple, because all its parts
720// are accomplished by the subroutines.
721//
722.Lschedule_128:
723 mov $inp, #10 // mov \$10, %esi
724
725.Loop_schedule_128:
726 sub $inp, $inp, #1 // dec %esi
727 bl _vpaes_schedule_round
728 cbz $inp, .Lschedule_mangle_last
729 bl _vpaes_schedule_mangle // write output
730 b .Loop_schedule_128
731
732//
733// .aes_schedule_192
734//
735// 192-bit specific part of key schedule.
736//
737// The main body of this schedule is the same as the 128-bit
738// schedule, but with more smearing. The long, high side is
739// stored in %xmm7 as before, and the short, low side is in
740// the high bits of %xmm6.
741//
742// This schedule is somewhat nastier, however, because each
743// round produces 192 bits of key material, or 1.5 round keys.
744// Therefore, on each cycle we do 2 rounds and produce 3 round
745// keys.
746//
747.align 4
748.Lschedule_192:
749 sub $inp, $inp, #8
750 ld1 {v0.16b}, [$inp] // vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
751 bl _vpaes_schedule_transform // input transform
752 mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save short part
753 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4 # clear 4
754 ins v6.d[0], v4.d[0] // vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
755 mov $inp, #4 // mov \$4, %esi
756
757.Loop_schedule_192:
758 sub $inp, $inp, #1 // dec %esi
759 bl _vpaes_schedule_round
760 ext v0.16b, v6.16b, v0.16b, #8 // vpalignr \$8,%xmm6,%xmm0,%xmm0
761 bl _vpaes_schedule_mangle // save key n
762 bl _vpaes_schedule_192_smear
763 bl _vpaes_schedule_mangle // save key n+1
764 bl _vpaes_schedule_round
765 cbz $inp, .Lschedule_mangle_last
766 bl _vpaes_schedule_mangle // save key n+2
767 bl _vpaes_schedule_192_smear
768 b .Loop_schedule_192
769
770//
771// .aes_schedule_256
772//
773// 256-bit specific part of key schedule.
774//
775// The structure here is very similar to the 128-bit
776// schedule, but with an additional "low side" in
777// %xmm6. The low side's rounds are the same as the
778// high side's, except no rcon and no rotation.
779//
780.align 4
781.Lschedule_256:
782 ld1 {v0.16b}, [$inp] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
783 bl _vpaes_schedule_transform // input transform
784 mov $inp, #7 // mov \$7, %esi
785
786.Loop_schedule_256:
787 sub $inp, $inp, #1 // dec %esi
788 bl _vpaes_schedule_mangle // output low result
789 mov v6.16b, v0.16b // vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6
790
791 // high round
792 bl _vpaes_schedule_round
793 cbz $inp, .Lschedule_mangle_last
794 bl _vpaes_schedule_mangle
795
796 // low round. swap xmm7 and xmm6
797 dup v0.4s, v0.s[3] // vpshufd \$0xFF, %xmm0, %xmm0
798 movi v4.16b, #0
799 mov v5.16b, v7.16b // vmovdqa %xmm7, %xmm5
800 mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7
801 bl _vpaes_schedule_low_round
802 mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7
803
804 b .Loop_schedule_256
805
806//
807// .aes_schedule_mangle_last
808//
809// Mangler for last round of key schedule
810// Mangles %xmm0
811// when encrypting, outputs out(%xmm0) ^ 63
812// when decrypting, outputs unskew(%xmm0)
813//
814// Always called right before return... jumps to cleanup and exits
815//
816.align 4
817.Lschedule_mangle_last:
818 // schedule last round key from xmm0
819 adr x11, .Lk_deskew // lea .Lk_deskew(%rip),%r11 # prepare to deskew
820 cbnz $dir, .Lschedule_mangle_last_dec
821
822 // encrypting
823 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10),%xmm1
824 adr x11, .Lk_opt // lea .Lk_opt(%rip), %r11 # prepare to output transform
825 add $out, $out, #32 // add \$32, %rdx
826 tbl v0.16b, {v0.16b}, v1.16b // vpshufb %xmm1, %xmm0, %xmm0 # output permute
827
828.Lschedule_mangle_last_dec:
829 ld1 {v20.2d-v21.2d}, [x11] // reload constants
830 sub $out, $out, #16 // add \$-16, %rdx
831 eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0
832 bl _vpaes_schedule_transform // output transform
833 st1 {v0.2d}, [$out] // vmovdqu %xmm0, (%rdx) # save last key
834
835 // cleanup
836 eor v0.16b, v0.16b, v0.16b // vpxor %xmm0, %xmm0, %xmm0
837 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
838 eor v2.16b, v2.16b, v2.16b // vpxor %xmm2, %xmm2, %xmm2
839 eor v3.16b, v3.16b, v3.16b // vpxor %xmm3, %xmm3, %xmm3
840 eor v4.16b, v4.16b, v4.16b // vpxor %xmm4, %xmm4, %xmm4
841 eor v5.16b, v5.16b, v5.16b // vpxor %xmm5, %xmm5, %xmm5
842 eor v6.16b, v6.16b, v6.16b // vpxor %xmm6, %xmm6, %xmm6
843 eor v7.16b, v7.16b, v7.16b // vpxor %xmm7, %xmm7, %xmm7
844 ldp x29, x30, [sp],#16
845 AARCH64_VALIDATE_LINK_REGISTER
846 ret
847.size _vpaes_schedule_core,.-_vpaes_schedule_core
848
849//
850// .aes_schedule_192_smear
851//
852// Smear the short, low side in the 192-bit key schedule.
853//
854// Inputs:
855// %xmm7: high side, b a x y
856// %xmm6: low side, d c 0 0
857// %xmm13: 0
858//
859// Outputs:
860// %xmm6: b+c+d b+c 0 0
861// %xmm0: b+c+d b+c b a
862//
863.type _vpaes_schedule_192_smear,%function
864.align 4
865_vpaes_schedule_192_smear:
866 movi v1.16b, #0
867 dup v0.4s, v7.s[3]
868 ins v1.s[3], v6.s[2] // vpshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
869 ins v0.s[0], v7.s[2] // vpshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
870 eor v6.16b, v6.16b, v1.16b // vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
871 eor v1.16b, v1.16b, v1.16b // vpxor %xmm1, %xmm1, %xmm1
872 eor v6.16b, v6.16b, v0.16b // vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
873 mov v0.16b, v6.16b // vmovdqa %xmm6, %xmm0
874 ins v6.d[0], v1.d[0] // vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
875 ret
876.size _vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
877
878//
879// .aes_schedule_round
880//
881// Runs one main round of the key schedule on %xmm0, %xmm7
882//
883// Specifically, runs subbytes on the high dword of %xmm0
884// then rotates it by one byte and xors into the low dword of
885// %xmm7.
886//
887// Adds rcon from low byte of %xmm8, then rotates %xmm8 for
888// next rcon.
889//
890// Smears the dwords of %xmm7 by xoring the low into the
891// second low, result into third, result into highest.
892//
893// Returns results in %xmm7 = %xmm0.
894// Clobbers %xmm1-%xmm4, %r11.
895//
896.type _vpaes_schedule_round,%function
897.align 4
898_vpaes_schedule_round:
899 // extract rcon from xmm8
900 movi v4.16b, #0 // vpxor %xmm4, %xmm4, %xmm4
901 ext v1.16b, $rcon, v4.16b, #15 // vpalignr \$15, %xmm8, %xmm4, %xmm1
902 ext $rcon, $rcon, $rcon, #15 // vpalignr \$15, %xmm8, %xmm8, %xmm8
903 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
904
905 // rotate
906 dup v0.4s, v0.s[3] // vpshufd \$0xFF, %xmm0, %xmm0
907 ext v0.16b, v0.16b, v0.16b, #1 // vpalignr \$1, %xmm0, %xmm0, %xmm0
908
909 // fall through...
910
911 // low round: same as high round, but no rotation and no rcon.
912_vpaes_schedule_low_round:
913 // smear xmm7
914 ext v1.16b, v4.16b, v7.16b, #12 // vpslldq \$4, %xmm7, %xmm1
915 eor v7.16b, v7.16b, v1.16b // vpxor %xmm1, %xmm7, %xmm7
916 ext v4.16b, v4.16b, v7.16b, #8 // vpslldq \$8, %xmm7, %xmm4
917
918 // subbytes
919 and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1 # 0 = k
920 ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0 # 1 = i
921 eor v7.16b, v7.16b, v4.16b // vpxor %xmm4, %xmm7, %xmm7
922 tbl v2.16b, {$invhi}, v1.16b // vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
923 eor v1.16b, v1.16b, v0.16b // vpxor %xmm0, %xmm1, %xmm1 # 0 = j
924 tbl v3.16b, {$invlo}, v0.16b // vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
925 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
926 tbl v4.16b, {$invlo}, v1.16b // vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
927 eor v7.16b, v7.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm7, %xmm7
928 tbl v3.16b, {$invlo}, v3.16b // vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
929 eor v4.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
930 tbl v2.16b, {$invlo}, v4.16b // vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
931 eor v3.16b, v3.16b, v1.16b // vpxor %xmm1, %xmm3, %xmm3 # 2 = io
932 eor v2.16b, v2.16b, v0.16b // vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
933 tbl v4.16b, {v23.16b}, v3.16b // vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
934 tbl v1.16b, {v22.16b}, v2.16b // vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
935 eor v1.16b, v1.16b, v4.16b // vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output
936
937 // add in smeared stuff
938 eor v0.16b, v1.16b, v7.16b // vpxor %xmm7, %xmm1, %xmm0
939 eor v7.16b, v1.16b, v7.16b // vmovdqa %xmm0, %xmm7
940 ret
941.size _vpaes_schedule_round,.-_vpaes_schedule_round
942
943//
944// .aes_schedule_transform
945//
946// Linear-transform %xmm0 according to tables at (%r11)
947//
948// Requires that %xmm9 = 0x0F0F... as in preheat
949// Output in %xmm0
950// Clobbers %xmm1, %xmm2
951//
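// Concretely, each byte x is mapped to lo_table[x & 0x0f] ^ hi_table[x >> 4],
// with the two 16-byte tables held in v20/v21.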
952.type _vpaes_schedule_transform,%function
953.align 4
954_vpaes_schedule_transform:
955 and v1.16b, v0.16b, v17.16b // vpand %xmm9, %xmm0, %xmm1
956 ushr v0.16b, v0.16b, #4 // vpsrlb \$4, %xmm0, %xmm0
957 // vmovdqa (%r11), %xmm2 # lo
958 tbl v2.16b, {$iptlo}, v1.16b // vpshufb %xmm1, %xmm2, %xmm2
959 // vmovdqa 16(%r11), %xmm1 # hi
960 tbl v0.16b, {$ipthi}, v0.16b // vpshufb %xmm0, %xmm1, %xmm0
961 eor v0.16b, v0.16b, v2.16b // vpxor %xmm2, %xmm0, %xmm0
962 ret
963.size _vpaes_schedule_transform,.-_vpaes_schedule_transform
964
965//
966// .aes_schedule_mangle
967//
968// Mangle xmm0 from (basis-transformed) standard version
969// to our version.
970//
971// On encrypt,
972// xor with 0x63
973// multiply by circulant 0,1,1,1
974// apply shiftrows transform
975//
976// On decrypt,
977// xor with 0x63
978// multiply by "inverse mixcolumns" circulant E,B,D,9
979// deskew
980// apply shiftrows transform
981//
982//
983// Writes out to (%rdx), and increments or decrements it
984// Keeps track of round number mod 4 in %r8
985// Preserves xmm0
986// Clobbers xmm1-xmm5
987//
988.type _vpaes_schedule_mangle,%function
989.align 4
990_vpaes_schedule_mangle:
991 mov v4.16b, v0.16b // vmovdqa %xmm0, %xmm4 # save xmm0 for later
992 // vmovdqa .Lk_mc_forward(%rip),%xmm5
993 cbnz $dir, .Lschedule_mangle_dec
994
995 // encrypting
996 eor v4.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm4
997 add $out, $out, #16 // add \$16, %rdx
998 tbl v4.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm4
999 tbl v1.16b, {v4.16b}, v9.16b // vpshufb %xmm5, %xmm4, %xmm1
1000 tbl v3.16b, {v1.16b}, v9.16b // vpshufb %xmm5, %xmm1, %xmm3
1001 eor v4.16b, v4.16b, v1.16b // vpxor %xmm1, %xmm4, %xmm4
1002 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
1003 eor v3.16b, v3.16b, v4.16b // vpxor %xmm4, %xmm3, %xmm3
1004
1005 b .Lschedule_mangle_both
1006.align 4
1007.Lschedule_mangle_dec:
1008 // inverse mix columns
1009 // lea .Lk_dksd(%rip),%r11
1010 ushr v1.16b, v4.16b, #4 // vpsrlb \$4, %xmm4, %xmm1 # 1 = hi
1011 and v4.16b, v4.16b, v17.16b // vpand %xmm9, %xmm4, %xmm4 # 4 = lo
1012
1013 // vmovdqa 0x00(%r11), %xmm2
1014 tbl v2.16b, {v24.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
1015 // vmovdqa 0x10(%r11), %xmm3
1016 tbl v3.16b, {v25.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
1017 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
1018 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
1019
1020 // vmovdqa 0x20(%r11), %xmm2
1021 tbl v2.16b, {v26.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
1022 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
1023 // vmovdqa 0x30(%r11), %xmm3
1024 tbl v3.16b, {v27.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
1025 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
1026 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
1027
1028 // vmovdqa 0x40(%r11), %xmm2
1029 tbl v2.16b, {v28.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
1030 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
1031 // vmovdqa 0x50(%r11), %xmm3
1032 tbl v3.16b, {v29.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
1033 eor v3.16b, v3.16b, v2.16b // vpxor %xmm2, %xmm3, %xmm3
1034
1035 // vmovdqa 0x60(%r11), %xmm2
1036 tbl v2.16b, {v30.16b}, v4.16b // vpshufb %xmm4, %xmm2, %xmm2
1037 tbl v3.16b, {v3.16b}, v9.16b // vpshufb %xmm5, %xmm3, %xmm3
1038 // vmovdqa 0x70(%r11), %xmm4
1039 tbl v4.16b, {v31.16b}, v1.16b // vpshufb %xmm1, %xmm4, %xmm4
1040 ld1 {v1.2d}, [x8] // vmovdqa (%r8,%r10), %xmm1
1041 eor v2.16b, v2.16b, v3.16b // vpxor %xmm3, %xmm2, %xmm2
1042 eor v3.16b, v4.16b, v2.16b // vpxor %xmm2, %xmm4, %xmm3
1043
1044 sub $out, $out, #16 // add \$-16, %rdx
1045
1046.Lschedule_mangle_both:
1047 tbl v3.16b, {v3.16b}, v1.16b // vpshufb %xmm1, %xmm3, %xmm3
1048 add x8, x8, #64-16 // add \$-16, %r8
1049 and x8, x8, #~(1<<6) // and \$0x30, %r8
1050 st1 {v3.2d}, [$out] // vmovdqu %xmm3, (%rdx)
1051 ret
1052.size _vpaes_schedule_mangle,.-_vpaes_schedule_mangle
1053
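// Assumed C prototype, analogous to AES_set_encrypt_key:
//     int vpaes_set_encrypt_key(const unsigned char *userKey, int bits,
//                               AES_KEY *key);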
1054.globl vpaes_set_encrypt_key
1055.type vpaes_set_encrypt_key,%function
1056.align 4
1057vpaes_set_encrypt_key:
1058 AARCH64_SIGN_LINK_REGISTER
1059 stp x29,x30,[sp,#-16]!
1060 add x29,sp,#0
1061 stp d8,d9,[sp,#-16]! // ABI spec says so
1062
1063 lsr w9, $bits, #5 // shr \$5,%eax
1064 add w9, w9, #5 // \$5,%eax
1065 str w9, [$out,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
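 // i.e. 9, 11 or 13 for 128/192/256-bit keys; the cipher cores run that
 // many full rounds after the initial key xor, plus one final round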
1066
1067 mov $dir, #0 // mov \$0,%ecx
1068 mov x8, #0x30 // mov \$0x30,%r8d
1069 bl _vpaes_schedule_core
1070 eor x0, x0, x0
1071
1072 ldp d8,d9,[sp],#16
1073 ldp x29,x30,[sp],#16
1074 AARCH64_VALIDATE_LINK_REGISTER
1075 ret
1076.size vpaes_set_encrypt_key,.-vpaes_set_encrypt_key
1077
1078.globl vpaes_set_decrypt_key
1079.type vpaes_set_decrypt_key,%function
1080.align 4
1081vpaes_set_decrypt_key:
1082 AARCH64_SIGN_LINK_REGISTER
1083 stp x29,x30,[sp,#-16]!
1084 add x29,sp,#0
1085 stp d8,d9,[sp,#-16]! // ABI spec says so
1086
1087 lsr w9, $bits, #5 // shr \$5,%eax
1088 add w9, w9, #5 // \$5,%eax
1089 str w9, [$out,#240] // mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
1090 lsl w9, w9, #4 // shl \$4,%eax
1091 add $out, $out, #16 // lea 16(%rdx,%rax),%rdx
1092 add $out, $out, x9
1093
1094 mov $dir, #1 // mov \$1,%ecx
1095 lsr w8, $bits, #1 // shr \$1,%r8d
1096 and x8, x8, #32 // and \$32,%r8d
1097 eor x8, x8, #32 // xor \$32,%r8d # nbits==192?0:32
1098 bl _vpaes_schedule_core
1099
1100 ldp d8,d9,[sp],#16
1101 ldp x29,x30,[sp],#16
1102 AARCH64_VALIDATE_LINK_REGISTER
1103 ret
1104.size vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
1105___
1106}
1107{
1108my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));
1109
1110$code.=<<___;
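// CBC entry point; the assumed C prototype mirrors AES_cbc_encrypt:
//     void vpaes_cbc_encrypt(const unsigned char *in, unsigned char *out,
//                            size_t length, const AES_KEY *key,
//                            unsigned char *ivec, int enc);
// Arguments arrive in x0..x5 in that order; w5 selects encrypt (non-zero)
// vs decrypt (zero).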
1111.globl vpaes_cbc_encrypt
1112.type vpaes_cbc_encrypt,%function
1113.align 4
1114vpaes_cbc_encrypt:
1115 AARCH64_SIGN_LINK_REGISTER
1116 cbz $len, .Lcbc_abort
1117 cmp w5, #0 // check direction
1118 b.eq vpaes_cbc_decrypt
1119
1120 stp x29,x30,[sp,#-16]!
1121 add x29,sp,#0
1122
1123 mov x17, $len // reassign
1124 mov x2, $key // reassign
1125
1126 ld1 {v0.16b}, [$ivec] // load ivec
1127 bl _vpaes_encrypt_preheat
1128 b .Lcbc_enc_loop
1129
1130.align 4
1131.Lcbc_enc_loop:
1132 ld1 {v7.16b}, [$inp],#16 // load input
1133 eor v7.16b, v7.16b, v0.16b // xor with ivec
1134 bl _vpaes_encrypt_core
1135 st1 {v0.16b}, [$out],#16 // save output
1136 subs x17, x17, #16
1137 b.hi .Lcbc_enc_loop
1138
1139 st1 {v0.16b}, [$ivec] // write ivec
1140
1141 ldp x29,x30,[sp],#16
1142.Lcbc_abort:
1143 AARCH64_VALIDATE_LINK_REGISTER
1144 ret
1145.size vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
1146
1147.type vpaes_cbc_decrypt,%function
1148.align 4
1149vpaes_cbc_decrypt:
1150 // Not adding AARCH64_SIGN_LINK_REGISTER here because vpaes_cbc_decrypt is jumped to
1151 // only from vpaes_cbc_encrypt which has already signed the return address.
1152 stp x29,x30,[sp,#-16]!
1153 add x29,sp,#0
1154 stp d8,d9,[sp,#-16]! // ABI spec says so
1155 stp d10,d11,[sp,#-16]!
1156 stp d12,d13,[sp,#-16]!
1157 stp d14,d15,[sp,#-16]!
1158
1159 mov x17, $len // reassign
1160 mov x2, $key // reassign
1161 ld1 {v6.16b}, [$ivec] // load ivec
1162 bl _vpaes_decrypt_preheat
1163 tst x17, #16
1164 b.eq .Lcbc_dec_loop2x
1165
1166 ld1 {v7.16b}, [$inp], #16 // load input
1167 bl _vpaes_decrypt_core
1168 eor v0.16b, v0.16b, v6.16b // xor with ivec
1169 orr v6.16b, v7.16b, v7.16b // next ivec value
1170 st1 {v0.16b}, [$out], #16
1171 subs x17, x17, #16
1172 b.ls .Lcbc_dec_done
1173
1174.align 4
1175.Lcbc_dec_loop2x:
1176 ld1 {v14.16b,v15.16b}, [$inp], #32
1177 bl _vpaes_decrypt_2x
1178 eor v0.16b, v0.16b, v6.16b // xor with ivec
1179 eor v1.16b, v1.16b, v14.16b
1180 orr v6.16b, v15.16b, v15.16b
1181 st1 {v0.16b,v1.16b}, [$out], #32
1182 subs x17, x17, #32
1183 b.hi .Lcbc_dec_loop2x
1184
1185.Lcbc_dec_done:
1186 st1 {v6.16b}, [$ivec]
1187
1188 ldp d14,d15,[sp],#16
1189 ldp d12,d13,[sp],#16
1190 ldp d10,d11,[sp],#16
1191 ldp d8,d9,[sp],#16
1192 ldp x29,x30,[sp],#16
1193 AARCH64_VALIDATE_LINK_REGISTER
1194 ret
1195.size vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
1196___
1197if (1) {
1198$code.=<<___;
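// The ECB helpers below drive the two-block (_2x) cores, handling one block
// through the single-block core first when the length is an odd multiple
// of 16 bytes.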
1199.globl vpaes_ecb_encrypt
1200.type vpaes_ecb_encrypt,%function
1201.align 4
1202vpaes_ecb_encrypt:
1203 AARCH64_SIGN_LINK_REGISTER
1204 stp x29,x30,[sp,#-16]!
1205 add x29,sp,#0
1206 stp d8,d9,[sp,#-16]! // ABI spec says so
1207 stp d10,d11,[sp,#-16]!
1208 stp d12,d13,[sp,#-16]!
1209 stp d14,d15,[sp,#-16]!
1210
1211 mov x17, $len
1212 mov x2, $key
1213 bl _vpaes_encrypt_preheat
1214 tst x17, #16
1215 b.eq .Lecb_enc_loop
1216
1217 ld1 {v7.16b}, [$inp],#16
1218 bl _vpaes_encrypt_core
1219 st1 {v0.16b}, [$out],#16
1220 subs x17, x17, #16
1221 b.ls .Lecb_enc_done
1222
1223.align 4
1224.Lecb_enc_loop:
1225 ld1 {v14.16b,v15.16b}, [$inp], #32
1226 bl _vpaes_encrypt_2x
1227 st1 {v0.16b,v1.16b}, [$out], #32
1228 subs x17, x17, #32
1229 b.hi .Lecb_enc_loop
1230
1231.Lecb_enc_done:
1232 ldp d14,d15,[sp],#16
1233 ldp d12,d13,[sp],#16
1234 ldp d10,d11,[sp],#16
1235 ldp d8,d9,[sp],#16
1236 ldp x29,x30,[sp],#16
1237 AARCH64_VALIDATE_LINK_REGISTER
1238 ret
1239.size vpaes_ecb_encrypt,.-vpaes_ecb_encrypt
1240
1241.globl vpaes_ecb_decrypt
1242.type vpaes_ecb_decrypt,%function
1243.align 4
1244vpaes_ecb_decrypt:
1245 AARCH64_SIGN_LINK_REGISTER
1246 stp x29,x30,[sp,#-16]!
1247 add x29,sp,#0
1248 stp d8,d9,[sp,#-16]! // ABI spec says so
1249 stp d10,d11,[sp,#-16]!
1250 stp d12,d13,[sp,#-16]!
1251 stp d14,d15,[sp,#-16]!
1252
1253 mov x17, $len
1254 mov x2, $key
1255 bl _vpaes_decrypt_preheat
1256 tst x17, #16
1257 b.eq .Lecb_dec_loop
1258
1259 ld1 {v7.16b}, [$inp],#16
1260 bl _vpaes_decrypt_core
1261 st1 {v0.16b}, [$out],#16
1262 subs x17, x17, #16
1263 b.ls .Lecb_dec_done
1264
1265.align 4
1266.Lecb_dec_loop:
1267 ld1 {v14.16b,v15.16b}, [$inp], #32
1268 bl _vpaes_decrypt_2x
1269 st1 {v0.16b,v1.16b}, [$out], #32
1270 subs x17, x17, #32
1271 b.hi .Lecb_dec_loop
1272
1273.Lecb_dec_done:
1274 ldp d14,d15,[sp],#16
1275 ldp d12,d13,[sp],#16
1276 ldp d10,d11,[sp],#16
1277 ldp d8,d9,[sp],#16
1278 ldp x29,x30,[sp],#16
1279 AARCH64_VALIDATE_LINK_REGISTER
1280 ret
1281.size vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
1282___
1283} }
1284print $code;
1285
1286close STDOUT or die "error closing STDOUT: $!";