source: vbox/trunk/src/libs/openssl-3.0.1/crypto/aes/asm/vpaes-armv8.pl @ r94081

Last change on this file since r94081 was r91772, checked in by vboxsync, 3 years ago:
openssl-1.1.1l: Applied and adjusted our OpenSSL changes to 1.1.1l. bugref:10126

#! /usr/bin/env perl
# Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
##
######################################################################
# ARMv8 NEON adaptation by <[email protected]>
#
# The reason for undertaking this effort is that at least one popular
# SoC based on Cortex-A53 lacks the crypto extensions.
#
# Performance in cycles per byte:
#
#		CBC enc		ECB enc/dec(*)	[bit-sliced enc/dec]
# Cortex-A53	21.5		18.1/20.6	[17.5/19.8	]
# Cortex-A57	36.0(**)	20.4/24.9(**)	[14.4/16.6	]
# X-Gene	45.9(**)	45.8/57.7(**)	[33.1/37.6(**)	]
# Denver(***)	16.6(**)	15.1/17.8(**)	[8.80/9.93	]
# Apple A7(***)	22.7(**)	10.9/14.3	[8.45/10.0	]
# Mongoose(***)	26.3(**)	21.0/25.0(**)	[13.3/16.8	]
#
# (*)	ECB denotes approximate result for parallelizable modes
#	such as CBC decrypt, CTR, etc.;
# (**)	these results are worse than scalar compiler-generated
#	code, but it's constant-time and therefore preferred;
# (***)	presented for reference/comparison purposes;

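#
# The underlying trick, as a rough C model (illustrative only, with
# hypothetical names tab_lo/tab_hi): every S-box-style lookup below
# splits each byte into its two nibbles and indexes a pair of 16-entry
# tables, which NEON's tbl (like SSSE3's pshufb) evaluates in constant
# time:
#
#	for (i = 0; i < 16; i++)
#		y[i] = tab_lo[x[i] & 0x0f] ^ tab_hi[x[i] >> 4];
#
# so there are no secret-dependent loads or branches, unlike a classic
# 256-byte S-box table.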
$flavour = shift;
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

$code.=<<___;
.text

.type	_vpaes_consts,%object
.align	7			// totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:			// mc_forward
	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:		// mc_backward
	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:				// sr
	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
	.quad	0x0F060D040B020900, 0x070E050C030A0108
	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
.Lk_inv:			// inv, inva
	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:			// input transform (lo, hi)
	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:			// sbou, sbot
	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:			// sb1u, sb1t
	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:			// sb2u, sb2t
	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
.Lk_dipt:			// decryption input transform
	.quad	0x0F505B040B545F00, 0x154A411E114E451A
	.quad	0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo:			// decryption sbox final output
	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9:			// decryption sbox output *9*u, *9*t
	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:			// decryption sbox output *D*u, *D*t
	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:			// decryption sbox output *B*u, *B*t
	.quad	0xD022649296B44200, 0x602646F6B0F2D404
	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:			// decryption sbox output *E*u, *E*t
	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
.Lk_dksd:			// decryption key schedule: invskew x*D
	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:			// decryption key schedule: invskew x*B
	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:			// decryption key schedule: invskew x*E + 0x63
	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:			// decryption key schedule: invskew x*9
	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

.Lk_rcon:			// rcon
	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_opt:			// output transform
	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:			// deskew tables: inverts the sbox's "skew"
	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.asciz	"Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
.size	_vpaes_consts,.-_vpaes_consts
.align	6
___


{
my ($inp,$out,$key) = map("x$_",(0..2));

my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));

$code.=<<___;
##
## _aes_preheat
##
## Fills register %r10 -> .aes_consts (so you can -fPIC)
## and %xmm9-%xmm15 as specified below.
##
.type	_vpaes_encrypt_preheat,%function
.align	4
_vpaes_encrypt_preheat:
	adr	x10, .Lk_inv
	movi	v17.16b, #0x0f
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x10],#64	// .Lk_ipt, .Lk_sbo
	ld1	{v24.2d-v27.2d}, [x10]		// .Lk_sb1, .Lk_sb2
	ret
.size	_vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat

##
## _aes_encrypt_core
##
## AES-encrypt %xmm0.
##
## Inputs:
##   %xmm0 = input
##   %xmm9-%xmm15 as in _vpaes_preheat
##   (%rdx) = scheduled keys
##
## Output in %xmm0
## Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
## Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
.type	_vpaes_encrypt_core,%function
.align	4
_vpaes_encrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]		// pull rounds
	adr	x11, .Lk_mc_forward+16
					// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
	ld1	{v16.2d}, [x9], #16	// vmovdqu (%r9), %xmm5 # round0 key
	and	v1.16b, v7.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4	// vpsrlb \$4, %xmm0, %xmm0
	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm1
					// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm3, %xmm2
	eor	v0.16b, v1.16b, v16.16b	// vpxor %xmm5, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b	// vpxor %xmm2, %xmm0, %xmm0
	b	.Lenc_entry

.align	4
.Lenc_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {$sb1t}, v2.16b	// vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
	ld1	{v1.2d}, [x11], #16	// vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
	tbl	v0.16b, {$sb1u}, v3.16b	// vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b	// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	tbl	v5.16b, {$sb2t}, v2.16b	// vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	tbl	v2.16b, {$sb2u}, v3.16b	// vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
	ld1	{v4.2d}, [x10]		// vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
	eor	v2.16b, v2.16b, v5.16b	// vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
	eor	v0.16b, v0.16b, v3.16b	// vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
	and	x11, x11, #~(1<<6)	// and \$0x30, %r11 # ... mod 4
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
	sub	w8, w8, #1		// nr--

.Lenc_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b	// vpand %xmm0, %xmm9, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4	// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	tbl	v5.16b, {$invhi}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
	eor	v1.16b, v1.16b, v0.16b	// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	eor	v3.16b, v3.16b, v5.16b	// vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v5.16b	// vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b	// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v3.16b, v3.16b, v0.16b	// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	ld1	{v16.2d}, [x9],#16	// vmovdqu (%r9), %xmm5
	cbnz	w8, .Lenc_loop

	// middle of last round
	add	x10, x11, #0x80
					// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
					// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
	tbl	v4.16b, {$sbou}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
	ld1	{v1.2d}, [x10]		// vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
	tbl	v0.16b, {$sbot}, v3.16b	// vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b	// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm0
	ret
.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core
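
##
## The .Lenc_entry block above is the AES S-box evaluated over GF(2^4).
## Following the register comments, with i = x >> 4 and k = x & 0xf:
##
##	j   = i ^ k
##	iak = inv[i] ^ inva[k]
##	jak = inv[j] ^ inva[k]
##	io  = inv[iak] ^ j
##	jo  = inv[jak] ^ i
##
## io/jo then index the sb1/sb2 (or sbo) output tables, which fold the
## GF(2^8) inversion back together with the S-box's affine transform.
##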

.globl	vpaes_encrypt
.type	vpaes_encrypt,%function
.align	4
vpaes_encrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_encrypt_preheat
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_encrypt,.-vpaes_encrypt
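
##
## Typical C-side usage, a sketch assuming the prototypes OpenSSL
## declares for these entry points (error handling omitted):
##
##	AES_KEY ks;
##	vpaes_set_encrypt_key(key_bytes, 128, &ks);
##	vpaes_encrypt(in_block, out_block, &ks);	// one 16-byte block
##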
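
// v14-v15 input, v0-v1 output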
.type	_vpaes_encrypt_2x,%function
.align	4
_vpaes_encrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]		// pull rounds
	adr	x11, .Lk_mc_forward+16
					// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
	ld1	{v16.2d}, [x9], #16	// vmovdqu (%r9), %xmm5 # round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4	// vpsrlb \$4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm1
	tbl	v9.16b, {$iptlo}, v9.16b
					// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm3, %xmm2
	tbl	v10.16b, {$ipthi}, v8.16b
	eor	v0.16b, v1.16b, v16.16b	// vpxor %xmm5, %xmm1, %xmm0
	eor	v8.16b, v9.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b	// vpxor %xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Lenc_2x_entry

.align	4
.Lenc_2x_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {$sb1t}, v2.16b	// vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
	tbl	v12.16b, {$sb1t}, v10.16b
	ld1	{v1.2d}, [x11], #16	// vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
	tbl	v0.16b, {$sb1u}, v3.16b	// vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
	tbl	v8.16b, {$sb1u}, v11.16b
	eor	v4.16b, v4.16b, v16.16b	// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	tbl	v5.16b, {$sb2t}, v2.16b	// vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
	tbl	v13.16b, {$sb2t}, v10.16b
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v2.16b, {$sb2u}, v3.16b	// vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
	tbl	v10.16b, {$sb2u}, v11.16b
	ld1	{v4.2d}, [x10]		// vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
	tbl	v11.16b, {v8.16b}, v1.16b
	eor	v2.16b, v2.16b, v5.16b	// vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
	eor	v10.16b, v10.16b, v13.16b
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
	tbl	v8.16b, {v8.16b}, v4.16b
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
	eor	v11.16b, v11.16b, v10.16b
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
	tbl	v12.16b, {v11.16b},v1.16b
	eor	v0.16b, v0.16b, v3.16b	// vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
	eor	v8.16b, v8.16b, v11.16b
	and	x11, x11, #~(1<<6)	// and \$0x30, %r11 # ... mod 4
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
	eor	v8.16b, v8.16b, v12.16b
	sub	w8, w8, #1		// nr--

.Lenc_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b	// vpand %xmm0, %xmm9, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4	// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v5.16b, {$invhi},v1.16b	// vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
	tbl	v13.16b, {$invhi},v9.16b
	eor	v1.16b, v1.16b, v0.16b	// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {$invlo},v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b, {$invlo},v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b, v3.16b, v5.16b	// vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v13.16b
	eor	v4.16b, v4.16b, v5.16b	// vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v13.16b
	tbl	v2.16b, {$invlo},v3.16b	// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b, {$invlo},v4.16b	// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b, v2.16b, v1.16b	// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b	// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16	// vmovdqu (%r9), %xmm5
	cbnz	w8, .Lenc_2x_loop

	// middle of last round
	add	x10, x11, #0x80
					// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
					// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
	tbl	v4.16b, {$sbou}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
	ld1	{v1.2d}, [x10]		// vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
	tbl	v0.16b, {$sbot}, v3.16b	// vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
	tbl	v8.16b, {$sbot}, v11.16b
	eor	v4.16b, v4.16b, v16.16b	// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v0.16b, {v0.16b},v1.16b	// vpshufb %xmm1, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v1.16b
	ret
.size	_vpaes_encrypt_2x,.-_vpaes_encrypt_2x

.type	_vpaes_decrypt_preheat,%function
.align	4
_vpaes_decrypt_preheat:
	adr	x10, .Lk_inv
	movi	v17.16b, #0x0f
	adr	x11, .Lk_dipt
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x11],#64	// .Lk_dipt, .Lk_dsbo
	ld1	{v24.2d-v27.2d}, [x11],#64	// .Lk_dsb9, .Lk_dsbd
	ld1	{v28.2d-v31.2d}, [x11]		// .Lk_dsbb, .Lk_dsbe
	ret
.size	_vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat

##
##  Decryption core
##
##  Same API as encryption core.
##
.type	_vpaes_decrypt_core,%function
.align	4
_vpaes_decrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]		// pull rounds

					// vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
	lsl	x11, x8, #4		// mov %rax, %r11; shl \$4, %r11
	eor	x11, x11, #0x30		// xor \$0x30, %r11
	adr	x10, .Lk_sr
	and	x11, x11, #0x30		// and \$0x30, %r11
	add	x11, x11, x10
	adr	x10, .Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16	// vmovdqu (%r9), %xmm4 # round0 key
	and	v1.16b, v7.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4	// vpsrlb \$4, %xmm0, %xmm0
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
	ld1	{v5.2d}, [x10]		// vmovdqa .Lk_mc_forward+48(%rip), %xmm5
					// vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	eor	v2.16b, v2.16b, v16.16b	// vpxor %xmm4, %xmm2, %xmm2
	eor	v0.16b, v0.16b, v2.16b	// vpxor %xmm2, %xmm0, %xmm0
	b	.Ldec_entry

.align	4
.Ldec_loop:
//
//  Inverse mix columns
//
					// vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
					// vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
	tbl	v4.16b, {$sb9u}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
	tbl	v1.16b, {$sb9t}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
	eor	v0.16b, v4.16b, v16.16b	// vpxor %xmm4, %xmm0, %xmm0
					// vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
					// vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt

	tbl	v4.16b, {$sbdu}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v1.16b, {$sbdt}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
					// vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
					// vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt

	tbl	v4.16b, {$sbbu}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v1.16b, {$sbbt}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
					// vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
					// vmovdqa 0x50(%r10), %xmm1 # 0 : sbet

	tbl	v4.16b, {$sbeu}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v1.16b, {$sbet}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr \$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	sub	w8, w8, #1		// sub \$1,%rax # nr--

.Ldec_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4	// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
	eor	v1.16b, v1.16b, v0.16b	// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v2.16b	// vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b	// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v3.16b, v3.16b, v0.16b	// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	ld1	{v16.2d}, [x9],#16	// vmovdqu (%r9), %xmm0
	cbnz	w8, .Ldec_loop

	// middle of last round
					// vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
	tbl	v4.16b, {$sbou}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
					// vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
	ld1	{v2.2d}, [x11]		// vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
	tbl	v1.16b, {$sbot}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b	// vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v0.16b, v1.16b, v4.16b	// vpxor %xmm4, %xmm1, %xmm0 # 0 = A
	tbl	v0.16b, {v0.16b}, v2.16b	// vpshufb %xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core
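
##
## The .Ldec_loop above folds InvMixColumns into the inverse S-box: the
## four table pairs give the S-box output multiplied by 9, D, B and E,
## and with m() denoting the .Lk_mc_forward byte rotation the loop
## accumulates, as a rough model,
##
##	ch = key ^ dsb9(io, jo)
##	ch = m(ch) ^ dsbd(io, jo)
##	ch = m(ch) ^ dsbb(io, jo)
##	ch = m(ch) ^ dsbe(io, jo)
##
## i.e. the E,B,D,9 circulant applied one rotation at a time.
##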

.globl	vpaes_decrypt
.type	vpaes_decrypt,%function
.align	4
vpaes_decrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_decrypt_preheat
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_decrypt,.-vpaes_decrypt

// v14-v15 input, v0-v1 output
.type	_vpaes_decrypt_2x,%function
.align	4
_vpaes_decrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]		// pull rounds

					// vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
	lsl	x11, x8, #4		// mov %rax, %r11; shl \$4, %r11
	eor	x11, x11, #0x30		// xor \$0x30, %r11
	adr	x10, .Lk_sr
	and	x11, x11, #0x30		// and \$0x30, %r11
	add	x11, x11, x10
	adr	x10, .Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16	// vmovdqu (%r9), %xmm4 # round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4	// vpsrlb \$4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v2.16b, {$iptlo},v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
	tbl	v10.16b, {$iptlo},v9.16b
	ld1	{v5.2d}, [x10]		// vmovdqa .Lk_mc_forward+48(%rip), %xmm5
					// vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b, {$ipthi},v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	tbl	v8.16b, {$ipthi},v8.16b
	eor	v2.16b, v2.16b, v16.16b	// vpxor %xmm4, %xmm2, %xmm2
	eor	v10.16b, v10.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b	// vpxor %xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Ldec_2x_entry

.align	4
.Ldec_2x_loop:
//
//  Inverse mix columns
//
					// vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
					// vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
	tbl	v4.16b, {$sb9u}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
	tbl	v12.16b, {$sb9u}, v10.16b
	tbl	v1.16b, {$sb9t}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
	tbl	v9.16b, {$sb9t}, v11.16b
	eor	v0.16b, v4.16b, v16.16b	// vpxor %xmm4, %xmm0, %xmm0
	eor	v8.16b, v12.16b, v16.16b
					// vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
					// vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt

	tbl	v4.16b, {$sbdu}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
	tbl	v12.16b, {$sbdu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbdt}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
	tbl	v9.16b, {$sbdt}, v11.16b
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	eor	v8.16b, v8.16b, v12.16b
					// vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b
					// vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt

	tbl	v4.16b, {$sbbu}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
	tbl	v12.16b, {$sbbu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbbt}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
	tbl	v9.16b, {$sbbt}, v11.16b
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	eor	v8.16b, v8.16b, v12.16b
					// vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b
					// vmovdqa 0x50(%r10), %xmm1 # 0 : sbet

	tbl	v4.16b, {$sbeu}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
	tbl	v12.16b, {$sbeu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbet}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
	tbl	v9.16b, {$sbet}, v11.16b
	eor	v0.16b, v0.16b, v4.16b	// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	eor	v8.16b, v8.16b, v12.16b
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr \$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b	// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b
	sub	w8, w8, #1		// sub \$1,%rax # nr--

.Ldec_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4	// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v2.16b, {$invhi},v1.16b	// vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
	tbl	v10.16b, {$invhi},v9.16b
	eor	v1.16b, v1.16b, v0.16b	// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {$invlo},v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b, {$invlo},v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v10.16b
	eor	v4.16b, v4.16b, v2.16b	// vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v10.16b
	tbl	v2.16b, {$invlo},v3.16b	// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b, {$invlo},v4.16b	// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b, v2.16b, v1.16b	// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b	// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16	// vmovdqu (%r9), %xmm0
	cbnz	w8, .Ldec_2x_loop

	// middle of last round
					// vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
	tbl	v4.16b, {$sbou}, v2.16b	// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
					// vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
	tbl	v1.16b, {$sbot}, v3.16b	// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
	tbl	v9.16b, {$sbot}, v11.16b
	ld1	{v2.2d}, [x11]		// vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
	eor	v4.16b, v4.16b, v16.16b	// vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v1.16b, v4.16b	// vpxor %xmm4, %xmm1, %xmm0 # 0 = A
	eor	v8.16b, v9.16b, v12.16b
	tbl	v0.16b, {v0.16b},v2.16b	// vpshufb %xmm2, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v2.16b
	ret
.size	_vpaes_decrypt_2x,.-_vpaes_decrypt_2x
___
}

{
my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));

$code.=<<___;
########################################################
##                                                    ##
##                  AES key schedule                  ##
##                                                    ##
########################################################
.type	_vpaes_key_preheat,%function
.align	4
_vpaes_key_preheat:
	adr	x10, .Lk_inv
	movi	v16.16b, #0x5b		// .Lk_s63
	adr	x11, .Lk_sb1
	movi	v17.16b, #0x0f		// .Lk_s0F
	ld1	{v18.2d-v21.2d}, [x10]	// .Lk_inv, .Lk_ipt
	adr	x10, .Lk_dksd
	ld1	{v22.2d-v23.2d}, [x11]	// .Lk_sb1
	adr	x11, .Lk_mc_forward
	ld1	{v24.2d-v27.2d}, [x10],#64	// .Lk_dksd, .Lk_dksb
	ld1	{v28.2d-v31.2d}, [x10],#64	// .Lk_dkse, .Lk_dks9
	ld1	{v8.2d}, [x10]		// .Lk_rcon
	ld1	{v9.2d}, [x11]		// .Lk_mc_forward[0]
	ret
.size	_vpaes_key_preheat,.-_vpaes_key_preheat

.type	_vpaes_schedule_core,%function
.align	4
_vpaes_schedule_core:
	.inst	0xd503233f		// paciasp
	stp	x29, x30, [sp,#-16]!
	add	x29,sp,#0

	bl	_vpaes_key_preheat	// load the tables

	ld1	{v0.16b}, [$inp],#16	// vmovdqu (%rdi), %xmm0 # load key (unaligned)

	// input transform
	mov	v3.16b, v0.16b		// vmovdqa %xmm0, %xmm3
	bl	_vpaes_schedule_transform
	mov	v7.16b, v0.16b		// vmovdqa %xmm0, %xmm7

	adr	x10, .Lk_sr		// lea .Lk_sr(%rip),%r10
	add	x8, x8, x10
	cbnz	$dir, .Lschedule_am_decrypting

	// encrypting, output zeroth round key after transform
	st1	{v0.2d}, [$out]		// vmovdqu %xmm0, (%rdx)
	b	.Lschedule_go

.Lschedule_am_decrypting:
	// decrypting, output zeroth round key after shiftrows
	ld1	{v1.2d}, [x8]		// vmovdqa (%r8,%r10), %xmm1
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	st1	{v3.2d}, [$out]		// vmovdqu %xmm3, (%rdx)
	eor	x8, x8, #0x30		// xor \$0x30, %r8

.Lschedule_go:
	cmp	$bits, #192		// cmp \$192, %esi
	b.hi	.Lschedule_256
	b.eq	.Lschedule_192
	// 128: fall through

##
## .schedule_128
##
## 128-bit specific part of key schedule.
##
## This schedule is really simple, because all its parts
## are accomplished by the subroutines.
##
.Lschedule_128:
	mov	$inp, #10		// mov \$10, %esi

.Loop_schedule_128:
	sub	$inp, $inp, #1		// dec %esi
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle	// write output
	b	.Loop_schedule_128

##
## .aes_schedule_192
##
## 192-bit specific part of key schedule.
##
## The main body of this schedule is the same as the 128-bit
## schedule, but with more smearing. The long, high side is
## stored in %xmm7 as before, and the short, low side is in
## the high bits of %xmm6.
##
## This schedule is somewhat nastier, however, because each
## round produces 192 bits of key material, or 1.5 round keys.
## Therefore, on each cycle we do 2 rounds and produce 3 round
## keys.
##
.align	4
.Lschedule_192:
	sub	$inp, $inp, #8
	ld1	{v0.16b}, [$inp]	// vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	v6.16b, v0.16b		// vmovdqa %xmm0, %xmm6 # save short part
	eor	v4.16b, v4.16b, v4.16b	// vpxor %xmm4, %xmm4, %xmm4 # clear 4
	ins	v6.d[0], v4.d[0]	// vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
	mov	$inp, #4		// mov \$4, %esi

.Loop_schedule_192:
	sub	$inp, $inp, #1		// dec %esi
	bl	_vpaes_schedule_round
	ext	v0.16b, v6.16b, v0.16b, #8	// vpalignr \$8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle	// save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle	// save key n+1
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle	// save key n+2
	bl	_vpaes_schedule_192_smear
	b	.Loop_schedule_192
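
##
## Key count check: AES-192 needs 13 round keys. The zeroth was stored
## before .Lschedule_go, and the loop above runs with %esi = 4, saving
## three keys per pass (the last pass saves its third key via
## .Lschedule_mangle_last), for 1 + 12 = 13 in total.
##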

##
## .aes_schedule_256
##
## 256-bit specific part of key schedule.
##
## The structure here is very similar to the 128-bit
## schedule, but with an additional "low side" in
## %xmm6. The low side's rounds are the same as the
## high side's, except no rcon and no rotation.
##
.align	4
.Lschedule_256:
	ld1	{v0.16b}, [$inp]	// vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	$inp, #7		// mov \$7, %esi

.Loop_schedule_256:
	sub	$inp, $inp, #1		// dec %esi
	bl	_vpaes_schedule_mangle	// output low result
	mov	v6.16b, v0.16b		// vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6

	// high round
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	// low round. swap xmm7 and xmm6
	dup	v0.4s, v0.s[3]		// vpshufd \$0xFF, %xmm0, %xmm0
	movi	v4.16b, #0
	mov	v5.16b, v7.16b		// vmovdqa %xmm7, %xmm5
	mov	v7.16b, v6.16b		// vmovdqa %xmm6, %xmm7
	bl	_vpaes_schedule_low_round
	mov	v7.16b, v5.16b		// vmovdqa %xmm5, %xmm7

	b	.Loop_schedule_256

##
## .aes_schedule_mangle_last
##
## Mangler for last round of key schedule
## Mangles %xmm0
##   when encrypting, outputs out(%xmm0) ^ 63
##   when decrypting, outputs unskew(%xmm0)
##
## Always called right before return... jumps to cleanup and exits
##
.align	4
.Lschedule_mangle_last:
	// schedule last round key from xmm0
	adr	x11, .Lk_deskew		// lea .Lk_deskew(%rip),%r11 # prepare to deskew
	cbnz	$dir, .Lschedule_mangle_last_dec

	// encrypting
	ld1	{v1.2d}, [x8]		// vmovdqa (%r8,%r10),%xmm1
	adr	x11, .Lk_opt		// lea .Lk_opt(%rip), %r11 # prepare to output transform
	add	$out, $out, #32		// add \$32, %rdx
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm0 # output permute

.Lschedule_mangle_last_dec:
	ld1	{v20.2d-v21.2d}, [x11]	// reload constants
	sub	$out, $out, #16		// add \$-16, %rdx
	eor	v0.16b, v0.16b, v16.16b	// vpxor .Lk_s63(%rip), %xmm0, %xmm0
	bl	_vpaes_schedule_transform	// output transform
	st1	{v0.2d}, [$out]		// vmovdqu %xmm0, (%rdx) # save last key

	// cleanup
	eor	v0.16b, v0.16b, v0.16b	// vpxor %xmm0, %xmm0, %xmm0
	eor	v1.16b, v1.16b, v1.16b	// vpxor %xmm1, %xmm1, %xmm1
	eor	v2.16b, v2.16b, v2.16b	// vpxor %xmm2, %xmm2, %xmm2
	eor	v3.16b, v3.16b, v3.16b	// vpxor %xmm3, %xmm3, %xmm3
	eor	v4.16b, v4.16b, v4.16b	// vpxor %xmm4, %xmm4, %xmm4
	eor	v5.16b, v5.16b, v5.16b	// vpxor %xmm5, %xmm5, %xmm5
	eor	v6.16b, v6.16b, v6.16b	// vpxor %xmm6, %xmm6, %xmm6
	eor	v7.16b, v7.16b, v7.16b	// vpxor %xmm7, %xmm7, %xmm7
	ldp	x29, x30, [sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	_vpaes_schedule_core,.-_vpaes_schedule_core

##
## .aes_schedule_192_smear
##
## Smear the short, low side in the 192-bit key schedule.
##
## Inputs:
##   %xmm7: high side, b a x y
##   %xmm6: low side, d c 0 0
##   %xmm13: 0
##
## Outputs:
##   %xmm6: b+c+d b+c 0 0
##   %xmm0: b+c+d b+c b a
##
.type	_vpaes_schedule_192_smear,%function
.align	4
_vpaes_schedule_192_smear:
	movi	v1.16b, #0
	dup	v0.4s, v7.s[3]
	ins	v1.s[3], v6.s[2]	// vpshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
	ins	v0.s[0], v7.s[2]	// vpshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
	eor	v6.16b, v6.16b, v1.16b	// vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
	eor	v1.16b, v1.16b, v1.16b	// vpxor %xmm1, %xmm1, %xmm1
	eor	v6.16b, v6.16b, v0.16b	// vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
	mov	v0.16b, v6.16b		// vmovdqa %xmm6, %xmm0
	ins	v6.d[0], v1.d[0]	// vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
	ret
.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear
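
##
## Word-level model of the smear, in the high-to-low dword notation of
## the comments above (inputs %xmm7 = b a x y, %xmm6 = d c 0 0):
##
##	t1   = c 0 0 0		// vpshufd \$0x80, %xmm6
##	t0   = b b b a		// vpshufd \$0xFE, %xmm7
##	xmm6 ^= t1		// -> c+d c 0 0
##	xmm6 ^= t0		// -> b+c+d b+c b a
##	xmm0 = xmm6		// b+c+d b+c b a
##	xmm6.lo64 = 0		// -> b+c+d b+c 0 0
##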

##
## .aes_schedule_round
##
## Runs one main round of the key schedule on %xmm0, %xmm7
##
## Specifically, runs subbytes on the high dword of %xmm0
## then rotates it by one byte and xors into the low dword of
## %xmm7.
##
## Adds rcon from low byte of %xmm8, then rotates %xmm8 for
## next rcon.
##
## Smears the dwords of %xmm7 by xoring the low into the
## second low, result into third, result into highest.
##
## Returns results in %xmm7 = %xmm0.
## Clobbers %xmm1-%xmm4, %r11.
##
.type	_vpaes_schedule_round,%function
.align	4
_vpaes_schedule_round:
	// extract rcon from xmm8
	movi	v4.16b, #0		// vpxor %xmm4, %xmm4, %xmm4
	ext	v1.16b, $rcon, v4.16b, #15	// vpalignr \$15, %xmm8, %xmm4, %xmm1
	ext	$rcon, $rcon, $rcon, #15	// vpalignr \$15, %xmm8, %xmm8, %xmm8
	eor	v7.16b, v7.16b, v1.16b	// vpxor %xmm1, %xmm7, %xmm7

	// rotate
	dup	v0.4s, v0.s[3]		// vpshufd \$0xFF, %xmm0, %xmm0
	ext	v0.16b, v0.16b, v0.16b, #1	// vpalignr \$1, %xmm0, %xmm0, %xmm0

	// fall through...

	// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	// smear xmm7
	ext	v1.16b, v4.16b, v7.16b, #12	// vpslldq \$4, %xmm7, %xmm1
	eor	v7.16b, v7.16b, v1.16b	// vpxor %xmm1, %xmm7, %xmm7
	ext	v4.16b, v4.16b, v7.16b, #8	// vpslldq \$8, %xmm7, %xmm4

	// subbytes
	and	v1.16b, v0.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4	// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	eor	v7.16b, v7.16b, v4.16b	// vpxor %xmm4, %xmm7, %xmm7
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
	eor	v1.16b, v1.16b, v0.16b	// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	eor	v7.16b, v7.16b, v16.16b	// vpxor .Lk_s63(%rip), %xmm7, %xmm7
	tbl	v3.16b, {$invlo}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
	eor	v4.16b, v4.16b, v2.16b	// vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
	eor	v3.16b, v3.16b, v1.16b	// vpxor %xmm1, %xmm3, %xmm3 # 2 = io
	eor	v2.16b, v2.16b, v0.16b	// vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
	tbl	v4.16b, {v23.16b}, v3.16b	// vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
	tbl	v1.16b, {v22.16b}, v2.16b	// vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
	eor	v1.16b, v1.16b, v4.16b	// vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output

	// add in smeared stuff
	eor	v0.16b, v1.16b, v7.16b	// vpxor %xmm7, %xmm1, %xmm0
	eor	v7.16b, v1.16b, v7.16b	// vmovdqa %xmm0, %xmm7
	ret
.size	_vpaes_schedule_round,.-_vpaes_schedule_round
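
##
## Functionally this is the textbook AES key-schedule step; as a rough
## word-level C model (w[0..3] = previous round key):
##
##	t = SubWord(RotWord(w[3])) ^ rcon;
##	w[0] ^= t; w[1] ^= w[0]; w[2] ^= w[1]; w[3] ^= w[2];
##
## except that SubWord runs through the same GF(2^4) table network as
## the encrypt core, the smear is done with vector shifts, and the
## S-box's 0x63 constant (.Lk_s63) is folded in separately.
##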

##
## .aes_schedule_transform
##
## Linear-transform %xmm0 according to tables at (%r11)
##
## Requires that %xmm9 = 0x0F0F... as in preheat
## Output in %xmm0
## Clobbers %xmm1, %xmm2
##
.type	_vpaes_schedule_transform,%function
.align	4
_vpaes_schedule_transform:
	and	v1.16b, v0.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v0.16b, #4	// vpsrlb \$4, %xmm0, %xmm0
					// vmovdqa (%r11), %xmm2 # lo
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
					// vmovdqa 16(%r11), %xmm1 # hi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b	// vpxor %xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform

##
## .aes_schedule_mangle
##
## Mangle xmm0 from (basis-transformed) standard version
## to our version.
##
## On encrypt,
##   xor with 0x63
##   multiply by circulant 0,1,1,1
##   apply shiftrows transform
##
## On decrypt,
##   xor with 0x63
##   multiply by "inverse mixcolumns" circulant E,B,D,9
##   deskew
##   apply shiftrows transform
##
##
## Writes out to (%rdx), and increments or decrements it
## Keeps track of round number mod 4 in %r8
## Preserves xmm0
## Clobbers xmm1-xmm5
##
.type	_vpaes_schedule_mangle,%function
.align	4
_vpaes_schedule_mangle:
	mov	v4.16b, v0.16b		// vmovdqa %xmm0, %xmm4 # save xmm0 for later
					// vmovdqa .Lk_mc_forward(%rip),%xmm5
	cbnz	$dir, .Lschedule_mangle_dec

	// encrypting
	eor	v4.16b, v0.16b, v16.16b	// vpxor .Lk_s63(%rip), %xmm0, %xmm4
	add	$out, $out, #16		// add \$16, %rdx
	tbl	v4.16b, {v4.16b}, v9.16b	// vpshufb %xmm5, %xmm4, %xmm4
	tbl	v1.16b, {v4.16b}, v9.16b	// vpshufb %xmm5, %xmm4, %xmm1
	tbl	v3.16b, {v1.16b}, v9.16b	// vpshufb %xmm5, %xmm1, %xmm3
	eor	v4.16b, v4.16b, v1.16b	// vpxor %xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]		// vmovdqa (%r8,%r10), %xmm1
	eor	v3.16b, v3.16b, v4.16b	// vpxor %xmm4, %xmm3, %xmm3

	b	.Lschedule_mangle_both
.align	4
.Lschedule_mangle_dec:
	// inverse mix columns
					// lea .Lk_dksd(%rip),%r11
	ushr	v1.16b, v4.16b, #4	// vpsrlb \$4, %xmm4, %xmm1 # 1 = hi
	and	v4.16b, v4.16b, v17.16b	// vpand %xmm9, %xmm4, %xmm4 # 4 = lo

					// vmovdqa 0x00(%r11), %xmm2
	tbl	v2.16b, {v24.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
					// vmovdqa 0x10(%r11), %xmm3
	tbl	v3.16b, {v25.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3

					// vmovdqa 0x20(%r11), %xmm2
	tbl	v2.16b, {v26.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b	// vpxor %xmm3, %xmm2, %xmm2
					// vmovdqa 0x30(%r11), %xmm3
	tbl	v3.16b, {v27.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3

					// vmovdqa 0x40(%r11), %xmm2
	tbl	v2.16b, {v28.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b	// vpxor %xmm3, %xmm2, %xmm2
					// vmovdqa 0x50(%r11), %xmm3
	tbl	v3.16b, {v29.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b	// vpxor %xmm2, %xmm3, %xmm3

					// vmovdqa 0x60(%r11), %xmm2
	tbl	v2.16b, {v30.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3
					// vmovdqa 0x70(%r11), %xmm4
	tbl	v4.16b, {v31.16b}, v1.16b	// vpshufb %xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]		// vmovdqa (%r8,%r10), %xmm1
	eor	v2.16b, v2.16b, v3.16b	// vpxor %xmm3, %xmm2, %xmm2
	eor	v3.16b, v4.16b, v2.16b	// vpxor %xmm2, %xmm4, %xmm3

	sub	$out, $out, #16		// add \$-16, %rdx

.Lschedule_mangle_both:
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	add	x8, x8, #64-16		// add \$-16, %r8
	and	x8, x8, #~(1<<6)	// and \$0x30, %r8
	st1	{v3.2d}, [$out]		// vmovdqu %xmm3, (%rdx)
	ret
.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle
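
##
## The encrypt-side mangle above computes, as a rough model with r()
## denoting the per-column byte rotation from .Lk_mc_forward:
##
##	t = key ^ 0x63..63			// .Lk_s63
##	out = r(t) ^ r(r(t)) ^ r(r(r(t)))	// circulant 0,1,1,1
##
## followed by the shiftrows permutation from .Lk_sr selected by %r8.
##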

.globl	vpaes_set_encrypt_key
.type	vpaes_set_encrypt_key,%function
.align	4
vpaes_set_encrypt_key:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, $bits, #5		// shr \$5,%eax
	add	w9, w9, #5		// \$5,%eax
	str	w9, [$out,#240]		// mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;

	mov	$dir, #0		// mov \$0,%ecx
	mov	x8, #0x30		// mov \$0x30,%r8d
	bl	_vpaes_schedule_core
	eor	x0, x0, x0

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key

.globl	vpaes_set_decrypt_key
.type	vpaes_set_decrypt_key,%function
.align	4
vpaes_set_decrypt_key:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so

	lsr	w9, $bits, #5		// shr \$5,%eax
	add	w9, w9, #5		// \$5,%eax
	str	w9, [$out,#240]		// mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
	lsl	w9, w9, #4		// shl \$4,%eax
	add	$out, $out, #16		// lea 16(%rdx,%rax),%rdx
	add	$out, $out, x9

	mov	$dir, #1		// mov \$1,%ecx
	lsr	w8, $bits, #1		// shr \$1,%r8d
	and	x8, x8, #32		// and \$32,%r8d
	eor	x8, x8, #32		// xor \$32,%r8d # nbits==192?0:32
	bl	_vpaes_schedule_core

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
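
##
## Note that the decrypt schedule is written back to front: above, the
## output pointer is first advanced past the last round key, and
## _vpaes_schedule_mangle then steps it down by 16 per key it emits.
##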
___
}
{
my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));

$code.=<<___;
.globl	vpaes_cbc_encrypt
.type	vpaes_cbc_encrypt,%function
.align	4
vpaes_cbc_encrypt:
	cbz	$len, .Lcbc_abort
	cmp	w5, #0			// check direction
	b.eq	vpaes_cbc_decrypt

	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	mov	x17, $len		// reassign
	mov	x2,  $key		// reassign

	ld1	{v0.16b}, [$ivec]	// load ivec
	bl	_vpaes_encrypt_preheat
	b	.Lcbc_enc_loop

.align	4
.Lcbc_enc_loop:
	ld1	{v7.16b}, [$inp],#16	// load input
	eor	v7.16b, v7.16b, v0.16b	// xor with ivec
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16	// save output
	subs	x17, x17, #16
	b.hi	.Lcbc_enc_loop

	st1	{v0.16b}, [$ivec]	// write ivec

	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
.Lcbc_abort:
	ret
.size	vpaes_cbc_encrypt,.-vpaes_cbc_encrypt

.type	vpaes_cbc_decrypt,%function
.align	4
vpaes_cbc_decrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len		// reassign
	mov	x2,  $key		// reassign
	ld1	{v6.16b}, [$ivec]	// load ivec
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lcbc_dec_loop2x

	ld1	{v7.16b}, [$inp], #16	// load input
	bl	_vpaes_decrypt_core
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	orr	v6.16b, v7.16b, v7.16b	// next ivec value
	st1	{v0.16b}, [$out], #16
	subs	x17, x17, #16
	b.ls	.Lcbc_dec_done

.align	4
.Lcbc_dec_loop2x:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	eor	v0.16b, v0.16b, v6.16b	// xor with ivec
	eor	v1.16b, v1.16b, v14.16b
	orr	v6.16b, v15.16b, v15.16b
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lcbc_dec_loop2x

.Lcbc_dec_done:
	st1	{v6.16b}, [$ivec]

	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
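
##
## C-side usage of the CBC entry point, a sketch assuming OpenSSL's
## prototype (length must be a multiple of 16; the IV buffer is
## updated in place):
##
##	AES_KEY ks;
##	unsigned char iv[16];
##	vpaes_set_encrypt_key(key_bytes, 256, &ks);
##	vpaes_cbc_encrypt(in, out, len, &ks, iv, 1);	// enc=1
##	// with enc == 0 (and a vpaes_set_decrypt_key schedule) the
##	// same entry point branches to vpaes_cbc_decrypt above
##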
___
if (1) {
$code.=<<___;
.globl	vpaes_ecb_encrypt
.type	vpaes_ecb_encrypt,%function
.align	4
vpaes_ecb_encrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_encrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_enc_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_enc_done

.align	4
.Lecb_enc_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_encrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_enc_loop

.Lecb_enc_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_ecb_encrypt,.-vpaes_ecb_encrypt

.globl	vpaes_ecb_decrypt
.type	vpaes_ecb_decrypt,%function
.align	4
vpaes_ecb_decrypt:
	.inst	0xd503233f		// paciasp
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!	// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_dec_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_dec_done

.align	4
.Lecb_dec_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_dec_loop

.Lecb_dec_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	.inst	0xd50323bf		// autiasp
	ret
.size	vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
___
} }
print $code;

close STDOUT or die "error closing STDOUT: $!";