#! /usr/bin/env perl
# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


######################################################################
## Constant-time SSSE3 AES core implementation.
## version 0.1
##
## By Mike Hamburg (Stanford University), 2009
## Public domain.
##
## For details see http://shiftleft.org/papers/vector_aes/ and
## http://crypto.stanford.edu/vpaes/.
##
######################################################################
# ARMv8 NEON adaptation by <appro@openssl.org>
#
# The reason for undertaking this effort is that there is at least one
# popular SoC based on Cortex-A53 that doesn't have crypto extensions.
#
#		CBC enc		ECB enc/dec(*)	[bit-sliced enc/dec]
# Cortex-A53	21.5		18.1/20.6	[17.5/19.8	]
# Cortex-A57	36.0(**)	20.4/24.9(**)	[14.4/16.6	]
# X-Gene	45.9(**)	45.8/57.7(**)	[33.1/37.6(**)	]
# Denver(***)	16.6(**)	15.1/17.8(**)	[8.80/9.93	]
# Apple A7(***)	22.7(**)	10.9/14.3	[8.45/10.0	]
# Mongoose(***)	26.3(**)	21.0/25.0(**)	[13.3/16.8	]
#
# (*)	ECB denotes approximate result for parallelizable modes
#	such as CBC decrypt, CTR, etc.;
# (**)	these results are worse than scalar compiler-generated
#	code, but it's constant-time and therefore preferred;
# (***)	presented for reference/comparison purposes;

$flavour = shift;
while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;
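
# Usage sketch (the flavour and file names below are illustrative, not
# mandated by this script): the first argument selects the perlasm
# flavour and the next argument that looks like a file name is taken as
# the output, e.g.
#
#	perl vpaes-armv8.pl linux64 vpaes-armv8.S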

$code.=<<___;
.text

.type	_vpaes_consts,%object
.align	7	// totally strategic alignment
_vpaes_consts:
.Lk_mc_forward:	// mc_forward
	.quad	0x0407060500030201, 0x0C0F0E0D080B0A09
	.quad	0x080B0A0904070605, 0x000302010C0F0E0D
	.quad	0x0C0F0E0D080B0A09, 0x0407060500030201
	.quad	0x000302010C0F0E0D, 0x080B0A0904070605
.Lk_mc_backward:// mc_backward
	.quad	0x0605040702010003, 0x0E0D0C0F0A09080B
	.quad	0x020100030E0D0C0F, 0x0A09080B06050407
	.quad	0x0E0D0C0F0A09080B, 0x0605040702010003
	.quad	0x0A09080B06050407, 0x020100030E0D0C0F
.Lk_sr:		// sr
	.quad	0x0706050403020100, 0x0F0E0D0C0B0A0908
	.quad	0x030E09040F0A0500, 0x0B06010C07020D08
	.quad	0x0F060D040B020900, 0x070E050C030A0108
	.quad	0x0B0E0104070A0D00, 0x0306090C0F020508

//
// "Hot" constants
//
.Lk_inv:	// inv, inva
	.quad	0x0E05060F0D080180, 0x040703090A0B0C02
	.quad	0x01040A060F0B0780, 0x030D0E0C02050809
.Lk_ipt:	// input transform (lo, hi)
	.quad	0xC2B2E8985A2A7000, 0xCABAE09052227808
	.quad	0x4C01307D317C4D00, 0xCD80B1FCB0FDCC81
.Lk_sbo:	// sbou, sbot
	.quad	0xD0D26D176FBDC700, 0x15AABF7AC502A878
	.quad	0xCFE474A55FBB6A00, 0x8E1E90D1412B35FA
.Lk_sb1:	// sb1u, sb1t
	.quad	0x3618D415FAE22300, 0x3BF7CCC10D2ED9EF
	.quad	0xB19BE18FCB503E00, 0xA5DF7A6E142AF544
.Lk_sb2:	// sb2u, sb2t
	.quad	0x69EB88400AE12900, 0xC2A163C8AB82234A
	.quad	0xE27A93C60B712400, 0x5EB7E955BC982FCD

//
// Decryption stuff
//
.Lk_dipt:	// decryption input transform
	.quad	0x0F505B040B545F00, 0x154A411E114E451A
	.quad	0x86E383E660056500, 0x12771772F491F194
.Lk_dsbo:	// decryption sbox final output
	.quad	0x1387EA537EF94000, 0xC7AA6DB9D4943E2D
	.quad	0x12D7560F93441D00, 0xCA4B8159D8C58E9C
.Lk_dsb9:	// decryption sbox output *9*u, *9*t
	.quad	0x851C03539A86D600, 0xCAD51F504F994CC9
	.quad	0xC03B1789ECD74900, 0x725E2C9EB2FBA565
.Lk_dsbd:	// decryption sbox output *D*u, *D*t
	.quad	0x7D57CCDFE6B1A200, 0xF56E9B13882A4439
	.quad	0x3CE2FAF724C6CB00, 0x2931180D15DEEFD3
.Lk_dsbb:	// decryption sbox output *B*u, *B*t
	.quad	0xD022649296B44200, 0x602646F6B0F2D404
	.quad	0xC19498A6CD596700, 0xF3FF0C3E3255AA6B
.Lk_dsbe:	// decryption sbox output *E*u, *E*t
	.quad	0x46F2929626D4D000, 0x2242600464B4F6B0
	.quad	0x0C55A6CDFFAAC100, 0x9467F36B98593E32

//
// Key schedule constants
//
.Lk_dksd:	// decryption key schedule: invskew x*D
	.quad	0xFEB91A5DA3E44700, 0x0740E3A45A1DBEF9
	.quad	0x41C277F4B5368300, 0x5FDC69EAAB289D1E
.Lk_dksb:	// decryption key schedule: invskew x*B
	.quad	0x9A4FCA1F8550D500, 0x03D653861CC94C99
	.quad	0x115BEDA7B6FC4A00, 0xD993256F7E3482C8
.Lk_dkse:	// decryption key schedule: invskew x*E + 0x63
	.quad	0xD5031CCA1FC9D600, 0x53859A4C994F5086
	.quad	0xA23196054FDC7BE8, 0xCD5EF96A20B31487
.Lk_dks9:	// decryption key schedule: invskew x*9
	.quad	0xB6116FC87ED9A700, 0x4AED933482255BFC
	.quad	0x4576516227143300, 0x8BB89FACE9DAFDCE

.Lk_rcon:	// rcon
	.quad	0x1F8391B9AF9DEEB6, 0x702A98084D7C7D81

.Lk_opt:	// output transform
	.quad	0xFF9F4929D6B66000, 0xF7974121DEBE6808
	.quad	0x01EDBD5150BCEC00, 0xE10D5DB1B05C0CE0
.Lk_deskew:	// deskew tables: inverts the sbox's "skew"
	.quad	0x07E4A34047A4E300, 0x1DFEB95A5DBEF91A
	.quad	0x5F36B5DC83EA6900, 0x2841C2ABF49D1E77

.asciz	"Vector Permutation AES for ARMv8, Mike Hamburg (Stanford University)"
.size	_vpaes_consts,.-_vpaes_consts
.align	6
___
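
# The .Lk_sr table above encodes, per entry i, the byte permutation
# j -> ((4*i + 1) * j) mod 16: entry 0 is the identity and entry 1 is the
# plain AES ShiftRows map (5*j mod 16). A small helper sketch (added for
# illustration, not used by the generator) reproducing entry i:
sub _sr_entry {
	my $i = shift;
	return map { ((4*$i + 1) * $_) % 16 } (0..15);	# byte indices of .Lk_sr[i]
}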


{
my ($inp,$out,$key) = map("x$_",(0..2));

my ($invlo,$invhi,$iptlo,$ipthi,$sbou,$sbot) = map("v$_.16b",(18..23));
my ($sb1u,$sb1t,$sb2u,$sb2t) = map("v$_.16b",(24..27));
my ($sb9u,$sb9t,$sbdu,$sbdt,$sbbu,$sbbt,$sbeu,$sbet)=map("v$_.16b",(24..31));

$code.=<<___;
##
##  _aes_preheat
##
##  Fills register %r10 -> .aes_consts (so you can -fPIC)
##  and %xmm9-%xmm15 as specified below.
##
.type	_vpaes_encrypt_preheat,%function
.align	4
_vpaes_encrypt_preheat:
	adr	x10, .Lk_inv
	movi	v17.16b, #0x0f
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x10],#64	// .Lk_ipt, .Lk_sbo
	ld1	{v24.2d-v27.2d}, [x10]		// .Lk_sb1, .Lk_sb2
	ret
.size	_vpaes_encrypt_preheat,.-_vpaes_encrypt_preheat

##
##  _aes_encrypt_core
##
##  AES-encrypt %xmm0.
##
##  Inputs:
##     %xmm0 = input
##     %xmm9-%xmm15 as in _vpaes_preheat
##    (%rdx) = scheduled keys
##
##  Output in %xmm0
##  Clobbers %xmm1-%xmm5, %r9, %r10, %r11, %rax
##  Preserves %xmm6 - %xmm8 so you get some local vectors
##
##
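##  The core trick throughout is nibble splitting: each byte x is split
##  into lo = x & 0x0F and hi = x >> 4, and two tbl lookups are combined
##  as T[x] = Tlo[lo] ^ Thi[hi], so every table access is data-independent;
##  that is what makes the implementation constant-time.
##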
.type	_vpaes_encrypt_core,%function
.align	4
_vpaes_encrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds
	adr	x11, .Lk_mc_forward+16
			// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu (%r9), %xmm5 # round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb \$4, %xmm0, %xmm0
	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm1
			// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm3, %xmm2
	eor	v0.16b, v1.16b, v16.16b		// vpxor %xmm5, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	b	.Lenc_entry

.align	4
.Lenc_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {$sb1t}, v2.16b		// vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
	ld1	{v1.2d}, [x11], #16		// vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
	tbl	v0.16b, {$sb1u}, v3.16b		// vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	tbl	v5.16b, {$sb2t}, v2.16b		// vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	tbl	v2.16b, {$sb2u}, v3.16b		// vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
	ld1	{v4.2d}, [x10]			// vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
	eor	v2.16b, v2.16b, v5.16b		// vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
	eor	v0.16b, v0.16b, v3.16b		// vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
	and	x11, x11, #~(1<<6)		// and \$0x30, %r11 # ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
	sub	w8, w8, #1			// nr--

.Lenc_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm0, %xmm9, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	tbl	v5.16b, {$invhi}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	eor	v3.16b, v3.16b, v5.16b		// vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v5.16b		// vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm5
	cbnz	w8, .Lenc_loop

	// middle of last round
	add	x10, x11, #0x80
			// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
			// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
	ld1	{v1.2d}, [x10]			// vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
	tbl	v0.16b, {$sbot}, v3.16b		// vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm0
	ret
.size	_vpaes_encrypt_core,.-_vpaes_encrypt_core

.globl	vpaes_encrypt
.type	vpaes_encrypt,%function
.align	4
vpaes_encrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_encrypt_preheat
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_encrypt,.-vpaes_encrypt

.type	_vpaes_encrypt_2x,%function
.align	4
_vpaes_encrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds
	adr	x11, .Lk_mc_forward+16
			// vmovdqa .Lk_ipt(%rip), %xmm2 # iptlo
	ld1	{v16.2d}, [x9], #16		// vmovdqu (%r9), %xmm5 # round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4		// vpsrlb \$4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v1.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm1
	tbl	v9.16b, {$iptlo}, v9.16b
			// vmovdqa .Lk_ipt+16(%rip), %xmm3 # ipthi
	tbl	v2.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm3, %xmm2
	tbl	v10.16b, {$ipthi}, v8.16b
	eor	v0.16b, v1.16b, v16.16b		// vpxor %xmm5, %xmm1, %xmm0
	eor	v8.16b, v9.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Lenc_2x_entry

.align	4
.Lenc_2x_loop:
	// middle of middle round
	add	x10, x11, #0x40
	tbl	v4.16b, {$sb1t}, v2.16b		// vpshufb %xmm2, %xmm13, %xmm4 # 4 = sb1u
	tbl	v12.16b, {$sb1t}, v10.16b
	ld1	{v1.2d}, [x11], #16		// vmovdqa -0x40(%r11,%r10), %xmm1 # .Lk_mc_forward[]
	tbl	v0.16b, {$sb1u}, v3.16b		// vpshufb %xmm3, %xmm12, %xmm0 # 0 = sb1t
	tbl	v8.16b, {$sb1u}, v11.16b
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	tbl	v5.16b, {$sb2t}, v2.16b		// vpshufb %xmm2, %xmm15, %xmm5 # 4 = sb2u
	tbl	v13.16b, {$sb2t}, v10.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v2.16b, {$sb2u}, v3.16b		// vpshufb %xmm3, %xmm14, %xmm2 # 2 = sb2t
	tbl	v10.16b, {$sb2u}, v11.16b
	ld1	{v4.2d}, [x10]			// vmovdqa (%r11,%r10), %xmm4 # .Lk_mc_backward[]
	tbl	v3.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm3 # 0 = B
	tbl	v11.16b, {v8.16b}, v1.16b
	eor	v2.16b, v2.16b, v5.16b		// vpxor %xmm5, %xmm2, %xmm2 # 2 = 2A
	eor	v10.16b, v10.16b, v13.16b
	tbl	v0.16b, {v0.16b}, v4.16b	// vpshufb %xmm4, %xmm0, %xmm0 # 3 = D
	tbl	v8.16b, {v8.16b}, v4.16b
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3 # 0 = 2A+B
	eor	v11.16b, v11.16b, v10.16b
	tbl	v4.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm4 # 0 = 2B+C
	tbl	v12.16b, {v11.16b},v1.16b
	eor	v0.16b, v0.16b, v3.16b		// vpxor %xmm3, %xmm0, %xmm0 # 3 = 2A+B+D
	eor	v8.16b, v8.16b, v11.16b
	and	x11, x11, #~(1<<6)		// and \$0x30, %r11 # ... mod 4
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 0 = 2A+3B+C+D
	eor	v8.16b, v8.16b, v12.16b
	sub	w8, w8, #1			// nr--

.Lenc_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm0, %xmm9, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v5.16b, {$invhi},v1.16b		// vpshufb %xmm1, %xmm11, %xmm5 # 2 = a/k
	tbl	v13.16b, {$invhi},v9.16b
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {$invlo},v0.16b		// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b, {$invlo},v1.16b		// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b, v3.16b, v5.16b		// vpxor %xmm5, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v13.16b
	eor	v4.16b, v4.16b, v5.16b		// vpxor %xmm5, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v13.16b
	tbl	v2.16b, {$invlo},v3.16b		// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b, {$invlo},v4.16b		// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm5
	cbnz	w8, .Lenc_2x_loop

	// middle of last round
	add	x10, x11, #0x80
			// vmovdqa -0x60(%r10), %xmm4 # 3 : sbou .Lk_sbo
			// vmovdqa -0x50(%r10), %xmm0 # 0 : sbot .Lk_sbo+16
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
	ld1	{v1.2d}, [x10]			// vmovdqa 0x40(%r11,%r10), %xmm1 # .Lk_sr[]
	tbl	v0.16b, {$sbot}, v3.16b		// vpshufb %xmm3, %xmm0, %xmm0 # 0 = sb1t
	tbl	v8.16b, {$sbot}, v11.16b
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm5, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 0 = A
	eor	v8.16b, v8.16b, v12.16b
	tbl	v0.16b, {v0.16b},v1.16b		// vpshufb %xmm1, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v1.16b
	ret
.size	_vpaes_encrypt_2x,.-_vpaes_encrypt_2x
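
//
// _vpaes_encrypt_2x processes two independent blocks (v14-v15 input,
// v0-v1 output) through one pass of the round function, interleaving
// the two dependency chains to keep the pipeline busy.  Only
// parallelizable callers can use it: ECB encryption below does, while
// CBC encryption cannot, since each ciphertext block feeds the next.
//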

.type	_vpaes_decrypt_preheat,%function
.align	4
_vpaes_decrypt_preheat:
	adr	x10, .Lk_inv
	movi	v17.16b, #0x0f
	adr	x11, .Lk_dipt
	ld1	{v18.2d-v19.2d}, [x10],#32	// .Lk_inv
	ld1	{v20.2d-v23.2d}, [x11],#64	// .Lk_dipt, .Lk_dsbo
	ld1	{v24.2d-v27.2d}, [x11],#64	// .Lk_dsb9, .Lk_dsbd
	ld1	{v28.2d-v31.2d}, [x11]		// .Lk_dsbb, .Lk_dsbe
	ret
.size	_vpaes_decrypt_preheat,.-_vpaes_decrypt_preheat

##
##  Decryption core
##
##  Same API as encryption core.
##
.type	_vpaes_decrypt_core,%function
.align	4
_vpaes_decrypt_core:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds

			// vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
	lsl	x11, x8, #4			// mov %rax, %r11; shl \$4, %r11
	eor	x11, x11, #0x30			// xor \$0x30, %r11
	adr	x10, .Lk_sr
	and	x11, x11, #0x30			// and \$0x30, %r11
	add	x11, x11, x10
	adr	x10, .Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm4 # round0 key
	and	v1.16b, v7.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v7.16b, #4		// vpsrlb \$4, %xmm0, %xmm0
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
	ld1	{v5.2d}, [x10]			// vmovdqa .Lk_mc_forward+48(%rip), %xmm5
			// vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	eor	v2.16b, v2.16b, v16.16b		// vpxor %xmm4, %xmm2, %xmm2
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	b	.Ldec_entry

.align	4
.Ldec_loop:
//
//  Inverse mix columns
//
			// vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
			// vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
	tbl	v4.16b, {$sb9u}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
	tbl	v1.16b, {$sb9t}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
	eor	v0.16b, v4.16b, v16.16b		// vpxor %xmm4, %xmm0, %xmm0
			// vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
			// vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt

	tbl	v4.16b, {$sbdu}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v1.16b, {$sbdt}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
			// vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
			// vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt

	tbl	v4.16b, {$sbbu}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v1.16b, {$sbbt}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
			// vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
			// vmovdqa 0x50(%r10), %xmm1 # 0 : sbet

	tbl	v4.16b, {$sbeu}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
	tbl	v0.16b, {v0.16b}, v5.16b	// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v1.16b, {$sbet}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr \$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	sub	w8, w8, #1			// sub \$1,%rax # nr--

.Ldec_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v4.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v3.16b, {$invlo}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm0
	cbnz	w8, .Ldec_loop

	// middle of last round
			// vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
			// vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
	ld1	{v2.2d}, [x11]			// vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
	tbl	v1.16b, {$sbot}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v0.16b, v1.16b, v4.16b		// vpxor %xmm4, %xmm1, %xmm0 # 0 = A
	tbl	v0.16b, {v0.16b}, v2.16b	// vpshufb %xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_decrypt_core,.-_vpaes_decrypt_core

.globl	vpaes_decrypt
.type	vpaes_decrypt,%function
.align	4
vpaes_decrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{v7.16b}, [$inp]
	bl	_vpaes_decrypt_preheat
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out]

	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_decrypt,.-vpaes_decrypt

// v14-v15 input, v0-v1 output
.type	_vpaes_decrypt_2x,%function
.align	4
_vpaes_decrypt_2x:
	mov	x9, $key
	ldr	w8, [$key,#240]			// pull rounds

			// vmovdqa .Lk_dipt(%rip), %xmm2 # iptlo
	lsl	x11, x8, #4			// mov %rax, %r11; shl \$4, %r11
	eor	x11, x11, #0x30			// xor \$0x30, %r11
	adr	x10, .Lk_sr
	and	x11, x11, #0x30			// and \$0x30, %r11
	add	x11, x11, x10
	adr	x10, .Lk_mc_forward+48

	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm4 # round0 key
	and	v1.16b, v14.16b, v17.16b	// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v14.16b, #4		// vpsrlb \$4, %xmm0, %xmm0
	and	v9.16b, v15.16b, v17.16b
	ushr	v8.16b, v15.16b, #4
	tbl	v2.16b, {$iptlo},v1.16b		// vpshufb %xmm1, %xmm2, %xmm2
	tbl	v10.16b, {$iptlo},v9.16b
	ld1	{v5.2d}, [x10]			// vmovdqa .Lk_mc_forward+48(%rip), %xmm5
			// vmovdqa .Lk_dipt+16(%rip), %xmm1 # ipthi
	tbl	v0.16b, {$ipthi},v0.16b		// vpshufb %xmm0, %xmm1, %xmm0
	tbl	v8.16b, {$ipthi},v8.16b
	eor	v2.16b, v2.16b, v16.16b		// vpxor %xmm4, %xmm2, %xmm2
	eor	v10.16b, v10.16b, v16.16b
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	eor	v8.16b, v8.16b, v10.16b
	b	.Ldec_2x_entry

.align	4
.Ldec_2x_loop:
//
//  Inverse mix columns
//
			// vmovdqa -0x20(%r10),%xmm4 # 4 : sb9u
			// vmovdqa -0x10(%r10),%xmm1 # 0 : sb9t
	tbl	v4.16b, {$sb9u}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sb9u
	tbl	v12.16b, {$sb9u}, v10.16b
	tbl	v1.16b, {$sb9t}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb9t
	tbl	v9.16b, {$sb9t}, v11.16b
	eor	v0.16b, v4.16b, v16.16b		// vpxor %xmm4, %xmm0, %xmm0
	eor	v8.16b, v12.16b, v16.16b
			// vmovdqa 0x00(%r10),%xmm4 # 4 : sbdu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
			// vmovdqa 0x10(%r10),%xmm1 # 0 : sbdt

	tbl	v4.16b, {$sbdu}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbdu
	tbl	v12.16b, {$sbdu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbdt}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbdt
	tbl	v9.16b, {$sbdt}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	eor	v8.16b, v8.16b, v12.16b
			// vmovdqa 0x20(%r10), %xmm4 # 4 : sbbu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b
			// vmovdqa 0x30(%r10), %xmm1 # 0 : sbbt

	tbl	v4.16b, {$sbbu}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbbu
	tbl	v12.16b, {$sbbu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbbt}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbbt
	tbl	v9.16b, {$sbbt}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	eor	v8.16b, v8.16b, v12.16b
			// vmovdqa 0x40(%r10), %xmm4 # 4 : sbeu
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b
			// vmovdqa 0x50(%r10), %xmm1 # 0 : sbet

	tbl	v4.16b, {$sbeu}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbeu
	tbl	v12.16b, {$sbeu}, v10.16b
	tbl	v0.16b, {v0.16b},v5.16b		// vpshufb %xmm5, %xmm0, %xmm0 # MC ch
	tbl	v8.16b, {v8.16b},v5.16b
	tbl	v1.16b, {$sbet}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sbet
	tbl	v9.16b, {$sbet}, v11.16b
	eor	v0.16b, v0.16b, v4.16b		// vpxor %xmm4, %xmm0, %xmm0 # 4 = ch
	eor	v8.16b, v8.16b, v12.16b
	ext	v5.16b, v5.16b, v5.16b, #12	// vpalignr \$12, %xmm5, %xmm5, %xmm5
	eor	v0.16b, v0.16b, v1.16b		// vpxor %xmm1, %xmm0, %xmm0 # 0 = ch
	eor	v8.16b, v8.16b, v9.16b
	sub	w8, w8, #1			// sub \$1,%rax # nr--

.Ldec_2x_entry:
	// top of round
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	and	v9.16b, v8.16b, v17.16b
	ushr	v8.16b, v8.16b, #4
	tbl	v2.16b, {$invhi},v1.16b		// vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
	tbl	v10.16b, {$invhi},v9.16b
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	eor	v9.16b, v9.16b, v8.16b
	tbl	v3.16b, {$invlo},v0.16b		// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	tbl	v11.16b, {$invlo},v8.16b
	tbl	v4.16b, {$invlo},v1.16b		// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	tbl	v12.16b, {$invlo},v9.16b
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	eor	v11.16b, v11.16b, v10.16b
	eor	v4.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	eor	v12.16b, v12.16b, v10.16b
	tbl	v2.16b, {$invlo},v3.16b		// vpshufb %xmm3, %xmm10, %xmm2 # 2 = 1/iak
	tbl	v10.16b, {$invlo},v11.16b
	tbl	v3.16b, {$invlo},v4.16b		// vpshufb %xmm4, %xmm10, %xmm3 # 3 = 1/jak
	tbl	v11.16b, {$invlo},v12.16b
	eor	v2.16b, v2.16b, v1.16b		// vpxor %xmm1, %xmm2, %xmm2 # 2 = io
	eor	v10.16b, v10.16b, v9.16b
	eor	v3.16b, v3.16b, v0.16b		// vpxor %xmm0, %xmm3, %xmm3 # 3 = jo
	eor	v11.16b, v11.16b, v8.16b
	ld1	{v16.2d}, [x9],#16		// vmovdqu (%r9), %xmm0
	cbnz	w8, .Ldec_2x_loop

	// middle of last round
			// vmovdqa 0x60(%r10), %xmm4 # 3 : sbou
	tbl	v4.16b, {$sbou}, v2.16b		// vpshufb %xmm2, %xmm4, %xmm4 # 4 = sbou
	tbl	v12.16b, {$sbou}, v10.16b
			// vmovdqa 0x70(%r10), %xmm1 # 0 : sbot
	tbl	v1.16b, {$sbot}, v3.16b		// vpshufb %xmm3, %xmm1, %xmm1 # 0 = sb1t
	tbl	v9.16b, {$sbot}, v11.16b
	ld1	{v2.2d}, [x11]			// vmovdqa -0x160(%r11), %xmm2 # .Lk_sr-.Lk_dsbd=-0x160
	eor	v4.16b, v4.16b, v16.16b		// vpxor %xmm0, %xmm4, %xmm4 # 4 = sb1u + k
	eor	v12.16b, v12.16b, v16.16b
	eor	v0.16b, v1.16b, v4.16b		// vpxor %xmm4, %xmm1, %xmm0 # 0 = A
	eor	v8.16b, v9.16b, v12.16b
	tbl	v0.16b, {v0.16b},v2.16b		// vpshufb %xmm2, %xmm0, %xmm0
	tbl	v1.16b, {v8.16b},v2.16b
	ret
.size	_vpaes_decrypt_2x,.-_vpaes_decrypt_2x
___
}

{
my ($inp,$bits,$out,$dir)=("x0","w1","x2","w3");
my ($invlo,$invhi,$iptlo,$ipthi,$rcon) = map("v$_.16b",(18..21,8));

$code.=<<___;
########################################################
##                                                    ##
##                 AES key schedule                   ##
##                                                    ##
########################################################
.type	_vpaes_key_preheat,%function
.align	4
_vpaes_key_preheat:
	adr	x10, .Lk_inv
	movi	v16.16b, #0x5b			// .Lk_s63
	adr	x11, .Lk_sb1
	movi	v17.16b, #0x0f			// .Lk_s0F
	ld1	{v18.2d-v21.2d}, [x10]		// .Lk_inv, .Lk_ipt
	adr	x10, .Lk_dksd
	ld1	{v22.2d-v23.2d}, [x11]		// .Lk_sb1
	adr	x11, .Lk_mc_forward
	ld1	{v24.2d-v27.2d}, [x10],#64	// .Lk_dksd, .Lk_dksb
	ld1	{v28.2d-v31.2d}, [x10],#64	// .Lk_dkse, .Lk_dks9
	ld1	{v8.2d}, [x10]			// .Lk_rcon
	ld1	{v9.2d}, [x11]			// .Lk_mc_forward[0]
	ret
.size	_vpaes_key_preheat,.-_vpaes_key_preheat

.type	_vpaes_schedule_core,%function
.align	4
_vpaes_schedule_core:
	stp	x29, x30, [sp,#-16]!
	add	x29,sp,#0

	bl	_vpaes_key_preheat		// load the tables

	ld1	{v0.16b}, [$inp],#16		// vmovdqu (%rdi), %xmm0 # load key (unaligned)

	// input transform
	mov	v3.16b, v0.16b			// vmovdqa %xmm0, %xmm3
	bl	_vpaes_schedule_transform
	mov	v7.16b, v0.16b			// vmovdqa %xmm0, %xmm7

	adr	x10, .Lk_sr			// lea .Lk_sr(%rip),%r10
	add	x8, x8, x10
	cbnz	$dir, .Lschedule_am_decrypting

	// encrypting, output zeroth round key after transform
	st1	{v0.2d}, [$out]			// vmovdqu %xmm0, (%rdx)
	b	.Lschedule_go

.Lschedule_am_decrypting:
	// decrypting, output zeroth round key after shiftrows
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10), %xmm1
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	st1	{v3.2d}, [$out]			// vmovdqu %xmm3, (%rdx)
	eor	x8, x8, #0x30			// xor \$0x30, %r8

.Lschedule_go:
	cmp	$bits, #192			// cmp \$192, %esi
	b.hi	.Lschedule_256
	b.eq	.Lschedule_192
	// 128: fall through

##
##  .schedule_128
##
##  128-bit specific part of key schedule.
##
##  This schedule is really simple, because all its parts
##  are accomplished by the subroutines.
##
.Lschedule_128:
	mov	$inp, #10			// mov \$10, %esi

.Loop_schedule_128:
	sub	$inp, $inp, #1			// dec %esi
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// write output
	b	.Loop_schedule_128

##
##  .aes_schedule_192
##
##  192-bit specific part of key schedule.
##
##  The main body of this schedule is the same as the 128-bit
##  schedule, but with more smearing.  The long, high side is
##  stored in %xmm7 as before, and the short, low side is in
##  the high bits of %xmm6.
##
##  This schedule is somewhat nastier, however, because each
##  round produces 192 bits of key material, or 1.5 round keys.
##  Therefore, on each cycle we do 2 rounds and produce 3 round
##  keys.
##
.align	4
.Lschedule_192:
	sub	$inp, $inp, #8
	ld1	{v0.16b}, [$inp]		// vmovdqu 8(%rdi),%xmm0 # load key part 2 (very unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	v6.16b, v0.16b			// vmovdqa %xmm0, %xmm6 # save short part
	eor	v4.16b, v4.16b, v4.16b		// vpxor %xmm4, %xmm4, %xmm4 # clear 4
	ins	v6.d[0], v4.d[0]		// vmovhlps %xmm4, %xmm6, %xmm6 # clobber low side with zeros
	mov	$inp, #4			// mov \$4, %esi

.Loop_schedule_192:
	sub	$inp, $inp, #1			// dec %esi
	bl	_vpaes_schedule_round
	ext	v0.16b, v6.16b, v0.16b, #8	// vpalignr \$8,%xmm6,%xmm0,%xmm0
	bl	_vpaes_schedule_mangle		// save key n
	bl	_vpaes_schedule_192_smear
	bl	_vpaes_schedule_mangle		// save key n+1
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle		// save key n+2
	bl	_vpaes_schedule_192_smear
	b	.Loop_schedule_192

##
##  .aes_schedule_256
##
##  256-bit specific part of key schedule.
##
##  The structure here is very similar to the 128-bit
##  schedule, but with an additional "low side" in
##  %xmm6.  The low side's rounds are the same as the
##  high side's, except no rcon and no rotation.
##
.align	4
.Lschedule_256:
	ld1	{v0.16b}, [$inp]		// vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned)
	bl	_vpaes_schedule_transform	// input transform
	mov	$inp, #7			// mov \$7, %esi

.Loop_schedule_256:
	sub	$inp, $inp, #1			// dec %esi
	bl	_vpaes_schedule_mangle		// output low result
	mov	v6.16b, v0.16b			// vmovdqa %xmm0, %xmm6 # save cur_lo in xmm6

	// high round
	bl	_vpaes_schedule_round
	cbz	$inp, .Lschedule_mangle_last
	bl	_vpaes_schedule_mangle

	// low round. swap xmm7 and xmm6
	dup	v0.4s, v0.s[3]			// vpshufd \$0xFF, %xmm0, %xmm0
	movi	v4.16b, #0
	mov	v5.16b, v7.16b			// vmovdqa %xmm7, %xmm5
	mov	v7.16b, v6.16b			// vmovdqa %xmm6, %xmm7
	bl	_vpaes_schedule_low_round
	mov	v7.16b, v5.16b			// vmovdqa %xmm5, %xmm7

	b	.Loop_schedule_256

##
##  .aes_schedule_mangle_last
##
##  Mangler for last round of key schedule
##  Mangles %xmm0
##    when encrypting, outputs out(%xmm0) ^ 63
##    when decrypting, outputs unskew(%xmm0)
##
##  Always called right before return... jumps to cleanup and exits
##
.align	4
.Lschedule_mangle_last:
	// schedule last round key from xmm0
	adr	x11, .Lk_deskew			// lea .Lk_deskew(%rip),%r11 # prepare to deskew
	cbnz	$dir, .Lschedule_mangle_last_dec

	// encrypting
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10),%xmm1
	adr	x11, .Lk_opt			// lea .Lk_opt(%rip), %r11 # prepare to output transform
	add	$out, $out, #32			// add \$32, %rdx
	tbl	v0.16b, {v0.16b}, v1.16b	// vpshufb %xmm1, %xmm0, %xmm0 # output permute

.Lschedule_mangle_last_dec:
	ld1	{v20.2d-v21.2d}, [x11]		// reload constants
	sub	$out, $out, #16			// add \$-16, %rdx
	eor	v0.16b, v0.16b, v16.16b		// vpxor .Lk_s63(%rip), %xmm0, %xmm0
	bl	_vpaes_schedule_transform	// output transform
	st1	{v0.2d}, [$out]			// vmovdqu %xmm0, (%rdx) # save last key

	// cleanup
	eor	v0.16b, v0.16b, v0.16b		// vpxor %xmm0, %xmm0, %xmm0
	eor	v1.16b, v1.16b, v1.16b		// vpxor %xmm1, %xmm1, %xmm1
	eor	v2.16b, v2.16b, v2.16b		// vpxor %xmm2, %xmm2, %xmm2
	eor	v3.16b, v3.16b, v3.16b		// vpxor %xmm3, %xmm3, %xmm3
	eor	v4.16b, v4.16b, v4.16b		// vpxor %xmm4, %xmm4, %xmm4
	eor	v5.16b, v5.16b, v5.16b		// vpxor %xmm5, %xmm5, %xmm5
	eor	v6.16b, v6.16b, v6.16b		// vpxor %xmm6, %xmm6, %xmm6
	eor	v7.16b, v7.16b, v7.16b		// vpxor %xmm7, %xmm7, %xmm7
	ldp	x29, x30, [sp],#16
	ret
.size	_vpaes_schedule_core,.-_vpaes_schedule_core

##
##  .aes_schedule_192_smear
##
##  Smear the short, low side in the 192-bit key schedule.
##
##  Inputs:
##    %xmm7: high side, b a x y
##    %xmm6: low side, d c 0 0
##    %xmm13: 0
##
##  Outputs:
##    %xmm6: b+c+d b+c 0 0
##    %xmm0: b+c+d b+c b a
##
.type	_vpaes_schedule_192_smear,%function
.align	4
_vpaes_schedule_192_smear:
	movi	v1.16b, #0
	dup	v0.4s, v7.s[3]
	ins	v1.s[3], v6.s[2]		// vpshufd \$0x80, %xmm6, %xmm1 # d c 0 0 -> c 0 0 0
	ins	v0.s[0], v7.s[2]		// vpshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
	eor	v6.16b, v6.16b, v1.16b		// vpxor %xmm1, %xmm6, %xmm6 # -> c+d c 0 0
	eor	v1.16b, v1.16b, v1.16b		// vpxor %xmm1, %xmm1, %xmm1
	eor	v6.16b, v6.16b, v0.16b		// vpxor %xmm0, %xmm6, %xmm6 # -> b+c+d b+c b a
	mov	v0.16b, v6.16b			// vmovdqa %xmm6, %xmm0
	ins	v6.d[0], v1.d[0]		// vmovhlps %xmm1, %xmm6, %xmm6 # clobber low side with zeros
	ret
.size	_vpaes_schedule_192_smear,.-_vpaes_schedule_192_smear

##
##  .aes_schedule_round
##
##  Runs one main round of the key schedule on %xmm0, %xmm7
##
##  Specifically, runs subbytes on the high dword of %xmm0
##  then rotates it by one byte and xors into the low dword of
##  %xmm7.
##
##  Adds rcon from low byte of %xmm8, then rotates %xmm8 for
##  next rcon.
##
##  Smears the dwords of %xmm7 by xoring the low into the
##  second low, result into third, result into highest.
##
##  Returns results in %xmm7 = %xmm0.
##  Clobbers %xmm1-%xmm4, %r11.
##
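##  For reference, this matches the standard AES expansion: if the
##  previous round key is (a,b,c,d), the smear forms the prefix xors
##  (a, a^b, a^b^c, a^b^c^d), and xoring SubWord(RotWord(d)) ^ rcon
##  into all four words yields the next round key (carried here in the
##  vpaes basis).
##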
.type	_vpaes_schedule_round,%function
.align	4
_vpaes_schedule_round:
	// extract rcon from xmm8
	movi	v4.16b, #0			// vpxor %xmm4, %xmm4, %xmm4
	ext	v1.16b, $rcon, v4.16b, #15	// vpalignr \$15, %xmm8, %xmm4, %xmm1
	ext	$rcon, $rcon, $rcon, #15	// vpalignr \$15, %xmm8, %xmm8, %xmm8
	eor	v7.16b, v7.16b, v1.16b		// vpxor %xmm1, %xmm7, %xmm7

	// rotate
	dup	v0.4s, v0.s[3]			// vpshufd \$0xFF, %xmm0, %xmm0
	ext	v0.16b, v0.16b, v0.16b, #1	// vpalignr \$1, %xmm0, %xmm0, %xmm0

	// fall through...

	// low round: same as high round, but no rotation and no rcon.
_vpaes_schedule_low_round:
	// smear xmm7
	ext	v1.16b, v4.16b, v7.16b, #12	// vpslldq \$4, %xmm7, %xmm1
	eor	v7.16b, v7.16b, v1.16b		// vpxor %xmm1, %xmm7, %xmm7
	ext	v4.16b, v4.16b, v7.16b, #8	// vpslldq \$8, %xmm7, %xmm4

	// subbytes
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1 # 0 = k
	ushr	v0.16b, v0.16b, #4		// vpsrlb \$4, %xmm0, %xmm0 # 1 = i
	eor	v7.16b, v7.16b, v4.16b		// vpxor %xmm4, %xmm7, %xmm7
	tbl	v2.16b, {$invhi}, v1.16b	// vpshufb %xmm1, %xmm11, %xmm2 # 2 = a/k
	eor	v1.16b, v1.16b, v0.16b		// vpxor %xmm0, %xmm1, %xmm1 # 0 = j
	tbl	v3.16b, {$invlo}, v0.16b	// vpshufb %xmm0, %xmm10, %xmm3 # 3 = 1/i
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3 # 3 = iak = 1/i + a/k
	tbl	v4.16b, {$invlo}, v1.16b	// vpshufb %xmm1, %xmm10, %xmm4 # 4 = 1/j
	eor	v7.16b, v7.16b, v16.16b		// vpxor .Lk_s63(%rip), %xmm7, %xmm7
	tbl	v3.16b, {$invlo}, v3.16b	// vpshufb %xmm3, %xmm10, %xmm3 # 2 = 1/iak
	eor	v4.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm4 # 4 = jak = 1/j + a/k
	tbl	v2.16b, {$invlo}, v4.16b	// vpshufb %xmm4, %xmm10, %xmm2 # 3 = 1/jak
	eor	v3.16b, v3.16b, v1.16b		// vpxor %xmm1, %xmm3, %xmm3 # 2 = io
	eor	v2.16b, v2.16b, v0.16b		// vpxor %xmm0, %xmm2, %xmm2 # 3 = jo
	tbl	v4.16b, {v23.16b}, v3.16b	// vpshufb %xmm3, %xmm13, %xmm4 # 4 = sbou
	tbl	v1.16b, {v22.16b}, v2.16b	// vpshufb %xmm2, %xmm12, %xmm1 # 0 = sb1t
	eor	v1.16b, v1.16b, v4.16b		// vpxor %xmm4, %xmm1, %xmm1 # 0 = sbox output

	// add in smeared stuff
	eor	v0.16b, v1.16b, v7.16b		// vpxor %xmm7, %xmm1, %xmm0
	eor	v7.16b, v1.16b, v7.16b		// vmovdqa %xmm0, %xmm7
	ret
.size	_vpaes_schedule_round,.-_vpaes_schedule_round

##
##  .aes_schedule_transform
##
##  Linear-transform %xmm0 according to tables at (%r11)
##
##  Requires that %xmm9 = 0x0F0F... as in preheat
##  Output in %xmm0
##  Clobbers %xmm1, %xmm2
##
.type	_vpaes_schedule_transform,%function
.align	4
_vpaes_schedule_transform:
	and	v1.16b, v0.16b, v17.16b		// vpand %xmm9, %xmm0, %xmm1
	ushr	v0.16b, v0.16b, #4		// vpsrlb \$4, %xmm0, %xmm0
			// vmovdqa (%r11), %xmm2 # lo
	tbl	v2.16b, {$iptlo}, v1.16b	// vpshufb %xmm1, %xmm2, %xmm2
			// vmovdqa 16(%r11), %xmm1 # hi
	tbl	v0.16b, {$ipthi}, v0.16b	// vpshufb %xmm0, %xmm1, %xmm0
	eor	v0.16b, v0.16b, v2.16b		// vpxor %xmm2, %xmm0, %xmm0
	ret
.size	_vpaes_schedule_transform,.-_vpaes_schedule_transform

##
##  .aes_schedule_mangle
##
##  Mangle xmm0 from (basis-transformed) standard version
##  to our version.
##
##  On encrypt,
##    xor with 0x63
##    multiply by circulant 0,1,1,1
##    apply shiftrows transform
##
##  On decrypt,
##    xor with 0x63
##    multiply by "inverse mixcolumns" circulant E,B,D,9
##    deskew
##    apply shiftrows transform
##
##
##  Writes out to (%rdx), and increments or decrements it
##  Keeps track of round number mod 4 in %r8
##  Preserves xmm0
##  Clobbers xmm1-xmm5
##
.type	_vpaes_schedule_mangle,%function
.align	4
_vpaes_schedule_mangle:
	mov	v4.16b, v0.16b			// vmovdqa %xmm0, %xmm4 # save xmm0 for later
			// vmovdqa .Lk_mc_forward(%rip),%xmm5
	cbnz	$dir, .Lschedule_mangle_dec

	// encrypting
	eor	v4.16b, v0.16b, v16.16b		// vpxor .Lk_s63(%rip), %xmm0, %xmm4
	add	$out, $out, #16			// add \$16, %rdx
	tbl	v4.16b, {v4.16b}, v9.16b	// vpshufb %xmm5, %xmm4, %xmm4
	tbl	v1.16b, {v4.16b}, v9.16b	// vpshufb %xmm5, %xmm4, %xmm1
	tbl	v3.16b, {v1.16b}, v9.16b	// vpshufb %xmm5, %xmm1, %xmm3
	eor	v4.16b, v4.16b, v1.16b		// vpxor %xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10), %xmm1
	eor	v3.16b, v3.16b, v4.16b		// vpxor %xmm4, %xmm3, %xmm3

	b	.Lschedule_mangle_both
.align	4
.Lschedule_mangle_dec:
	// inverse mix columns
			// lea .Lk_dksd(%rip),%r11
	ushr	v1.16b, v4.16b, #4		// vpsrlb \$4, %xmm4, %xmm1 # 1 = hi
	and	v4.16b, v4.16b, v17.16b		// vpand %xmm9, %xmm4, %xmm4 # 4 = lo

			// vmovdqa 0x00(%r11), %xmm2
	tbl	v2.16b, {v24.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
			// vmovdqa 0x10(%r11), %xmm3
	tbl	v3.16b, {v25.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3

			// vmovdqa 0x20(%r11), %xmm2
	tbl	v2.16b, {v26.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor %xmm3, %xmm2, %xmm2
			// vmovdqa 0x30(%r11), %xmm3
	tbl	v3.16b, {v27.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3

			// vmovdqa 0x40(%r11), %xmm2
	tbl	v2.16b, {v28.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	eor	v2.16b, v2.16b, v3.16b		// vpxor %xmm3, %xmm2, %xmm2
			// vmovdqa 0x50(%r11), %xmm3
	tbl	v3.16b, {v29.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	eor	v3.16b, v3.16b, v2.16b		// vpxor %xmm2, %xmm3, %xmm3

			// vmovdqa 0x60(%r11), %xmm2
	tbl	v2.16b, {v30.16b}, v4.16b	// vpshufb %xmm4, %xmm2, %xmm2
	tbl	v3.16b, {v3.16b}, v9.16b	// vpshufb %xmm5, %xmm3, %xmm3
			// vmovdqa 0x70(%r11), %xmm4
	tbl	v4.16b, {v31.16b}, v1.16b	// vpshufb %xmm1, %xmm4, %xmm4
	ld1	{v1.2d}, [x8]			// vmovdqa (%r8,%r10), %xmm1
	eor	v2.16b, v2.16b, v3.16b		// vpxor %xmm3, %xmm2, %xmm2
	eor	v3.16b, v4.16b, v2.16b		// vpxor %xmm2, %xmm4, %xmm3

	sub	$out, $out, #16			// add \$-16, %rdx

.Lschedule_mangle_both:
	tbl	v3.16b, {v3.16b}, v1.16b	// vpshufb %xmm1, %xmm3, %xmm3
	add	x8, x8, #64-16			// add \$-16, %r8
	and	x8, x8, #~(1<<6)		// and \$0x30, %r8
	st1	{v3.2d}, [$out]			// vmovdqu %xmm3, (%rdx)
	ret
.size	_vpaes_schedule_mangle,.-_vpaes_schedule_mangle

.globl	vpaes_set_encrypt_key
.type	vpaes_set_encrypt_key,%function
.align	4
vpaes_set_encrypt_key:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so

	lsr	w9, $bits, #5			// shr \$5,%eax
	add	w9, w9, #5			// \$5,%eax
	str	w9, [$out,#240]			// mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
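						// (nbits/32+5: 9 for 128-, 11 for 192-, 13 for 256-bit keys)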

	mov	$dir, #0			// mov \$0,%ecx
	mov	x8, #0x30			// mov \$0x30,%r8d
	bl	_vpaes_schedule_core
	eor	x0, x0, x0

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_set_encrypt_key,.-vpaes_set_encrypt_key

.globl	vpaes_set_decrypt_key
.type	vpaes_set_decrypt_key,%function
.align	4
vpaes_set_decrypt_key:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so

	lsr	w9, $bits, #5			// shr \$5,%eax
	add	w9, w9, #5			// \$5,%eax
	str	w9, [$out,#240]			// mov %eax,240(%rdx) # AES_KEY->rounds = nbits/32+5;
	lsl	w9, w9, #4			// shl \$4,%eax
	add	$out, $out, #16			// lea 16(%rdx,%rax),%rdx
	add	$out, $out, x9

	mov	$dir, #1			// mov \$1,%ecx
	lsr	w8, $bits, #1			// shr \$1,%r8d
	and	x8, x8, #32			// and \$32,%r8d
	eor	x8, x8, #32			// xor \$32,%r8d # nbits==192?0:32
	bl	_vpaes_schedule_core

	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_set_decrypt_key,.-vpaes_set_decrypt_key
___
}
{
my ($inp,$out,$len,$key,$ivec,$dir) = map("x$_",(0..5));

$code.=<<___;
.globl	vpaes_cbc_encrypt
.type	vpaes_cbc_encrypt,%function
.align	4
vpaes_cbc_encrypt:
	cbz	$len, .Lcbc_abort
	cmp	w5, #0				// check direction
	b.eq	vpaes_cbc_decrypt

	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	mov	x17, $len			// reassign
	mov	x2,  $key			// reassign

	ld1	{v0.16b}, [$ivec]		// load ivec
	bl	_vpaes_encrypt_preheat
	b	.Lcbc_enc_loop

.align	4
.Lcbc_enc_loop:
	ld1	{v7.16b}, [$inp],#16		// load input
	eor	v7.16b, v7.16b, v0.16b		// xor with ivec
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16		// save output
	subs	x17, x17, #16
	b.hi	.Lcbc_enc_loop

	st1	{v0.16b}, [$ivec]		// write ivec

	ldp	x29,x30,[sp],#16
.Lcbc_abort:
	ret
.size	vpaes_cbc_encrypt,.-vpaes_cbc_encrypt
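
//
// CBC chains blocks as C[i] = E(P[i] ^ C[i-1]), so the encryption loop
// above is inherently serial, one _vpaes_encrypt_core call per block.
// Decryption computes P[i] = D(C[i]) ^ C[i-1] from ciphertext that is
// already at hand, so the routine below can process two blocks at a
// time with _vpaes_decrypt_2x.
//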

.type	vpaes_cbc_decrypt,%function
.align	4
vpaes_cbc_decrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len			// reassign
	mov	x2,  $key			// reassign
	ld1	{v6.16b}, [$ivec]		// load ivec
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lcbc_dec_loop2x

	ld1	{v7.16b}, [$inp], #16		// load input
	bl	_vpaes_decrypt_core
	eor	v0.16b, v0.16b, v6.16b		// xor with ivec
	orr	v6.16b, v7.16b, v7.16b		// next ivec value
	st1	{v0.16b}, [$out], #16
	subs	x17, x17, #16
	b.ls	.Lcbc_dec_done

.align	4
.Lcbc_dec_loop2x:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	eor	v0.16b, v0.16b, v6.16b		// xor with ivec
	eor	v1.16b, v1.16b, v14.16b
	orr	v6.16b, v15.16b, v15.16b
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lcbc_dec_loop2x

.Lcbc_dec_done:
	st1	{v6.16b}, [$ivec]

	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_cbc_decrypt,.-vpaes_cbc_decrypt
___
if (1) {
$code.=<<___;
.globl	vpaes_ecb_encrypt
.type	vpaes_ecb_encrypt,%function
.align	4
vpaes_ecb_encrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_encrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_enc_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_encrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_enc_done

.align	4
.Lecb_enc_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_encrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_enc_loop

.Lecb_enc_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_ecb_encrypt,.-vpaes_ecb_encrypt

.globl	vpaes_ecb_decrypt
.type	vpaes_ecb_decrypt,%function
.align	4
vpaes_ecb_decrypt:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0
	stp	d8,d9,[sp,#-16]!		// ABI spec says so
	stp	d10,d11,[sp,#-16]!
	stp	d12,d13,[sp,#-16]!
	stp	d14,d15,[sp,#-16]!

	mov	x17, $len
	mov	x2,  $key
	bl	_vpaes_decrypt_preheat
	tst	x17, #16
	b.eq	.Lecb_dec_loop

	ld1	{v7.16b}, [$inp],#16
	bl	_vpaes_decrypt_core
	st1	{v0.16b}, [$out],#16
	subs	x17, x17, #16
	b.ls	.Lecb_dec_done

.align	4
.Lecb_dec_loop:
	ld1	{v14.16b,v15.16b}, [$inp], #32
	bl	_vpaes_decrypt_2x
	st1	{v0.16b,v1.16b}, [$out], #32
	subs	x17, x17, #32
	b.hi	.Lecb_dec_loop

.Lecb_dec_done:
	ldp	d14,d15,[sp],#16
	ldp	d12,d13,[sp],#16
	ldp	d10,d11,[sp],#16
	ldp	d8,d9,[sp],#16
	ldp	x29,x30,[sp],#16
	ret
.size	vpaes_ecb_decrypt,.-vpaes_ecb_decrypt
___
} }
print $code;

close STDOUT;