/* $Id: IEMN8veRecompilerEmit.h 104359 2024-04-18 06:27:36Z vboxsync $ */
/** @file
 * IEM - Interpreted Execution Manager - Native Recompiler Inlined Emitters.
 */

/*
 * Copyright (C) 2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMN8veRecompilerEmit_h
#define VMM_INCLUDED_SRC_include_IEMN8veRecompilerEmit_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#include "IEMN8veRecompiler.h"


/** @defgroup grp_iem_n8ve_re_inline    Native Recompiler Inlined Emitters
 * @ingroup grp_iem_n8ve_re
 * @{
 */

/**
 * Emit a simple marker instruction to more easily tell where something starts
 * in the disassembly.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitMarker(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t uInfo)
{
#ifdef RT_ARCH_AMD64
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (uInfo == 0)
    {
        /* nop */
        pbCodeBuf[off++] = 0x90;
    }
    else
    {
        /* nop [disp32] */
        pbCodeBuf[off++] = 0x0f;
        pbCodeBuf[off++] = 0x1f;
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, 0, 5);
        pbCodeBuf[off++] = RT_BYTE1(uInfo);
        pbCodeBuf[off++] = RT_BYTE2(uInfo);
        pbCodeBuf[off++] = RT_BYTE3(uInfo);
        pbCodeBuf[off++] = RT_BYTE4(uInfo);
    }
#elif defined(RT_ARCH_ARM64)
    /* nop */
    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    if (uInfo == 0)
        pu32CodeBuf[off++] = ARMV8_A64_INSTR_NOP;
    else
        pu32CodeBuf[off++] = Armv8A64MkInstrMovZ(ARMV8_A64_REG_XZR, (uint16_t)uInfo);

    RT_NOREF(uInfo);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emit a breakpoint instruction.
 */
DECL_FORCE_INLINE(uint32_t) iemNativeEmitBrkEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint32_t uInfo)
{
#ifdef RT_ARCH_AMD64
    pCodeBuf[off++] = 0xcc;
    RT_NOREF(uInfo); /** @todo use multibyte nop for info? */

#elif defined(RT_ARCH_ARM64)
    pCodeBuf[off++] = Armv8A64MkInstrBrk(uInfo & UINT32_C(0xffff));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emit a breakpoint instruction.
 */
DECL_INLINE_THROW(uint32_t) iemNativeEmitBrk(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t uInfo)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitBrkEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, uInfo);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitBrkEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, uInfo);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/*********************************************************************************************************************************
*   Loads, Stores and Related Stuff.                                                                                             *
*********************************************************************************************************************************/

#ifdef RT_ARCH_AMD64
/**
 * Common bit of iemNativeEmitLoadGprByGpr and friends.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitGprByGprDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, uint8_t iGprBase, int32_t offDisp)
{
    if (offDisp == 0 && (iGprBase & 7) != X86_GREG_xBP) /* Can use encoding w/o displacement field. */
    {
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, iGprReg & 7, iGprBase & 7);
        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
    }
    else if (offDisp == (int8_t)offDisp)
    {
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, iGprBase & 7);
        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
        pbCodeBuf[off++] = (uint8_t)offDisp;
    }
    else
    {
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, iGprBase & 7);
        if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
            pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
    }
    return off;
}
#endif /* RT_ARCH_AMD64 */
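
/* Illustration only (not part of the emitter API): the three ModRM shapes
   iemNativeEmitGprByGprDisp chooses between, worked out by hand for
   iGprReg=RAX, iGprBase=RDX:
        offDisp == 0        -> 02                  ; mod=0: [rdx]
        offDisp == 0x10     -> 42 10               ; mod=1: [rdx+disp8]
        offDisp == 0x200    -> 82 00 02 00 00      ; mod=2: [rdx+disp32]
   A base of RSP/R12 additionally requires the 24h SIB byte, and RBP/R13
   cannot use the mod=0 form, which is why xSP and xBP are special-cased. */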

/**
 * Emits setting a GPR to zero.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitGprZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr)
{
#ifdef RT_ARCH_AMD64
    /* xor gpr32, gpr32 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
    pbCodeBuf[off++] = 0x33;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGpr & 7, iGpr & 7);

#elif defined(RT_ARCH_ARM64)
    /* mov gpr, #0x0 */
    uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = UINT32_C(0xd2800000) | iGpr;

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Variant of iemNativeEmitLoadGprImm64 where the caller ensures sufficient
 * buffer space.
 *
 * Max buffer consumption:
 *      - AMD64: 10 instruction bytes.
 *      - ARM64: 4 instruction words (16 bytes).
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitLoadGprImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGpr, uint64_t uImm64)
{
#ifdef RT_ARCH_AMD64
    if (uImm64 == 0)
    {
        /* xor gpr, gpr */
        if (iGpr >= 8)
            pCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
        pCodeBuf[off++] = 0x33;
        pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGpr & 7, iGpr & 7);
    }
    else if (uImm64 <= UINT32_MAX)
    {
        /* mov gpr, imm32 */
        if (iGpr >= 8)
            pCodeBuf[off++] = X86_OP_REX_B;
        pCodeBuf[off++] = 0xb8 + (iGpr & 7);
        pCodeBuf[off++] = RT_BYTE1(uImm64);
        pCodeBuf[off++] = RT_BYTE2(uImm64);
        pCodeBuf[off++] = RT_BYTE3(uImm64);
        pCodeBuf[off++] = RT_BYTE4(uImm64);
    }
    else if (uImm64 == (uint64_t)(int32_t)uImm64)
    {
        /* mov gpr, sx(imm32) */
        if (iGpr < 8)
            pCodeBuf[off++] = X86_OP_REX_W;
        else
            pCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_B;
        pCodeBuf[off++] = 0xc7;
        pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGpr & 7);
        pCodeBuf[off++] = RT_BYTE1(uImm64);
        pCodeBuf[off++] = RT_BYTE2(uImm64);
        pCodeBuf[off++] = RT_BYTE3(uImm64);
        pCodeBuf[off++] = RT_BYTE4(uImm64);
    }
    else
    {
        /* mov gpr, imm64 */
        if (iGpr < 8)
            pCodeBuf[off++] = X86_OP_REX_W;
        else
            pCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_B;
        pCodeBuf[off++] = 0xb8 + (iGpr & 7);
        pCodeBuf[off++] = RT_BYTE1(uImm64);
        pCodeBuf[off++] = RT_BYTE2(uImm64);
        pCodeBuf[off++] = RT_BYTE3(uImm64);
        pCodeBuf[off++] = RT_BYTE4(uImm64);
        pCodeBuf[off++] = RT_BYTE5(uImm64);
        pCodeBuf[off++] = RT_BYTE6(uImm64);
        pCodeBuf[off++] = RT_BYTE7(uImm64);
        pCodeBuf[off++] = RT_BYTE8(uImm64);
    }

#elif defined(RT_ARCH_ARM64)
    /*
     * We need to start this sequence with a 'mov gpr, imm16, lsl #x' and
     * supply remaining bits using 'movk gpr, imm16, lsl #x'.
     *
     * The mov instruction is encoded 0xd2800000 + shift + imm16 + gpr,
     * while the movk is 0xf2800000 + shift + imm16 + gpr, meaning the diff
     * is 0x20000000 (bit 29).  So, we keep this bit in a variable and set it
     * after the first non-zero immediate component so we switch to movk for
     * the remainder.
     */
    unsigned cZeroHalfWords = !( uImm64        & UINT16_MAX)
                            + !((uImm64 >> 16) & UINT16_MAX)
                            + !((uImm64 >> 32) & UINT16_MAX)
                            + !((uImm64 >> 48) & UINT16_MAX);
    unsigned cFfffHalfWords = cZeroHalfWords >= 2 ? 0 /* skip */
                            : ( (uImm64        & UINT16_MAX) == UINT16_MAX)
                            + (((uImm64 >> 16) & UINT16_MAX) == UINT16_MAX)
                            + (((uImm64 >> 32) & UINT16_MAX) == UINT16_MAX)
                            + (((uImm64 >> 48) & UINT16_MAX) == UINT16_MAX);
    if (cFfffHalfWords <= cZeroHalfWords)
    {
        uint32_t fMovBase = UINT32_C(0xd2800000) | iGpr;

        /* movz gpr, imm16 */
        uint32_t uImmPart = (uint32_t)((uImm64 >>  0) & UINT32_C(0xffff));
        if (uImmPart || cZeroHalfWords == 4)
        {
            pCodeBuf[off++] = fMovBase | (UINT32_C(0) << 21) | (uImmPart << 5);
            fMovBase |= RT_BIT_32(29);
        }
        /* mov[z/k] gpr, imm16, lsl #16 */
        uImmPart = (uint32_t)((uImm64 >> 16) & UINT32_C(0xffff));
        if (uImmPart)
        {
            pCodeBuf[off++] = fMovBase | (UINT32_C(1) << 21) | (uImmPart << 5);
            fMovBase |= RT_BIT_32(29);
        }
        /* mov[z/k] gpr, imm16, lsl #32 */
        uImmPart = (uint32_t)((uImm64 >> 32) & UINT32_C(0xffff));
        if (uImmPart)
        {
            pCodeBuf[off++] = fMovBase | (UINT32_C(2) << 21) | (uImmPart << 5);
            fMovBase |= RT_BIT_32(29);
        }
        /* mov[z/k] gpr, imm16, lsl #48 */
        uImmPart = (uint32_t)((uImm64 >> 48) & UINT32_C(0xffff));
        if (uImmPart)
            pCodeBuf[off++] = fMovBase | (UINT32_C(3) << 21) | (uImmPart << 5);
    }
    else
    {
        uint32_t fMovBase = UINT32_C(0x92800000) | iGpr;

        /* find the first half-word that isn't UINT16_MAX. */
        uint32_t const iHwNotFfff = (uImm64        & UINT16_MAX) != UINT16_MAX ? 0
                                  : ((uImm64 >> 16) & UINT16_MAX) != UINT16_MAX ? 1
                                  : ((uImm64 >> 32) & UINT16_MAX) != UINT16_MAX ? 2 : 3;

        /* movn gpr, imm16, lsl #iHwNotFfff*16 */
        uint32_t uImmPart = (uint32_t)(~(uImm64 >> (iHwNotFfff * 16)) & UINT32_C(0xffff)) << 5;
        pCodeBuf[off++] = fMovBase | (iHwNotFfff << 21) | uImmPart;
        fMovBase |= RT_BIT_32(30) | RT_BIT_32(29); /* -> movk */
        /* movk gpr, imm16 */
        if (iHwNotFfff != 0)
        {
            uImmPart = (uint32_t)((uImm64 >>  0) & UINT32_C(0xffff));
            if (uImmPart != UINT32_C(0xffff))
                pCodeBuf[off++] = fMovBase | (UINT32_C(0) << 21) | (uImmPart << 5);
        }
        /* movk gpr, imm16, lsl #16 */
        if (iHwNotFfff != 1)
        {
            uImmPart = (uint32_t)((uImm64 >> 16) & UINT32_C(0xffff));
            if (uImmPart != UINT32_C(0xffff))
                pCodeBuf[off++] = fMovBase | (UINT32_C(1) << 21) | (uImmPart << 5);
        }
        /* movk gpr, imm16, lsl #32 */
        if (iHwNotFfff != 2)
        {
            uImmPart = (uint32_t)((uImm64 >> 32) & UINT32_C(0xffff));
            if (uImmPart != UINT32_C(0xffff))
                pCodeBuf[off++] = fMovBase | (UINT32_C(2) << 21) | (uImmPart << 5);
        }
        /* movk gpr, imm16, lsl #48 */
        if (iHwNotFfff != 3)
        {
            uImmPart = (uint32_t)((uImm64 >> 48) & UINT32_C(0xffff));
            if (uImmPart != UINT32_C(0xffff))
                pCodeBuf[off++] = fMovBase | (UINT32_C(3) << 21) | (uImmPart << 5);
        }
    }

    /** @todo load into 'w' register instead of 'x' when imm64 <= UINT32_MAX?
     *        clang 12.x does that, only to use the 'x' version for the
     *        addressing in the following ldr). */

#else
# error "port me"
#endif
    return off;
}
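
/* Illustration only: two hand-derived expansions of the selection logic in
   iemNativeEmitLoadGprImmEx on ARM64, assuming iGpr = x0:
        uImm64 = 0x0000CAFEF00DBEEF    ; one zero half-word, no 0xffff ones
            movz x0, #0xbeef
            movk x0, #0xf00d, lsl #16
            movk x0, #0xcafe, lsl #32  ; half-word 3 is zero and is skipped
        uImm64 = 0xFFFFFFFFFFFF1234    ; three 0xffff half-words
            movn x0, #0xedcb           ; x0 = ~0x000000000000edcb
   Counting the zero and 0xffff half-words is what keeps common constants at
   one or two instructions instead of a full movz + 3 x movk chain. */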


/**
 * Emits loading a constant into a 64-bit GPR.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprImm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint64_t uImm64)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprImmEx(iemNativeInstrBufEnsure(pReNative, off, 10), off, iGpr, uImm64);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitLoadGprImmEx(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGpr, uImm64);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Variant of iemNativeEmitLoadGpr32Imm where the caller ensures sufficient
 * buffer space.
 *
 * Max buffer consumption:
 *      - AMD64: 6 instruction bytes.
 *      - ARM64: 2 instruction words (8 bytes).
 *
 * @note The top 32 bits will be cleared.
 */
DECLINLINE(uint32_t) iemNativeEmitLoadGpr32ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGpr, uint32_t uImm32)
{
#ifdef RT_ARCH_AMD64
    if (uImm32 == 0)
    {
        /* xor gpr, gpr */
        if (iGpr >= 8)
            pCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
        pCodeBuf[off++] = 0x33;
        pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGpr & 7, iGpr & 7);
    }
    else
    {
        /* mov gpr, imm32 */
        if (iGpr >= 8)
            pCodeBuf[off++] = X86_OP_REX_B;
        pCodeBuf[off++] = 0xb8 + (iGpr & 7);
        pCodeBuf[off++] = RT_BYTE1(uImm32);
        pCodeBuf[off++] = RT_BYTE2(uImm32);
        pCodeBuf[off++] = RT_BYTE3(uImm32);
        pCodeBuf[off++] = RT_BYTE4(uImm32);
    }

#elif defined(RT_ARCH_ARM64)
    if ((uImm32 >> 16) == 0)
        /* movz gpr, imm16 */
        pCodeBuf[off++] = Armv8A64MkInstrMovZ(iGpr, uImm32,                    0, false /*f64Bit*/);
    else if ((uImm32 & UINT32_C(0xffff)) == 0)
        /* movz gpr, imm16, lsl #16 */
        pCodeBuf[off++] = Armv8A64MkInstrMovZ(iGpr, uImm32 >> 16,              1, false /*f64Bit*/);
    else if ((uImm32 & UINT32_C(0xffff)) == UINT32_C(0xffff))
        /* movn gpr, imm16, lsl #16 */
        pCodeBuf[off++] = Armv8A64MkInstrMovN(iGpr, ~uImm32 >> 16,             1, false /*f64Bit*/);
    else if ((uImm32 >> 16) == UINT32_C(0xffff))
        /* movn gpr, imm16 */
        pCodeBuf[off++] = Armv8A64MkInstrMovN(iGpr, ~uImm32,                   0, false /*f64Bit*/);
    else
    {
        pCodeBuf[off++] = Armv8A64MkInstrMovZ(iGpr, uImm32 & UINT32_C(0xffff), 0, false /*f64Bit*/);
        pCodeBuf[off++] = Armv8A64MkInstrMovK(iGpr, uImm32 >> 16,              1, false /*f64Bit*/);
    }

#else
# error "port me"
#endif
    return off;
}
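
/* Illustration only: one hand-derived example per branch above (iGpr = w0):
        0x00001234 -> movz w0, #0x1234
        0x12340000 -> movz w0, #0x1234, lsl #16
        0x1234ffff -> movn w0, #0xedcb, lsl #16    ; w0 = ~0xedcb0000
        0xffff1234 -> movn w0, #0xedcb             ; w0 = ~0x0000edcb
        0x12345678 -> movz w0, #0x5678
                      movk w0, #0x1234, lsl #16
   Only the fully general last case costs two instructions. */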


/**
 * Emits loading a constant into a 32-bit GPR.
 * @note The top 32 bits will be cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGpr32Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t uImm32)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGpr32ImmEx(iemNativeInstrBufEnsure(pReNative, off, 6), off, iGpr, uImm32);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitLoadGpr32ImmEx(iemNativeInstrBufEnsure(pReNative, off, 2), off, iGpr, uImm32);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits loading a constant into an 8-bit GPR.
 * @note The AMD64 version does *NOT* clear any bits in the 8..63 range,
 *       only the ARM64 version does that.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGpr8Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint8_t uImm8)
{
#ifdef RT_ARCH_AMD64
    /* mov gpr, imm8 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_B;
    else if (iGpr >= 4)
        pbCodeBuf[off++] = X86_OP_REX;
    pbCodeBuf[off++] = 0xb0 + (iGpr & 7);
    pbCodeBuf[off++] = RT_BYTE1(uImm8);

#elif defined(RT_ARCH_ARM64)
    /* movz gpr, imm16, lsl #0 */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = UINT32_C(0xd2800000) | (UINT32_C(0) << 21) | ((uint32_t)uImm8 << 5) | iGpr;

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


#ifdef RT_ARCH_AMD64
/**
 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitGprByVCpuDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, uint32_t offVCpu)
{
    if (offVCpu < 128)
    {
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, IEMNATIVE_REG_FIXED_PVMCPU);
        pbCodeBuf[off++] = (uint8_t)(int8_t)offVCpu;
    }
    else
    {
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, IEMNATIVE_REG_FIXED_PVMCPU);
        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offVCpu);
        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offVCpu);
        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offVCpu);
        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offVCpu);
    }
    return off;
}

#elif defined(RT_ARCH_ARM64)

/**
 * Common bit of iemNativeEmitLoadGprFromVCpuU64Ex and friends.
 *
 * @note Loads can use @a iGprReg for large offsets, stores require a
 *       temporary register (@a iGprTmp).
 * @note DON'T try this with prefetch.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitGprByVCpuLdStEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprReg, uint32_t offVCpu,
                             ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData, uint8_t iGprTmp = UINT8_MAX)
{
    /*
     * There are a couple of ldr variants that take an immediate offset, so
     * try to use those if we can, otherwise we have to use a temporary
     * register to help with the addressing.
     */
    if (offVCpu < _4K * cbData && !(offVCpu & (cbData - 1)))
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData) && !(offVCpu & (cbData - 1)))
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
    else if (!ARMV8A64INSTRLDSTTYPE_IS_STORE(enmOperation) || iGprTmp != UINT8_MAX)
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
        /** @todo reduce offVCpu by >> 3 or >> 2 if it saves instructions? */
        if (iGprTmp == UINT8_MAX)
            iGprTmp = iGprReg;
        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprTmp, offVCpu);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU, iGprTmp);
    }
    else
# ifdef IEM_WITH_THROW_CATCH
        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
# else
        AssertReleaseFailedStmt(off = UINT32_MAX);
# endif

    return off;
}
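
/* Illustration only of the three addressing strategies above for a 64-bit
   access (cbData = 8); the offsets are made up to trigger each branch and
   the register names merely stand in for IEMNATIVE_REG_FIXED_PVMCPU and
   IEMNATIVE_REG_FIXED_PCPUMCTX (see IEMN8veRecompiler.h for the real
   assignments):
        offVCpu = 0x1230 (8-aligned, < 32 KiB)  -> ldr x0, [xVCpu, #0x1230]
        in range of cpum.GstCtx instead         -> ldr x0, [xCtx, #rel]
        anything else                           -> movz/movk xTmp, #offVCpu
                                                   ldr x0, [xVCpu, xTmp]     */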

/**
 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitGprByVCpuLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
                           uint32_t offVCpu, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
{
    /*
     * There are a couple of ldr variants that take an immediate offset, so
     * try to use those if we can, otherwise we have to use a temporary
     * register to help with the addressing.
     */
    if (offVCpu < _4K * cbData && !(offVCpu & (cbData - 1)))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
    }
    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData) && !(offVCpu & (cbData - 1)))
    {
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                      (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
    }
    else
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
        /** @todo reduce offVCpu by >> 3 or >> 2 if it saves instructions? */
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, offVCpu);
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU,
                                                       IEMNATIVE_REG_FIXED_TMP0);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}

#endif /* RT_ARCH_ARM64 */


/**
 * Emits a 64-bit GPR load of a VCpu value.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromVCpuU64Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* mov reg64, mem64 */
    if (iGpr < 8)
        pCodeBuf[off++] = X86_OP_REX_W;
    else
        pCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    pCodeBuf[off++] = 0x8b;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, iGpr, offVCpu);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 64-bit GPR load of a VCpu value.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprFromVCpuU64Ex(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGpr, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 32-bit GPR load of a VCpu value.
 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* mov reg32, mem32 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x8b;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 16-bit GPR load of a VCpu value.
 * @note Bits 16 thru 63 in the GPR will be zero after the operation.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* movzx reg32, mem16 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xb7;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Half, sizeof(uint16_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits an 8-bit GPR load of a VCpu value.
 * @note Bits 8 thru 63 in the GPR will be zero after the operation.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* movzx reg32, mem8 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xb6;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Byte, sizeof(uint8_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a store of a GPR value to a 64-bit VCpu field.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitStoreGprToVCpuU64Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGpr, uint32_t offVCpu,
                                 uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* mov mem64, reg64 */
    if (iGpr < 8)
        pCodeBuf[off++] = X86_OP_REX_W;
    else
        pCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    pCodeBuf[off++] = 0x89;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, iGpr, offVCpu);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Dword, sizeof(uint64_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a store of a GPR value to a 64-bit VCpu field.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGprToVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitStoreGprToVCpuU64Ex(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGpr, offVCpu);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitStoreGprToVCpuU64Ex(iemNativeInstrBufEnsure(pReNative, off, 5), off, iGpr, offVCpu,
                                           IEMNATIVE_REG_FIXED_TMP0);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a store of a GPR value to a 32-bit VCpu field.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGprToVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* mov mem32, reg32 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x89;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Word, sizeof(uint32_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a store of a GPR value to a 16-bit VCpu field.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGprToVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* mov mem16, reg16 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x89;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Half, sizeof(uint16_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a store of a GPR value to an 8-bit VCpu field.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGprToVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* mov mem8, reg8 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGpr >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x88;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a store of an immediate value to a 64-bit VCpu field.
 *
 * @note Will allocate temporary registers on both ARM64 and AMD64.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitStoreImmToVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint64_t uImm, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* Load the immediate into a temporary register, then mov mem64, reg64. */
    uint8_t const idxRegImm = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
    off = iemNativeEmitStoreGprToVCpuU64Ex(iemNativeInstrBufEnsure(pReNative, off, 7), off, idxRegImm, offVCpu);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    iemNativeRegFreeTmpImm(pReNative, idxRegImm);

#elif defined(RT_ARCH_ARM64)
    uint8_t const idxRegImm = uImm == 0 ? ARMV8_A64_REG_XZR : iemNativeRegAllocTmpImm(pReNative, &off, uImm);
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, idxRegImm, offVCpu, kArmv8A64InstrLdStType_St_Dword, sizeof(uint64_t));
    if (idxRegImm != ARMV8_A64_REG_XZR)
        iemNativeRegFreeTmpImm(pReNative, idxRegImm);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a store of an immediate value to a 32-bit VCpu field.
 *
 * @note ARM64: Will allocate temporary registers.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitStoreImmToVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t uImm, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* mov mem32, imm32 */
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
    pCodeBuf[off++] = 0xc7;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
    pCodeBuf[off++] = RT_BYTE1(uImm);
    pCodeBuf[off++] = RT_BYTE2(uImm);
    pCodeBuf[off++] = RT_BYTE3(uImm);
    pCodeBuf[off++] = RT_BYTE4(uImm);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    uint8_t const idxRegImm = uImm == 0 ? ARMV8_A64_REG_XZR : iemNativeRegAllocTmpImm(pReNative, &off, uImm);
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, idxRegImm, offVCpu, kArmv8A64InstrLdStType_St_Word, sizeof(uint32_t));
    if (idxRegImm != ARMV8_A64_REG_XZR)
        iemNativeRegFreeTmpImm(pReNative, idxRegImm);

#else
# error "port me"
#endif
    return off;
}



/**
 * Emits a store of an immediate value to a 16-bit VCpu field.
 *
 * @note ARM64: idxTmp1 is always required! Whether idxTmp2 is needed depends
 *       on whether the offset can be encoded as an immediate. The @a offVCpu
 *       immediate range is 0..8190 bytes from VMCPU and the same from CPUMCPU.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitStoreImmToVCpuU16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint16_t uImm, uint32_t offVCpu,
                                 uint8_t idxTmp1 = UINT8_MAX, uint8_t idxTmp2 = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* mov mem16, imm16 */
    pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    pCodeBuf[off++] = 0xc7;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
    pCodeBuf[off++] = RT_BYTE1(uImm);
    pCodeBuf[off++] = RT_BYTE2(uImm);
    RT_NOREF(idxTmp1, idxTmp2);

#elif defined(RT_ARCH_ARM64)
    if (idxTmp1 != UINT8_MAX)
    {
        pCodeBuf[off++] = Armv8A64MkInstrMovZ(idxTmp1, uImm);
        off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, idxTmp1, offVCpu, kArmv8A64InstrLdStType_St_Half,
                                           sizeof(uint16_t), idxTmp2);
    }
    else
# ifdef IEM_WITH_THROW_CATCH
        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
# else
        AssertReleaseFailedStmt(off = UINT32_MAX);
# endif

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a store of an immediate value to an 8-bit VCpu field.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreImmToVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t bImm, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* mov mem8, imm8 */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    pbCodeBuf[off++] = 0xc6;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, 0, offVCpu);
    pbCodeBuf[off++] = bImm;
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    /* Cannot use IEMNATIVE_REG_FIXED_TMP0 for the immediate as that's used by iemNativeEmitGprByVCpuLdSt. */
    uint8_t const idxRegImm = iemNativeRegAllocTmpImm(pReNative, &off, bImm);
    off = iemNativeEmitGprByVCpuLdSt(pReNative, off, idxRegImm, offVCpu, kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t));
    iemNativeRegFreeTmpImm(pReNative, idxRegImm);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a load effective address to a GPR of a VCpu field.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLeaGprByVCpu(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* lea gprdst, [rbx + offDisp] */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGprDst < 8)
        pbCodeBuf[off++] = X86_OP_REX_W;
    else
        pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    pbCodeBuf[off++] = 0x8d;
    off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGprDst, offVCpu);

#elif defined(RT_ARCH_ARM64)
    if (offVCpu < (unsigned)_4K)
    {
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu);
    }
    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)_4K)
    {
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                         offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx));
    }
    else
    {
        Assert(iGprDst != IEMNATIVE_REG_FIXED_PVMCPU);
        off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, offVCpu);
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, IEMNATIVE_REG_FIXED_PCPUMCTX, iGprDst);
    }

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/** This is just a typesafe alternative to RT_UOFFSETOF. */
DECL_FORCE_INLINE(uint32_t) iemNativeVCpuOffsetFromStamCounterPtr(PVMCPU pVCpu, PSTAMCOUNTER pStamCounter)
{
    uintptr_t const off = (uintptr_t)pStamCounter - (uintptr_t)pVCpu;
    Assert(off < sizeof(VMCPU));
    return off;
}


/** This is just a typesafe alternative to RT_UOFFSETOF. */
DECL_FORCE_INLINE(uint32_t) iemNativeVCpuOffsetFromU64Ptr(PVMCPU pVCpu, uint64_t *pu64)
{
    uintptr_t const off = (uintptr_t)pu64 - (uintptr_t)pVCpu;
    Assert(off < sizeof(VMCPU));
    return off;
}
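
/* Illustration only, with a hypothetical statistics member (any STAMCOUNTER
   or uint64_t inside VMCPU works the same); the point of the helpers above
   is that the compiler verifies the member type, unlike a raw RT_UOFFSETOF:
        uint32_t const offStat = iemNativeVCpuOffsetFromStamCounterPtr(pVCpu, &pVCpu->iem.s.StatSomeEvent);
        off = iemNativeEmitIncStamCounterInVCpu(pReNative, off, idxTmp1, idxTmp2, offStat);
*/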


/**
 * Emits code for incrementing a statistics counter (STAMCOUNTER/uint64_t) in VMCPU.
 *
 * @note The two temp registers are not required for AMD64.  ARM64 always
 *       requires the first, and the 2nd is needed if the offset cannot be
 *       encoded as an immediate.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitIncStamCounterInVCpuEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t idxTmp1, uint8_t idxTmp2, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    /* inc qword [pVCpu + off] */
    pCodeBuf[off++] = X86_OP_REX_W;
    pCodeBuf[off++] = 0xff;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
    RT_NOREF(idxTmp1, idxTmp2);

#elif defined(RT_ARCH_ARM64)
    /* Determine how we're to access pVCpu first. */
    uint32_t const cbData = sizeof(STAMCOUNTER);
    if (offVCpu < _4K * cbData && !(offVCpu & (cbData - 1)))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxTmp1,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxTmp1, idxTmp1, 1);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, idxTmp1,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
    }
    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData) && !(offVCpu & (cbData - 1)))
    {
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Dword, idxTmp1, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxTmp1, idxTmp1, 1);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, idxTmp1, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
    }
    else
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxTmp2, offVCpu);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_Ld_Dword, idxTmp1, IEMNATIVE_REG_FIXED_PVMCPU, idxTmp2);
        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxTmp1, idxTmp1, 1);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Dword, idxTmp1, IEMNATIVE_REG_FIXED_PVMCPU, idxTmp2);
    }

#else
# error "port me"
#endif
    return off;
}
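
/* Illustration only: on AMD64 the increment above is a single read-modify-
   write instruction; with IEMNATIVE_REG_FIXED_PVMCPU being rbx (cf. the lea
   comment earlier in this file) and a disp8-reachable counter at offset
   0x48, the emitted bytes would be:
        48 ff 43 48             inc qword [rbx+048h]
   ARM64 has no memory RMW instruction, hence the ldr/add/str triplet. */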


/**
 * Emits code for incrementing a statistics counter (STAMCOUNTER/uint64_t) in VMCPU.
 *
 * @note The two temp registers are not required for AMD64.  ARM64 always
 *       requires the first, and the 2nd is needed if the offset cannot be
 *       encoded as an immediate.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitIncStamCounterInVCpu(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxTmp1, uint8_t idxTmp2, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitIncStamCounterInVCpuEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, idxTmp1, idxTmp2, offVCpu);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitIncStamCounterInVCpuEx(iemNativeInstrBufEnsure(pReNative, off, 4+3), off, idxTmp1, idxTmp2, offVCpu);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits code for incrementing an unsigned 32-bit statistics counter in VMCPU.
 *
 * @note The two temp registers are not required for AMD64.  ARM64 always
 *       requires the first, and the 2nd is needed if the offset cannot be
 *       encoded as an immediate.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitIncU32CounterInVCpuEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t idxTmp1, uint8_t idxTmp2, uint32_t offVCpu)
{
    Assert(!(offVCpu & 3)); /* ASSUME correctly aligned member. */
#ifdef RT_ARCH_AMD64
    /* inc dword [pVCpu + offVCpu] */
    pCodeBuf[off++] = 0xff;
    off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 0, offVCpu);
    RT_NOREF(idxTmp1, idxTmp2);

#elif defined(RT_ARCH_ARM64)
    /* Determine how we're to access pVCpu first. */
    uint32_t const cbData = sizeof(uint32_t);
    if (offVCpu < (unsigned)(_4K * cbData))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxTmp1,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxTmp1, idxTmp1, 1);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Word, idxTmp1,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
    }
    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData))
    {
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxTmp1, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
        pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxTmp1, idxTmp1, 1);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Word, idxTmp1, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
    }
    else
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)].  We'll try to use the 'LSL, #2'
           feature of the instruction if that'll reduce the constant to 16-bits. */
        if (offVCpu / cbData < (unsigned)UINT16_MAX)
        {
            pCodeBuf[off++] = Armv8A64MkInstrMovZ(idxTmp2, offVCpu / cbData);
            pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_Ld_Word, idxTmp1, IEMNATIVE_REG_FIXED_PVMCPU,
                                                        idxTmp2, kArmv8A64InstrLdStExtend_Lsl, true /*fShifted(2)*/);
            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxTmp1, idxTmp1, 1);
            pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Word, idxTmp1, IEMNATIVE_REG_FIXED_PVMCPU,
                                                        idxTmp2, kArmv8A64InstrLdStExtend_Lsl, true /*fShifted(2)*/);
        }
        else
        {
            off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxTmp2, offVCpu);
            pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_Ld_Word, idxTmp1, IEMNATIVE_REG_FIXED_PVMCPU, idxTmp2);
            pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(idxTmp1, idxTmp1, 1);
            pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Word, idxTmp1, IEMNATIVE_REG_FIXED_PVMCPU, idxTmp2);
        }
    }

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits code for incrementing an unsigned 32-bit statistics counter in VMCPU.
 *
 * @note The two temp registers are not required for AMD64.  ARM64 always
 *       requires the first, and the 2nd is needed if the offset cannot be
 *       encoded as an immediate.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitIncU32CounterInVCpu(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxTmp1, uint8_t idxTmp2, uint32_t offVCpu)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitIncU32CounterInVCpuEx(iemNativeInstrBufEnsure(pReNative, off, 6), off, idxTmp1, idxTmp2, offVCpu);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitIncU32CounterInVCpuEx(iemNativeInstrBufEnsure(pReNative, off, 4+3), off, idxTmp1, idxTmp2, offVCpu);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits code for OR'ing a bitmask into a 32-bit VMCPU member.
 *
 * @note May allocate temporary registers (not AMD64).
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitOrImmIntoVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fMask, uint32_t offVCpu)
{
    Assert(!(offVCpu & 3)); /* ASSUME correctly aligned member. */
#ifdef RT_ARCH_AMD64
    /* or dword [pVCpu + offVCpu], imm8/32 */
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
    if (fMask < 0x80)
    {
        pCodeBuf[off++] = 0x83;
        off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 1, offVCpu);
        pCodeBuf[off++] = (uint8_t)fMask;
    }
    else
    {
        pCodeBuf[off++] = 0x81;
        off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 1, offVCpu);
        pCodeBuf[off++] = RT_BYTE1(fMask);
        pCodeBuf[off++] = RT_BYTE2(fMask);
        pCodeBuf[off++] = RT_BYTE3(fMask);
        pCodeBuf[off++] = RT_BYTE4(fMask);
    }

#elif defined(RT_ARCH_ARM64)
    /* If the constant is unwieldy we'll need a register to hold it as well. */
    uint32_t uImmSizeLen, uImmRotate;
    uint8_t const idxTmpMask = Armv8A64ConvertMask32ToImmRImmS(fMask, &uImmSizeLen, &uImmRotate) ? UINT8_MAX
                             : iemNativeRegAllocTmpImm(pReNative, &off, fMask);

    /* We need a temp register for holding the member value we're modifying. */
    uint8_t const idxTmpValue = iemNativeRegAllocTmp(pReNative, &off);

    /* Determine how we're to access pVCpu first. */
    uint32_t const cbData = sizeof(uint32_t);
    if (offVCpu < (unsigned)(_4K * cbData))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxTmpValue,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
        if (idxTmpMask == UINT8_MAX)
            pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxTmpValue, idxTmpValue, uImmSizeLen, uImmRotate, false /*f64Bit*/);
        else
            pCodeBuf[off++] = Armv8A64MkInstrOrr(idxTmpValue, idxTmpValue, idxTmpMask, false /*f64Bit*/);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Word, idxTmpValue,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
    }
    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData))
    {
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
        if (idxTmpMask == UINT8_MAX)
            pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxTmpValue, idxTmpValue, uImmSizeLen, uImmRotate, false /*f64Bit*/);
        else
            pCodeBuf[off++] = Armv8A64MkInstrOrr(idxTmpValue, idxTmpValue, idxTmpMask, false /*f64Bit*/);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
    }
    else
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)].  We'll try to use the 'LSL, #2'
           feature of the instruction if that'll reduce the constant to 16-bits. */
        uint8_t const         idxTmpIndex = iemNativeRegAllocTmp(pReNative, &off);
        PIEMNATIVEINSTR const pCodeBuf    = iemNativeInstrBufEnsure(pReNative, off, 5);
        bool const            fShifted    = offVCpu / cbData < (unsigned)UINT16_MAX;
        if (fShifted)
            pCodeBuf[off++] = Armv8A64MkInstrMovZ(idxTmpIndex, offVCpu / cbData);
        else
            off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxTmpIndex, offVCpu);

        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_Ld_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PVMCPU,
                                                    idxTmpIndex, kArmv8A64InstrLdStExtend_Lsl, fShifted /*fShifted(2)*/);

        if (idxTmpMask == UINT8_MAX)
            pCodeBuf[off++] = Armv8A64MkInstrOrrImm(idxTmpValue, idxTmpValue, uImmSizeLen, uImmRotate, false /*f64Bit*/);
        else
            pCodeBuf[off++] = Armv8A64MkInstrOrr(idxTmpValue, idxTmpValue, idxTmpMask, false /*f64Bit*/);

        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PVMCPU,
                                                    idxTmpIndex, kArmv8A64InstrLdStExtend_Lsl, fShifted /*fShifted(2)*/);
        iemNativeRegFreeTmp(pReNative, idxTmpIndex);
    }
    iemNativeRegFreeTmp(pReNative, idxTmpValue);
    if (idxTmpMask != UINT8_MAX)
        iemNativeRegFreeTmp(pReNative, idxTmpMask);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
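
/* Illustration only: the AMD64 branch above uses the short sign-extended
   imm8 form when possible; for offVCpu = 0x40 the two encodings are:
        fMask = 0x01        -> 83 4b 40 01           or dword [rbx+040h], 001h
        fMask = 0x80000000  -> 81 4b 40 00 00 00 80  or dword [rbx+040h], 080000000h
   fMask = 0x80 already needs the long form because imm8 is sign-extended. */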


/**
 * Emits code for AND'ing a bitmask into a 32-bit VMCPU member.
 *
 * @note May allocate temporary registers (not AMD64).
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitAndImmIntoVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t fMask, uint32_t offVCpu)
{
    Assert(!(offVCpu & 3)); /* ASSUME correctly aligned member. */
#ifdef RT_ARCH_AMD64
    /* and dword [pVCpu + offVCpu], imm8/32 */
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
    if (fMask < 0x80)
    {
        pCodeBuf[off++] = 0x83;
        off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 4, offVCpu);
        pCodeBuf[off++] = (uint8_t)fMask;
    }
    else
    {
        pCodeBuf[off++] = 0x81;
        off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, 4, offVCpu);
        pCodeBuf[off++] = RT_BYTE1(fMask);
        pCodeBuf[off++] = RT_BYTE2(fMask);
        pCodeBuf[off++] = RT_BYTE3(fMask);
        pCodeBuf[off++] = RT_BYTE4(fMask);
    }

#elif defined(RT_ARCH_ARM64)
    /* If the constant is unwieldy we'll need a register to hold it as well. */
    uint32_t uImmSizeLen, uImmRotate;
    uint8_t const idxTmpMask = Armv8A64ConvertMask32ToImmRImmS(fMask, &uImmSizeLen, &uImmRotate) ? UINT8_MAX
                             : iemNativeRegAllocTmpImm(pReNative, &off, fMask);

    /* We need a temp register for holding the member value we're modifying. */
    uint8_t const idxTmpValue = iemNativeRegAllocTmp(pReNative, &off);

    /* Determine how we're to access pVCpu first. */
    uint32_t const cbData = sizeof(uint32_t);
    if (offVCpu < (unsigned)(_4K * cbData))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxTmpValue,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
        if (idxTmpMask == UINT8_MAX)
            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxTmpValue, idxTmpValue, uImmSizeLen, uImmRotate, false /*f64Bit*/);
        else
            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpValue, idxTmpValue, idxTmpMask, false /*f64Bit*/);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Word, idxTmpValue,
                                                   IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
    }
    else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData))
    {
        PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_Ld_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
        if (idxTmpMask == UINT8_MAX)
            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxTmpValue, idxTmpValue, uImmSizeLen, uImmRotate, false /*f64Bit*/);
        else
            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpValue, idxTmpValue, idxTmpMask, false /*f64Bit*/);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PCPUMCTX,
                                                   (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
    }
    else
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)].  We'll try to use the 'LSL, #2'
           feature of the instruction if that'll reduce the constant to 16-bits. */
        uint8_t const         idxTmpIndex = iemNativeRegAllocTmp(pReNative, &off);
        PIEMNATIVEINSTR const pCodeBuf    = iemNativeInstrBufEnsure(pReNative, off, 5);
        bool const            fShifted    = offVCpu / cbData < (unsigned)UINT16_MAX;
        if (fShifted)
            pCodeBuf[off++] = Armv8A64MkInstrMovZ(idxTmpIndex, offVCpu / cbData);
        else
            off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, idxTmpIndex, offVCpu);

        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_Ld_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PVMCPU,
                                                    idxTmpIndex, kArmv8A64InstrLdStExtend_Lsl, fShifted /*fShifted(2)*/);

        if (idxTmpMask == UINT8_MAX)
            pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxTmpValue, idxTmpValue, uImmSizeLen, uImmRotate, false /*f64Bit*/);
        else
            pCodeBuf[off++] = Armv8A64MkInstrAnd(idxTmpValue, idxTmpValue, idxTmpMask, false /*f64Bit*/);

        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Word, idxTmpValue, IEMNATIVE_REG_FIXED_PVMCPU,
                                                    idxTmpIndex, kArmv8A64InstrLdStExtend_Lsl, fShifted /*fShifted(2)*/);
        iemNativeRegFreeTmp(pReNative, idxTmpIndex);
    }
    iemNativeRegFreeTmp(pReNative, idxTmpValue);
    if (idxTmpMask != UINT8_MAX)
        iemNativeRegFreeTmp(pReNative, idxTmpMask);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a gprdst = gprsrc load.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitLoadGprFromGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* mov gprdst, gprsrc */
    if ((iGprDst | iGprSrc) >= 8)
        pCodeBuf[off++] = iGprDst < 8  ? X86_OP_REX_W | X86_OP_REX_B
                        : iGprSrc >= 8 ? X86_OP_REX_W | X86_OP_REX_R | X86_OP_REX_B
                        :                X86_OP_REX_W | X86_OP_REX_R;
    else
        pCodeBuf[off++] = X86_OP_REX_W;
    pCodeBuf[off++] = 0x8b;
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* mov dst, src;   alias for: orr dst, xzr, src */
    pCodeBuf[off++] = Armv8A64MkInstrOrr(iGprDst, ARMV8_A64_REG_XZR, iGprSrc);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a gprdst = gprsrc load.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprFromGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitLoadGprFromGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a gprdst = gprsrc[31:0] load.
 * @note Bits 63 thru 32 are cleared.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitLoadGprFromGpr32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* mov gprdst, gprsrc */
    if ((iGprDst | iGprSrc) >= 8)
        pCodeBuf[off++] = iGprDst < 8  ? X86_OP_REX_B
                        : iGprSrc >= 8 ? X86_OP_REX_R | X86_OP_REX_B
                        :                X86_OP_REX_R;
    pCodeBuf[off++] = 0x8b;
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* mov dst32, src32;   alias for: orr dst32, wzr, src32 */
    pCodeBuf[off++] = Armv8A64MkInstrOrr(iGprDst, ARMV8_A64_REG_WZR, iGprSrc, false /*f64bit*/);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a gprdst = gprsrc[31:0] load.
 * @note Bits 63 thru 32 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprFromGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitLoadGprFromGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a gprdst = gprsrc[15:0] load.
 * @note Bits 63 thru 16 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movzx Gv,Ew */
    if ((iGprDst | iGprSrc) >= 8)
        pCodeBuf[off++] = iGprDst < 8  ? X86_OP_REX_B
                        : iGprSrc >= 8 ? X86_OP_REX_R | X86_OP_REX_B
                        :                X86_OP_REX_R;
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xb7;
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* and gprdst, gprsrc, #0xffff */
# if 1
    Assert(Armv8A64ConvertImmRImmS2Mask32(0x0f, 0) == UINT16_MAX);
    pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprSrc, 0x0f, 0, false /*f64Bit*/);
# else
    Assert(Armv8A64ConvertImmRImmS2Mask64(0x4f, 0) == UINT16_MAX);
    pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprSrc, 0x4f, 0);
# endif

#else
# error "port me"
#endif
    return off;
}
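
/* Illustration only: in the Armv8A64MkInstrAndImm calls above and below, the
   third argument is the A64 bitmask-immediate 'imms' field, not the mask
   itself: imms=0x0f with immr=0 decodes to 16 consecutive one-bits (0xffff)
   and imms=0x07 to 8 one-bits (0xff), which is exactly what the asserts
   double-check via Armv8A64ConvertImmRImmS2Mask32. */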
1516
1517
/**
 * Emits a gprdst = gprsrc[15:0] load.
 * @note Bits 63 thru 16 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprFromGpr16Ex(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, iGprSrc);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitLoadGprFromGpr16Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a gprdst = gprsrc[7:0] load.
 * @note Bits 63 thru 8 are cleared.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitLoadGprFromGpr8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movzx Gv,Eb */
    if (iGprDst >= 8 || iGprSrc >= 8)
        pCodeBuf[off++] = iGprDst < 8  ? X86_OP_REX_B
                        : iGprSrc >= 8 ? X86_OP_REX_R | X86_OP_REX_B
                        :                X86_OP_REX_R;
    else if (iGprSrc >= 4)
        pCodeBuf[off++] = X86_OP_REX;
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xb6;
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* and gprdst, gprsrc, #0xff */
    Assert(Armv8A64ConvertImmRImmS2Mask32(0x07, 0) == UINT8_MAX);
    pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprSrc, 0x07, 0, false /*f64Bit*/);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a gprdst = gprsrc[7:0] load.
 * @note Bits 63 thru 8 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprFromGpr8Ex(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, iGprSrc);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitLoadGprFromGpr8Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a gprdst = gprsrc[15:8] load (ah, ch, dh, bh).
 * @note Bits 63 thru 8 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr8Hi(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);

    /* movzx Gv,Ew */
    if ((iGprDst | iGprSrc) >= 8)
        pbCodeBuf[off++] = iGprDst < 8  ? X86_OP_REX_B
                         : iGprSrc >= 8 ? X86_OP_REX_R | X86_OP_REX_B
                         :                X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xb7;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

    /* shr Ev,8 */
    if (iGprDst >= 8)
        pbCodeBuf[off++] = X86_OP_REX_B;
    pbCodeBuf[off++] = 0xc1;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
    pbCodeBuf[off++] = 8;

#elif defined(RT_ARCH_ARM64)
    /* ubfx gprdst, gprsrc, #8, #8 - gprdst = gprsrc[15:8] */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrUbfx(iGprDst, iGprSrc, 8, 8, false /*f64Bit*/);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


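/*
 * Illustrative usage sketch for the high-byte load above (register indexes
 * made up): fetching an AH-style value out of a host register shadowing rAX
 * costs a movzx + shr pair on AMD64 but only a single ubfx on ARM64.
 *
 *      // hypothetical: host reg 5 shadows guest rAX; put AH into host reg 4.
 *      off = iemNativeEmitLoadGprFromGpr8Hi(pReNative, off, 4, 5);
 */
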
/**
 * Sign-extends 32-bit value in @a iGprSrc into a 64-bit value in @a iGprDst.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprSignExtendedFromGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movsxd r64, r/m32 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
    pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x63;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* sxtw dst, src */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrSxtw(iGprDst, iGprSrc);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Sign-extends 16-bit value in @a iGprSrc into a 64-bit value in @a iGprDst.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprSignExtendedFromGpr16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movsx r64, r/m16 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
    pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xbf;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* sxth dst, src */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrSxth(iGprDst, iGprSrc);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Sign-extends 16-bit value in @a iGprSrc into a 32-bit value in @a iGprDst.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGpr32SignExtendedFromGpr16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movsx r32, r/m16 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
    if (iGprDst >= 8 || iGprSrc >= 8)
        pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xbf;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* sxth dst32, src */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrSxth(iGprDst, iGprSrc, false /*f64Bit*/);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Sign-extends 8-bit value in @a iGprSrc into a 64-bit value in @a iGprDst.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprSignExtendedFromGpr8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movsx r64, r/m8 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
    pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xbe;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* sxtb dst, src */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrSxtb(iGprDst, iGprSrc);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Sign-extends 8-bit value in @a iGprSrc into a 32-bit value in @a iGprDst.
 * @note Bits 63 thru 32 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGpr32SignExtendedFromGpr8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movsx r32, r/m8 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
    if (iGprDst >= 8 || iGprSrc >= 8)
        pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
    else if (iGprSrc >= 4)
        pbCodeBuf[off++] = X86_OP_REX;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xbe;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* sxtb dst32, src32 */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
    pu32CodeBuf[off++] = Armv8A64MkInstrSxtb(iGprDst, iGprSrc, false /*f64Bit*/);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Sign-extends 8-bit value in @a iGprSrc into a 16-bit value in @a iGprDst.
 * @note Bits 63 thru 16 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGpr16SignExtendedFromGpr8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* movsx r16, r/m8 */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 9);
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    if (iGprDst >= 8 || iGprSrc >= 8)
        pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
    else if (iGprSrc >= 4)
        pbCodeBuf[off++] = X86_OP_REX;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xbe;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);

    /* movzx r32, r/m16 */
    if (iGprDst >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xb7;
    pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprDst & 7);

#elif defined(RT_ARCH_ARM64)
    /* sxtb dst32, src32;  and dst32, dst32, #0xffff */
    uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
    pu32CodeBuf[off++] = Armv8A64MkInstrSxtb(iGprDst, iGprSrc, false /*f64Bit*/);
    Assert(Armv8A64ConvertImmRImmS2Mask32(15, 0) == 0xffff);
    pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 15, 0, false /*f64Bit*/);

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a gprdst = gprsrc + addend load.
 * @note The addend is 32-bit for AMD64 and 64-bit for ARM64.
 */
#ifdef RT_ARCH_AMD64
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGprWithAddend(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                      uint8_t iGprDst, uint8_t iGprSrc, int32_t iAddend)
{
    Assert(iAddend != 0);

    /* lea gprdst, [gprsrc + iAddend] */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst >= 8 ? X86_OP_REX_R : 0) | (iGprSrc >= 8 ? X86_OP_REX_B : 0);
    pbCodeBuf[off++] = 0x8d;
    off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprSrc, iAddend);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}

#elif defined(RT_ARCH_ARM64)
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGprWithAddend(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                      uint8_t iGprDst, uint8_t iGprSrc, int64_t iAddend)
{
    if ((uint64_t)iAddend < 4096) /* Compare as 64-bit so huge addends don't get truncated into the add path. */
    {
        /* add dst, src, uimm12 */
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprSrc, (uint32_t)iAddend);
    }
    else if ((uint64_t)-iAddend < 4096)
    {
        /* sub dst, src, uimm12 */
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprSrc, (uint32_t)-iAddend);
    }
    else
    {
        Assert(iGprSrc != iGprDst);
        off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, iAddend);
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprSrc, iGprDst);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
#else
# error "port me"
#endif

/**
 * Emits a gprdst = gprsrc + addend load, accepting iAddend == 0.
 * @note The addend is 32-bit for AMD64 and 64-bit for ARM64.
 */
#ifdef RT_ARCH_AMD64
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGprWithAddendMaybeZero(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                               uint8_t iGprDst, uint8_t iGprSrc, int32_t iAddend)
#else
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGprWithAddendMaybeZero(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                               uint8_t iGprDst, uint8_t iGprSrc, int64_t iAddend)
#endif
{
    if (iAddend != 0)
        return iemNativeEmitLoadGprFromGprWithAddend(pReNative, off, iGprDst, iGprSrc, iAddend);
    return iemNativeEmitLoadGprFromGpr(pReNative, off, iGprDst, iGprSrc);
}


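/*
 * Illustrative note: the MaybeZero variant above just dispatches on the
 * addend, so callers with a possibly-zero constant need no special casing of
 * their own.  Hypothetical usage sketch (register indexes and the cbExtra
 * variable are made up):
 *
 *      // dst = src + cbExtra, where cbExtra may legitimately be zero.
 *      off = iemNativeEmitLoadGprFromGprWithAddendMaybeZero(pReNative, off, 2, 3, cbExtra);
 */
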
/**
 * Emits a gprdst = gprsrc32 + addend load.
 * @note Bits 63 thru 32 are cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr32WithAddend(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                        uint8_t iGprDst, uint8_t iGprSrc, int32_t iAddend)
{
    Assert(iAddend != 0);

#ifdef RT_ARCH_AMD64
    /* a32 o32 lea gprdst, [gprsrc + iAddend] */
    uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 9);
    pbCodeBuf[off++] = X86_OP_PRF_SIZE_ADDR;
    if ((iGprDst | iGprSrc) >= 8)
        pbCodeBuf[off++] = (iGprDst >= 8 ? X86_OP_REX_R : 0) | (iGprSrc >= 8 ? X86_OP_REX_B : 0);
    pbCodeBuf[off++] = 0x8d;
    off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprSrc, iAddend);

#elif defined(RT_ARCH_ARM64)
    if ((uint32_t)iAddend < 4096)
    {
        /* add dst, src, uimm12 */
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprSrc, (uint32_t)iAddend, false /*f64Bit*/);
    }
    else if ((uint32_t)-iAddend < 4096)
    {
        /* sub dst, src, uimm12 */
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprSrc, (uint32_t)-iAddend, false /*f64Bit*/);
    }
    else
    {
        Assert(iGprSrc != iGprDst);
        off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, (int64_t)iAddend);
        uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprSrc, iGprDst, false /*f64Bit*/);
    }

#else
# error "port me"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a gprdst = gprsrc32 + addend load, accepting iAddend == 0.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprFromGpr32WithAddendMaybeZero(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                                 uint8_t iGprDst, uint8_t iGprSrc, int32_t iAddend)
{
    if (iAddend != 0)
        return iemNativeEmitLoadGprFromGpr32WithAddend(pReNative, off, iGprDst, iGprSrc, iAddend);
    return iemNativeEmitLoadGprFromGpr32(pReNative, off, iGprDst, iGprSrc);
}


/**
 * Emits a gprdst[15:0] = gprsrc[15:0], preserving all other bits in the
 * destination.
 */
DECL_FORCE_INLINE(uint32_t)
iemNativeEmitGprMergeInGpr16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t idxDst, uint8_t idxSrc)
{
#ifdef RT_ARCH_AMD64
    /* mov reg16, r/m16 */
    pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
    if (idxDst >= 8 || idxSrc >= 8)
        pCodeBuf[off++] = (idxDst < 8 ? 0 : X86_OP_REX_R) | (idxSrc < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x8b;
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, idxDst & 7, idxSrc & 7);

#elif defined(RT_ARCH_ARM64)
    /* bfi w1, w2, 0, 16 - moves bits 15:0 from idxSrc to idxDst bits 15:0. */
    pCodeBuf[off++] = Armv8A64MkInstrBfi(idxDst, idxSrc, 0, 16);

#else
# error "Port me!"
#endif
    return off;
}


/**
 * Emits a gprdst[15:0] = gprsrc[15:0], preserving all other bits in the
 * destination.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitGprMergeInGpr16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxDst, uint8_t idxSrc)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitGprMergeInGpr16Ex(iemNativeInstrBufEnsure(pReNative, off, 4), off, idxDst, idxSrc);
#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprMergeInGpr16Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, idxDst, idxSrc);
#else
# error "Port me!"
#endif
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


#ifdef RT_ARCH_AMD64
/**
 * Common bit of iemNativeEmitLoadGprByBp and friends.
 */
DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByBpDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, int32_t offDisp,
                                                     PIEMRECOMPILERSTATE pReNativeAssert)
{
    if (offDisp < 128 && offDisp >= -128)
    {
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, X86_GREG_xBP);
        pbCodeBuf[off++] = (uint8_t)(int8_t)offDisp;
    }
    else
    {
        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, X86_GREG_xBP);
        pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
        pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
        pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
        pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNativeAssert, off); RT_NOREF(pReNativeAssert);
    return off;
}
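/*
 * Illustrative note on the ModR/M emission above (sketch, not normative):
 * RBP-relative addressing always carries a displacement, so the helper picks
 * the short mod=01 form with one signed byte when the offset fits into int8_t
 * and falls back to mod=10 with a full 32-bit displacement otherwise.  E.g.
 * offDisp = -0x20 encodes as a single trailing 0xe0 byte, while
 * offDisp = 0x1234 takes the four bytes 34 12 00 00.
 */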
#elif defined(RT_ARCH_ARM64)
/**
 * Common bit of iemNativeEmitLoadGprByBp and friends.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitGprByBpLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
                         int32_t offDisp, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
{
    if ((uint32_t)offDisp < 4096U * cbData && !((uint32_t)offDisp & (cbData - 1)))
    {
        /* str/ldr w/ unsigned imm12 (scaled) */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, ARMV8_A64_REG_BP, (uint32_t)offDisp / cbData);
    }
    else if (offDisp >= -256 && offDisp <= 256)
    {
        /* stur/ldur w/ signed imm9 (unscaled) */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrSturLdur(enmOperation, iGprReg, ARMV8_A64_REG_BP, offDisp);
    }
    else
    {
        /* Use temporary indexing register. */
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uint32_t)offDisp);
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, ARMV8_A64_REG_BP,
                                                       IEMNATIVE_REG_FIXED_TMP0, kArmv8A64InstrLdStExtend_Sxtw);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
#endif


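/*
 * Illustrative note on the ARM64 frame addressing strategy above (sketch
 * only): a 64-bit slot at BP+0x28 is 8-byte aligned and in unsigned range,
 * so it takes the scaled imm12 form (imm12 = 0x28 / 8 = 5); BP-0x10 is
 * negative but within [-256,255], so it takes the unscaled ldur/stur form;
 * anything else goes through IEMNATIVE_REG_FIXED_TMP0 with register-indexed
 * addressing and sign extension.
 */
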
/**
 * Emits a 64-bit GPR load instruction with a BP relative source address.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    /* mov gprdst, qword [rbp + offDisp] */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGprDst < 8)
        pbCodeBuf[off++] = X86_OP_REX_W;
    else
        pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    pbCodeBuf[off++] = 0x8b;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);

#elif defined(RT_ARCH_ARM64)
    return iemNativeEmitGprByBpLdSt(pReNative, off, iGprDst, offDisp, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));

#else
# error "port me"
#endif
}


/**
 * Emits a 32-bit GPR load instruction with a BP relative source address.
 * @note Bits 63 thru 32 of the GPR will be cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByBpU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    /* mov gprdst, dword [rbp + offDisp] */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGprDst >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x8b;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);

#elif defined(RT_ARCH_ARM64)
    return iemNativeEmitGprByBpLdSt(pReNative, off, iGprDst, offDisp, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));

#else
# error "port me"
#endif
}


/**
 * Emits a 16-bit GPR load instruction with a BP relative source address.
 * @note Bits 63 thru 16 of the GPR will be cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByBpU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    /* movzx gprdst, word [rbp + offDisp] */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    if (iGprDst >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xb7;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);

#elif defined(RT_ARCH_ARM64)
    /* Note: cbData must match the 16-bit access size so the imm12 scaling comes out right. */
    return iemNativeEmitGprByBpLdSt(pReNative, off, iGprDst, offDisp, kArmv8A64InstrLdStType_Ld_Half, sizeof(uint16_t));

#else
# error "port me"
#endif
}


/**
 * Emits an 8-bit GPR load instruction with a BP relative source address.
 * @note Bits 63 thru 8 of the GPR will be cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByBpU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    /* movzx gprdst, byte [rbp + offDisp] */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
    if (iGprDst >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0xb6;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);

#elif defined(RT_ARCH_ARM64)
    /* Note: cbData must match the byte access size so the imm12 scaling comes out right. */
    return iemNativeEmitGprByBpLdSt(pReNative, off, iGprDst, offDisp, kArmv8A64InstrLdStType_Ld_Byte, sizeof(uint8_t));

#else
# error "port me"
#endif
}


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Emits a 128-bit vector register load instruction with a BP relative source address.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadVecRegByBpU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 9);

    /* movdqu reg128, mem128 */
    pbCodeBuf[off++] = 0xf3;
    if (iVecRegDst >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0x6f;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iVecRegDst, offDisp, pReNative);
#elif defined(RT_ARCH_ARM64)
    return iemNativeEmitGprByBpLdSt(pReNative, off, iVecRegDst, offDisp, kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));
#else
# error "port me"
#endif
}


/**
 * Emits a 256-bit vector register load instruction with a BP relative source address.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadVecRegByBpU256(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);

    /* vmovdqu reg256, mem256 */
    pbCodeBuf[off++] = X86_OP_VEX2;
    pbCodeBuf[off++] = X86_OP_VEX2_BYTE1_MAKE_NO_VVVV(iVecRegDst >= 8, true /*f256BitAvx*/, X86_OP_VEX2_BYTE1_P_0F3H);
    pbCodeBuf[off++] = 0x6f;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iVecRegDst, offDisp, pReNative);
#elif defined(RT_ARCH_ARM64)
    /* ASSUMES two consecutive vector registers for the 256-bit value. */
    Assert(!(iVecRegDst & 0x1));
    off = iemNativeEmitGprByBpLdSt(pReNative, off, iVecRegDst,     offDisp,                       kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));
    return iemNativeEmitGprByBpLdSt(pReNative, off, iVecRegDst + 1, offDisp + sizeof(RTUINT128U), kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));
#else
# error "port me"
#endif
}

#endif


/**
 * Emits a load effective address to a GPR with a BP relative source address.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLeaGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    /* lea gprdst, [rbp + offDisp] */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGprDst < 8)
        pbCodeBuf[off++] = X86_OP_REX_W;
    else
        pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    pbCodeBuf[off++] = 0x8d;
    off = iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);

#elif defined(RT_ARCH_ARM64)
    if ((uint32_t)offDisp < (unsigned)_4K)
    {
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, ARMV8_A64_REG_BP, (uint32_t)offDisp);
    }
    else if ((uint32_t)-offDisp < (unsigned)_4K)
    {
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, ARMV8_A64_REG_BP, (uint32_t)-offDisp);
    }
    else
    {
        Assert(iGprDst != IEMNATIVE_REG_FIXED_PVMCPU);
        off = iemNativeEmitLoadGprImm64(pReNative, off, iGprDst, offDisp >= 0 ? (uint32_t)offDisp : (uint32_t)-offDisp);
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        if (offDisp >= 0)
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, ARMV8_A64_REG_BP, iGprDst);
        else
            pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, iGprDst, ARMV8_A64_REG_BP, iGprDst);
    }

#else
# error "port me"
#endif

    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}


/**
 * Emits a 64-bit GPR store with a BP relative destination address.
 *
 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iGprSrc)
{
#ifdef RT_ARCH_AMD64
    /* mov qword [rbp + offDisp], gprsrc */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    if (iGprSrc < 8)
        pbCodeBuf[off++] = X86_OP_REX_W;
    else
        pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
    pbCodeBuf[off++] = 0x89;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprSrc, offDisp, pReNative);

#elif defined(RT_ARCH_ARM64)
    if (offDisp >= 0 && offDisp < 4096 * 8 && !((uint32_t)offDisp & 7))
    {
        /* str w/ unsigned imm12 (scaled) */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, iGprSrc,
                                                      ARMV8_A64_REG_BP, (uint32_t)offDisp / 8);
    }
    else if (offDisp >= -256 && offDisp <= 256)
    {
        /* stur w/ signed imm9 (unscaled) */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrSturLdur(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_REG_BP, offDisp);
    }
    else if ((uint32_t)-offDisp < (unsigned)_4K)
    {
        /* Use temporary indexing register w/ sub uimm12. */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, IEMNATIVE_REG_FIXED_TMP0,
                                                         ARMV8_A64_REG_BP, (uint32_t)-offDisp);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, iGprSrc, IEMNATIVE_REG_FIXED_TMP0, 0);
    }
    else
    {
        /* Use temporary indexing register. */
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uint32_t)offDisp);
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_REG_BP,
                                                       IEMNATIVE_REG_FIXED_TMP0, kArmv8A64InstrLdStExtend_Sxtw);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;

#else
# error "Port me!"
#endif
}


/**
 * Emits a 64-bit immediate store with a BP relative destination address.
 *
 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreImm64ByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint64_t uImm64)
{
#ifdef RT_ARCH_AMD64
    if ((int64_t)uImm64 == (int32_t)uImm64)
    {
        /* mov qword [rbp + offDisp], imm32 - sign extended */
        uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 11);
        pbCodeBuf[off++] = X86_OP_REX_W;
        pbCodeBuf[off++] = 0xc7;
        if (offDisp < 128 && offDisp >= -128)
        {
            pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, 0, X86_GREG_xBP);
            pbCodeBuf[off++] = (uint8_t)offDisp;
        }
        else
        {
            pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, 0, X86_GREG_xBP);
            pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
            pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
            pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
            pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
        }
        pbCodeBuf[off++] = RT_BYTE1(uImm64);
        pbCodeBuf[off++] = RT_BYTE2(uImm64);
        pbCodeBuf[off++] = RT_BYTE3(uImm64);
        pbCodeBuf[off++] = RT_BYTE4(uImm64);
        IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
        return off;
    }
#endif

    /* Load tmp0, imm64; Store tmp to bp+disp. */
    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, uImm64);
    return iemNativeEmitStoreGprByBp(pReNative, off, offDisp, IEMNATIVE_REG_FIXED_TMP0);
}


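/*
 * Illustrative usage sketch for the immediate store above (the stack slot
 * offset is made up): zeroing a hypothetical 64-bit slot at BP-0x18 in one
 * call.  On AMD64 a sign-extendable constant becomes a single
 * 'mov qword [rbp-18h], imm32'; otherwise, and always on ARM64, the constant
 * is staged through IEMNATIVE_REG_FIXED_TMP0 first.
 *
 *      off = iemNativeEmitStoreImm64ByBp(pReNative, off, -0x18, UINT64_C(0));
 */
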
#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Emits a 128-bit vector register store with a BP relative destination address.
 *
 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreVecRegByBpU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iVecRegSrc)
{
#ifdef RT_ARCH_AMD64
    /* movdqu [rbp + offDisp], vecsrc */
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    pbCodeBuf[off++] = 0xf3;
    if (iVecRegSrc >= 8)
        pbCodeBuf[off++] = X86_OP_REX_R;
    pbCodeBuf[off++] = 0x0f;
    pbCodeBuf[off++] = 0x7f;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iVecRegSrc, offDisp, pReNative);

#elif defined(RT_ARCH_ARM64)
    if (offDisp >= 0 && offDisp < 4096 * 16 && !((uint32_t)offDisp & 15))
    {
        /* str w/ unsigned imm12 (scaled by the 16-byte access size) */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Vr_128, iVecRegSrc,
                                                      ARMV8_A64_REG_BP, (uint32_t)offDisp / 16);
    }
    else if (offDisp >= -256 && offDisp <= 256)
    {
        /* stur w/ signed imm9 (unscaled) */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrSturLdur(kArmv8A64InstrLdStType_St_Vr_128, iVecRegSrc, ARMV8_A64_REG_BP, offDisp);
    }
    else if ((uint32_t)-offDisp < (unsigned)_4K)
    {
        /* Use temporary indexing register w/ sub uimm12. */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
        pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, IEMNATIVE_REG_FIXED_TMP0,
                                                         ARMV8_A64_REG_BP, (uint32_t)-offDisp);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Vr_128, iVecRegSrc, IEMNATIVE_REG_FIXED_TMP0, 0);
    }
    else
    {
        /* Use temporary indexing register. */
        off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uint32_t)offDisp);
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Vr_128, iVecRegSrc, ARMV8_A64_REG_BP,
                                                       IEMNATIVE_REG_FIXED_TMP0, kArmv8A64InstrLdStExtend_Sxtw);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;

#else
# error "Port me!"
#endif
}


/**
 * Emits a 256-bit vector register store with a BP relative destination address.
 *
 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitStoreVecRegByBpU256(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iVecRegSrc)
{
#ifdef RT_ARCH_AMD64
    uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);

    /* vmovdqu mem256, reg256 */
    pbCodeBuf[off++] = X86_OP_VEX2;
    pbCodeBuf[off++] = X86_OP_VEX2_BYTE1_MAKE_NO_VVVV(iVecRegSrc >= 8, true /*f256BitAvx*/, X86_OP_VEX2_BYTE1_P_0F3H);
    pbCodeBuf[off++] = 0x7f;
    return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iVecRegSrc, offDisp, pReNative);
#elif defined(RT_ARCH_ARM64)
    /* ASSUMES two consecutive vector registers for the 256-bit value. */
    Assert(!(iVecRegSrc & 0x1));
    off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, offDisp, iVecRegSrc);
    return iemNativeEmitStoreVecRegByBpU128(pReNative, off, offDisp + sizeof(RTUINT128U), iVecRegSrc + 1);
#else
# error "Port me!"
#endif
}
#endif

#if defined(RT_ARCH_ARM64)

/**
 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
 *
 * @note Odd and large @a offDisp values require a temporary, unless it's a
 *       load and @a iGprReg differs from @a iGprBase.  Will assert / throw if
 *       the caller does not heed this.
 *
 * @note DON'T try this with prefetch.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitGprByGprLdStEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprReg, uint8_t iGprBase, int32_t offDisp,
                            ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData, uint8_t iGprTmp = UINT8_MAX)
{
    if ((uint32_t)offDisp < _4K * cbData && !((uint32_t)offDisp & (cbData - 1)))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, iGprBase, (uint32_t)offDisp / cbData);
    }
    else if (   (   !ARMV8A64INSTRLDSTTYPE_IS_STORE(enmOperation)
                 && iGprReg != iGprBase)
             || iGprTmp != UINT8_MAX)
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
        /** @todo reduce the displacement by >> 3 or >> 2 if it saves instructions? */
        if (iGprTmp == UINT8_MAX)
            iGprTmp = iGprReg;
        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprTmp, (int64_t)offDisp);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, iGprBase, iGprTmp);
    }
    else
# ifdef IEM_WITH_THROW_CATCH
        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
# else
        AssertReleaseFailedStmt(off = UINT32_MAX);
# endif
    return off;
}

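/*
 * Illustrative note on the iGprTmp convention above (sketch only): for plain
 * loads the destination register is dead until the instruction retires, so
 * when no temporary is supplied and iGprReg differs from iGprBase the helper
 * reuses iGprReg to materialize an awkward offset.  Stores, and loads where
 * destination and base coincide, cannot do that, which is why they assert /
 * throw unless the caller passes a real iGprTmp.
 */
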
/**
 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitGprByGprLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
                          uint8_t iGprBase, int32_t offDisp, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
{
    /*
     * There are a couple of ldr variants that take an immediate offset, so
     * try to use those if we can, otherwise we have to use a temporary
     * register to help with the addressing.
     */
    if ((uint32_t)offDisp < _4K * cbData && !((uint32_t)offDisp & (cbData - 1)))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, iGprBase, (uint32_t)offDisp / cbData);
    }
    else
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
        /** @todo reduce the displacement by >> 3 or >> 2 if it saves instructions? */
        uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (int64_t)offDisp);

        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, iGprBase, idxTmpReg);

        iemNativeRegFreeTmpImm(pReNative, idxTmpReg);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}

# ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Common bit of iemNativeEmitLoadVecRegByGprU128 and friends.
 *
 * @note Odd and large @a offDisp values require a temporary register
 *       (@a iGprTmp); unlike the GPR variant above, the destination cannot
 *       double as the temporary since it is a vector register.  Will assert /
 *       throw if the caller does not heed this.
 *
 * @note DON'T try this with prefetch.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitVecRegByGprLdStEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg, uint8_t iGprBase, int32_t offDisp,
                               ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData, uint8_t iGprTmp = UINT8_MAX)
{
    if ((uint32_t)offDisp < _4K * cbData && !((uint32_t)offDisp & (cbData - 1)))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        pCodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iVecReg, iGprBase, (uint32_t)offDisp / cbData);
    }
    else if (iGprTmp != UINT8_MAX) /* Must not fall back onto iVecReg for the offset here, it is a vector register. */
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
        /** @todo reduce the displacement by >> 3 or >> 2 if it saves instructions? */
        off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprTmp, (int64_t)offDisp);
        pCodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iVecReg, iGprBase, iGprTmp);
    }
    else
# ifdef IEM_WITH_THROW_CATCH
        AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
# else
        AssertReleaseFailedStmt(off = UINT32_MAX);
# endif
    return off;
}
# endif


/**
 * Common bit of iemNativeEmitLoadVecRegByGprU128 and friends.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitVecRegByGprLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg,
                             uint8_t iGprBase, int32_t offDisp, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
{
    /*
     * There are a couple of ldr variants that take an immediate offset, so
     * try to use those if we can, otherwise we have to use a temporary
     * register to help with the addressing.
     */
    if ((uint32_t)offDisp < _4K * cbData && !((uint32_t)offDisp & (cbData - 1)))
    {
        /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iVecReg, iGprBase, (uint32_t)offDisp / cbData);
    }
    else
    {
        /* The offset is too large, so we must load it into a register and use
           ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
        /** @todo reduce the displacement by >> 3 or >> 2 if it saves instructions? */
        uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (int64_t)offDisp);

        uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
        pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iVecReg, iGprBase, idxTmpReg);

        iemNativeRegFreeTmpImm(pReNative, idxTmpReg);
    }
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
#endif /* RT_ARCH_ARM64 */

/**
 * Emits a 64-bit GPR load via a GPR base address with a displacement.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x7ff8...0x7ff8 range will require a temporary register (@a iGprTmp) if
 *       @a iGprReg and @a iGprBase are the same. Will assert / throw if caller
 *       does not heed this.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU64Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                               int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* mov reg64, mem64 */
    pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x8b;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 64-bit GPR load via a GPR base address with a displacement.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprByGprU64Ex(iemNativeInstrBufEnsure(pReNative, off, 8), off, iGprDst, iGprBase, offDisp);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, iGprBase, offDisp, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));

#else
# error "port me"
#endif
    return off;
}


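/*
 * Illustrative note (sketch only): the Ex flavours write into a caller
 * supplied instruction buffer and leave the ensure/assert bookkeeping to the
 * caller, letting a sequence of emits share one iemNativeInstrBufEnsure call;
 * the non-Ex wrappers are the convenient one-shot form.  Hypothetical usage,
 * register indexes and the buffer size made up:
 *
 *      PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 16);
 *      off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, 2, 3, 0x10);
 *      off = iemNativeEmitLoadGprByGprU64Ex(pCodeBuf, off, 4, 3, 0x18);
 *      IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 */
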
/**
 * Emits a 32-bit GPR load via a GPR base address with a displacement.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x3ffc...0x3ffc range will require a temporary register (@a iGprTmp)
 *       if @a iGprReg and @a iGprBase are the same. Will assert / throw if
 *       caller does not heed this.
 *
 * @note Bits 63 thru 32 in @a iGprDst will be cleared.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                               int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* mov reg32, mem32 */
    if (iGprDst >= 8 || iGprBase >= 8)
        pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x8b;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 32-bit GPR load via a GPR base address with a displacement.
 * @note Bits 63 thru 32 in @a iGprDst will be cleared.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadGprByGprU32Ex(iemNativeInstrBufEnsure(pReNative, off, 8), off, iGprDst, iGprBase, offDisp);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, iGprBase, offDisp, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 32-bit GPR load via a GPR base address with a displacement,
 * sign-extending the value to 64 bits.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x3ffc...0x3ffc range will require a temporary register (@a iGprTmp)
 *       if @a iGprReg and @a iGprBase are the same. Will assert / throw if
 *       caller does not heed this.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU64SignExtendedFromS32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                                                  int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movsxd reg64, mem32 */
    pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x63;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_SignWord64, sizeof(uint32_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 16-bit GPR load via a GPR base address with a displacement.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x1ffe...0x1ffe range will require a temporary register (@a iGprTmp)
 *       if @a iGprReg and @a iGprBase are the same. Will assert / throw if
 *       caller does not heed this.
 *
 * @note Bits 63 thru 16 in @a iGprDst will be cleared.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                               int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movzx reg32, mem16 */
    if (iGprDst >= 8 || iGprBase >= 8)
        pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xb7;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_Half, sizeof(uint16_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 16-bit GPR load via a GPR base address with a displacement,
 * sign-extending the value to 64 bits.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x1ffe...0x1ffe range will require a temporary register (@a iGprTmp)
 *       if @a iGprReg and @a iGprBase are the same. Will assert / throw if
 *       caller does not heed this.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU64SignExtendedFromS16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                                                  int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movsx reg64, mem16 */
    pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xbf;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_SignHalf64, sizeof(uint16_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 16-bit GPR load via a GPR base address with a displacement,
 * sign-extending the value to 32 bits.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x1ffe...0x1ffe range will require a temporary register (@a iGprTmp)
 *       if @a iGprReg and @a iGprBase are the same. Will assert / throw if
 *       caller does not heed this.
 *
 * @note Bits 63 thru 32 in @a iGprDst will be cleared.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU32SignExtendedFromS16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                                                  int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movsx reg32, mem16 */
    if (iGprDst >= 8 || iGprBase >= 8)
        pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xbf;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_SignHalf32, sizeof(uint16_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits an 8-bit GPR load via a GPR base address with a displacement.
 *
 * @note ARM64: @a offDisp values not in the -0xfff...0xfff range will require a
 *       temporary register (@a iGprTmp) if @a iGprReg and @a iGprBase are the
 *       same.  Will assert / throw if caller does not heed this.
 *
 * @note Bits 63 thru 8 in @a iGprDst will be cleared.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                              int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movzx reg32, mem8 */
    if (iGprDst >= 8 || iGprBase >= 8)
        pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xb6;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_Byte, sizeof(uint8_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits an 8-bit GPR load via a GPR base address with a displacement,
 * sign-extending the value to 64 bits.
 *
 * @note ARM64: @a offDisp values not in the -0xfff...0xfff range will require a
 *       temporary register (@a iGprTmp) if @a iGprReg and @a iGprBase are the
 *       same.  Will assert / throw if caller does not heed this.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU64SignExtendedFromS8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                                                 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movsx reg64, mem8 */
    pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xbe;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_SignByte64, sizeof(uint8_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits an 8-bit GPR load via a GPR base address with a displacement,
 * sign-extending the value to 32 bits.
 *
 * @note ARM64: @a offDisp values not in the -0xfff...0xfff range will require a
 *       temporary register (@a iGprTmp) if @a iGprReg and @a iGprBase are the
 *       same.  Will assert / throw if caller does not heed this.
 *
 * @note Bits 63 thru 32 in @a iGprDst will be cleared.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU32SignExtendedFromS8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                                                 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movsx reg32, mem8 */
    if (iGprDst >= 8 || iGprBase >= 8)
        pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xbe;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_SignByte32, sizeof(uint8_t), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits an 8-bit GPR load via a GPR base address with a displacement,
 * sign-extending the value to 16 bits.
 *
 * @note ARM64: @a offDisp values not in the -0xfff...0xfff range will require a
 *       temporary register (@a iGprTmp) if @a iGprReg and @a iGprBase are the
 *       same.  Will assert / throw if caller does not heed this.
 *
 * @note Bits 63 thru 16 in @a iGprDst will be cleared.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprByGprU16SignExtendedFromS8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprBase,
                                                 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movsx reg32, mem8 */
    if (iGprDst >= 8 || iGprBase >= 8)
        pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xbe;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprDst, iGprBase, offDisp);
# if 1 /** @todo use 'movzx reg32, reg16' instead of 'and reg32, 0ffffh' ? */
    /* and reg32, 0xffff */
    if (iGprDst >= 8)
        pCodeBuf[off++] = X86_OP_REX_B;
    pCodeBuf[off++] = 0x81;
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
    pCodeBuf[off++] = 0xff;
    pCodeBuf[off++] = 0xff;
    pCodeBuf[off++] = 0;
    pCodeBuf[off++] = 0;
# else
    /* movzx reg32, reg16 */
    if (iGprDst >= 8)
        pCodeBuf[off++] = X86_OP_REX_B | X86_OP_REX_R;
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0xb7;
    pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprDst & 7);
# endif
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprDst, iGprBase, offDisp,
                                      kArmv8A64InstrLdStType_Ld_SignByte32, sizeof(uint8_t), iGprTmp);
    Assert(Armv8A64ConvertImmRImmS2Mask32(15, 0) == 0xffff);
    pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 15, 0, false /*f64Bit*/);

#else
# error "port me"
#endif
    return off;
}


#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
/**
 * Emits a 128-bit vector register load via a GPR base address with a displacement.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x7ff8...0x7ff8 range will require a temporary register (@a iGprTmp) if
 *       @a iGprReg and @a iGprBase are the same. Will assert / throw if caller
 *       does not heed this.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadVecRegByGprU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase,
                                   int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* movdqu reg128, mem128 */
    pCodeBuf[off++] = 0xf3;
    if (iVecRegDst >= 8 || iGprBase >= 8)
        pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
    pCodeBuf[off++] = 0x0f;
    pCodeBuf[off++] = 0x6f;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iVecRegDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitVecRegByGprLdStEx(pCodeBuf, off, iVecRegDst, iGprBase, offDisp,
                                         kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U), iGprTmp);

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 128-bit vector register load via a GPR base address with a
 * displacement.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadVecRegByGprU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase, int32_t offDisp)
{
#ifdef RT_ARCH_AMD64
    off = iemNativeEmitLoadVecRegByGprU128Ex(iemNativeInstrBufEnsure(pReNative, off, 8), off, iVecRegDst, iGprBase, offDisp);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);

#elif defined(RT_ARCH_ARM64)
    off = iemNativeEmitVecRegByGprLdSt(pReNative, off, iVecRegDst, iGprBase, offDisp, kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));

#else
# error "port me"
#endif
    return off;
}


/**
 * Emits a 256-bit vector register load via a GPR base address with a displacement.
 *
 * @note ARM64: Misaligned @a offDisp values and values not in the
 *       -0x7ff8...0x7ff8 range will require a temporary register (@a iGprTmp) if
 *       @a iGprReg and @a iGprBase are the same. Will assert / throw if caller
 *       does not heed this.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitLoadVecRegByGprU256Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase,
                                   int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
{
#ifdef RT_ARCH_AMD64
    /* vmovdqu reg256, mem256 */
    pCodeBuf[off++] = X86_OP_VEX3;
    pCodeBuf[off++] = (iVecRegDst < 8 ? X86_OP_VEX3_BYTE1_R : 0)
                    | X86_OP_VEX3_BYTE1_X
                    | (iGprBase < 8 ? X86_OP_VEX3_BYTE1_B : 0)
                    | UINT8_C(0x01);
    pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false /*f64BitOpSz*/, true /*f256BitAvx*/, X86_OP_VEX2_BYTE1_P_0F3H);
    pCodeBuf[off++] = 0x6f;
    off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iVecRegDst, iGprBase, offDisp);
    RT_NOREF(iGprTmp);

#elif defined(RT_ARCH_ARM64)
    Assert(!(iVecRegDst & 0x1));
    off = iemNativeEmitVecRegByGprLdStEx(pCodeBuf, off, iVecRegDst, iGprBase, offDisp,
                                         kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U), iGprTmp);
    off = iemNativeEmitVecRegByGprLdStEx(pCodeBuf, off, iVecRegDst + 1, iGprBase, offDisp + sizeof(RTUINT128U),
                                         kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U), iGprTmp);
#else
# error "port me"
#endif
    return off;
}


3050/**
3051 * Emits a 256-bit vector register load via a GPR base address with a displacement.
3052 */
3053DECL_INLINE_THROW(uint32_t)
3054iemNativeEmitLoadVecRegByGprU256(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase, int32_t offDisp)
3055{
3056#ifdef RT_ARCH_AMD64
3057 off = iemNativeEmitLoadVecRegByGprU256Ex(iemNativeInstrBufEnsure(pReNative, off, 8), off, iVecRegDst, iGprBase, offDisp);
3058 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3059
3060#elif defined(RT_ARCH_ARM64)
3061 Assert(!(iVecRegDst & 0x1));
3062 off = iemNativeEmitVecRegByGprLdSt(pReNative, off, iVecRegDst, iGprBase, offDisp,
3063 kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));
3064 off = iemNativeEmitVecRegByGprLdSt(pReNative, off, iVecRegDst + 1, iGprBase, offDisp + sizeof(RTUINT128U),
3065 kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));
3066
3067#else
3068# error "port me"
3069#endif
3070 return off;
3071}
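
/*
 * Design note on the 256-bit variants above: on ARM64 a 256-bit value lives
 * in a register pair, so the emitters issue two consecutive 128-bit loads
 * and assert an even iVecRegDst (the value occupies iVecRegDst and
 * iVecRegDst + 1). A minimal call sketch with an assumed, even host
 * register index:
 *
 *      off = iemNativeEmitLoadVecRegByGprU256(pReNative, off, 2, iGprBase, 0);
 */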
3072#endif
3073
3074
3075/**
3076 * Emits a 64-bit GPR store via a GPR base address with a displacement.
3077 *
3078 * @note ARM64: Misaligned @a offDisp values and values not in the
3079 * -0x7ff8...0x7ff8 range will require a temporary register (@a iGprTmp) if
3080 * @a iGprSrc and @a iGprBase are the same. Will assert / throw if caller
3081 * does not heed this.
3082 */
3083DECL_FORCE_INLINE_THROW(uint32_t)
3084iemNativeEmitStoreGpr64ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iGprBase,
3085 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3086{
3087#ifdef RT_ARCH_AMD64
3088 /* mov mem64, reg64 */
3089 pCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
3090 pCodeBuf[off++] = 0x89;
3091 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprSrc, iGprBase, offDisp);
3092 RT_NOREF(iGprTmp);
3093
3094#elif defined(RT_ARCH_ARM64)
3095 off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprSrc, iGprBase, offDisp,
3096 kArmv8A64InstrLdStType_St_Dword, sizeof(uint64_t), iGprTmp);
3097
3098#else
3099# error "port me"
3100#endif
3101 return off;
3102}
3103
3104
3105/**
3106 * Emits a 32-bit GPR store via a GPR base address with a displacement.
3107 *
3108 * @note ARM64: Misaligned @a offDisp values and values not in the
3109 * -0x3ffc...0x3ffc range will require a temporary register (@a iGprTmp) if
3110 * @a iGprSrc and @a iGprBase are the same. Will assert / throw if caller
3111 * does not heed this.
3112 */
3113DECL_FORCE_INLINE_THROW(uint32_t)
3114iemNativeEmitStoreGpr32ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iGprBase,
3115 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3116{
3117#ifdef RT_ARCH_AMD64
3118 /* mov mem32, reg32 */
3119 if (iGprSrc >= 8 || iGprBase >= 8)
3120 pCodeBuf[off++] = (iGprSrc < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
3121 pCodeBuf[off++] = 0x89;
3122 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprSrc, iGprBase, offDisp);
3123 RT_NOREF(iGprTmp);
3124
3125#elif defined(RT_ARCH_ARM64)
3126 off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprSrc, iGprBase, offDisp,
3127 kArmv8A64InstrLdStType_St_Word, sizeof(uint32_t), iGprTmp);
3128
3129#else
3130# error "port me"
3131#endif
3132 return off;
3133}
3134
3135
3136/**
3137 * Emits a 16-bit GPR store via a GPR base address with a displacement.
3138 *
3139 * @note ARM64: Misaligned @a offDisp values and values not in the
3140 * -0x1ffe...0x1ffe range will require a temporary register (@a iGprTmp) if
3141 * @a iGprSrc and @a iGprBase are the same. Will assert / throw if caller
3142 * does not heed this.
3143 */
3144DECL_FORCE_INLINE_THROW(uint32_t)
3145iemNativeEmitStoreGpr16ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iGprBase,
3146 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3147{
3148#ifdef RT_ARCH_AMD64
3149 /* mov mem16, reg16 */
3150 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
3151 if (iGprSrc >= 8 || iGprBase >= 8)
3152 pCodeBuf[off++] = (iGprSrc < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
3153 pCodeBuf[off++] = 0x89;
3154 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprSrc, iGprBase, offDisp);
3155 RT_NOREF(iGprTmp);
3156
3157#elif defined(RT_ARCH_ARM64)
3158 off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprSrc, iGprBase, offDisp,
3159 kArmv8A64InstrLdStType_St_Half, sizeof(uint16_t), iGprTmp);
3160
3161#else
3162# error "port me"
3163#endif
3164 return off;
3165}
3166
3167
3168/**
3169 * Emits an 8-bit GPR store via a GPR base address with a displacement.
3170 *
3171 * @note ARM64: @a offDisp values not in the -0xfff...0xfff range will require a
3172 * temporary register (@a iGprTmp) if @a iGprSrc and @a iGprBase are the
3173 * same. Will assert / throw if caller does not heed this.
3174 */
3175DECL_FORCE_INLINE_THROW(uint32_t)
3176iemNativeEmitStoreGpr8ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iGprBase,
3177 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3178{
3179#ifdef RT_ARCH_AMD64
3180 /* mov mem8, reg8 */
3181 if (iGprSrc >= 8 || iGprBase >= 8)
3182 pCodeBuf[off++] = (iGprSrc < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
3183 else if (iGprSrc >= 4)
3184 pCodeBuf[off++] = X86_OP_REX;
3185 pCodeBuf[off++] = 0x88;
3186 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iGprSrc, iGprBase, offDisp);
3187 RT_NOREF(iGprTmp);
3188
3189#elif defined(RT_ARCH_ARM64)
3190 off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprSrc, iGprBase, offDisp,
3191 kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t), iGprTmp);
3192
3193#else
3194# error "port me"
3195#endif
3196 return off;
3197}
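
/*
 * A minimal usage sketch of the GPR store emitters above;
 * iemNativeEmitExampleStoreField is a hypothetical helper and the
 * displacement is an illustrative assumption. 0x9000 exceeds the ARM64
 * scaled 12-bit offset range, so a temporary register must be supplied
 * or the emitter asserts / throws.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitExampleStoreField(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                               uint8_t iGprSrc, uint8_t iGprBase, uint8_t iGprTmp)
{
    off = iemNativeEmitStoreGpr32ByGprEx(iemNativeInstrBufEnsure(pReNative, off, 16), off,
                                         iGprSrc, iGprBase, 0x9000 /*offDisp, illustrative*/, iGprTmp);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}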
3198
3199
3200/**
3201 * Emits a 64-bit immediate store via a GPR base address with a displacement.
3202 *
3203 * @note This will always require @a iGprImmTmp on ARM64 (except for uImm=0), on
3204 * AMD64 it depends on the immediate value.
3205 *
3206 * @note ARM64: Misaligned @a offDisp values and values not in the
3207 * -0x7ff8...0x7ff8 range will require a temporary register (@a iGprTmp) if
3208 * @a iGprImmTmp and @a iGprBase are the same. Will assert / throw if caller
3209 * does not heed this.
3210 */
3211DECL_FORCE_INLINE_THROW(uint32_t)
3212iemNativeEmitStoreImm64ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint64_t uImm, uint8_t iGprBase,
3213 uint8_t iGprImmTmp = UINT8_MAX, int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3214{
3215#ifdef RT_ARCH_AMD64
3216 if ((int32_t)uImm == (int64_t)uImm)
3217 {
3218 /* mov mem64, imm32 (sign-extended) */
3219 pCodeBuf[off++] = X86_OP_REX_W | (iGprBase < 8 ? 0 : X86_OP_REX_B);
3220 pCodeBuf[off++] = 0xc7;
3221 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, 0, iGprBase, offDisp);
3222 pCodeBuf[off++] = RT_BYTE1(uImm);
3223 pCodeBuf[off++] = RT_BYTE2(uImm);
3224 pCodeBuf[off++] = RT_BYTE3(uImm);
3225 pCodeBuf[off++] = RT_BYTE4(uImm);
3226 }
3227 else if (iGprImmTmp != UINT8_MAX || iGprTmp != UINT8_MAX)
3228 {
3229 /* require temporary register. */
3230 if (iGprImmTmp == UINT8_MAX)
3231 iGprImmTmp = iGprTmp;
3232 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprImmTmp, uImm);
3233 off = iemNativeEmitStoreGpr64ByGprEx(pCodeBuf, off, iGprImmTmp, iGprBase, offDisp);
3234 }
3235 else
3236# ifdef IEM_WITH_THROW_CATCH
3237 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
3238# else
3239 AssertReleaseFailedStmt(off = UINT32_MAX);
3240# endif
3241
3242#elif defined(RT_ARCH_ARM64)
3243 if (uImm == 0)
3244 iGprImmTmp = ARMV8_A64_REG_XZR;
3245 else
3246 {
3247 Assert(iGprImmTmp < 31);
3248 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprImmTmp, uImm);
3249 }
3250 off = iemNativeEmitStoreGpr64ByGprEx(pCodeBuf, off, iGprImmTmp, iGprBase, offDisp, iGprTmp);
3251
3252#else
3253# error "port me"
3254#endif
3255 return off;
3256}
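
/*
 * Design note: the ARM64 path above stores zero immediates directly from the
 * zero register, so no iGprImmTmp is needed in that one case, e.g. (register
 * indices assumed for illustration):
 *
 *      off = iemNativeEmitStoreImm64ByGprEx(pCodeBuf, off, 0, iGprBase);
 *
 * Any non-zero immediate requires a valid iGprImmTmp on ARM64, as noted above.
 */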
3257
3258
3259/**
3260 * Emits a 32-bit immediate store via a GPR base address with a displacement.
3261 *
3262 * @note This will always require @a iGprImmTmp on ARM64 (except for uImm=0).
3263 *
3264 * @note ARM64: Misaligned @a offDisp values and values not in the
3265 * -0x3ffc...0x3ffc range will require a temporary register (@a iGprTmp) if
3266 * @a iGprImmTmp and @a iGprBase are the same. Will assert / throw if caller
3267 * does not heed this.
3268 */
3269DECL_FORCE_INLINE_THROW(uint32_t)
3270iemNativeEmitStoreImm32ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint32_t uImm, uint8_t iGprBase,
3271 uint8_t iGprImmTmp = UINT8_MAX, int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3272{
3273#ifdef RT_ARCH_AMD64
3274 /* mov mem32, imm32 */
3275 if (iGprBase >= 8)
3276 pCodeBuf[off++] = X86_OP_REX_B;
3277 pCodeBuf[off++] = 0xc7;
3278 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, 0, iGprBase, offDisp);
3279 pCodeBuf[off++] = RT_BYTE1(uImm);
3280 pCodeBuf[off++] = RT_BYTE2(uImm);
3281 pCodeBuf[off++] = RT_BYTE3(uImm);
3282 pCodeBuf[off++] = RT_BYTE4(uImm);
3283 RT_NOREF(iGprImmTmp, iGprTmp);
3284
3285#elif defined(RT_ARCH_ARM64)
3287 if (uImm == 0)
3288 iGprImmTmp = ARMV8_A64_REG_XZR;
3289 else
3290 {
3291 Assert(iGprImmTmp < 31);
3292 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprImmTmp, uImm);
3293 }
3294 off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprImmTmp, iGprBase, offDisp,
3295 kArmv8A64InstrLdStType_St_Word, sizeof(uint32_t), iGprTmp);
3296
3297#else
3298# error "port me"
3299#endif
3300 return off;
3301}
3302
3303
3304/**
3305 * Emits a 16-bit immediate store via a GPR base address with a displacement.
3306 *
3307 * @note This will always require @a iGprImmTmp on ARM64 (except for uImm=0).
3308 *
3309 * @note ARM64: Misaligned @a offDisp values and values not in the
3310 * -0x1ffe...0x1ffe range will require a temporary register (@a iGprTmp) if
3311 * @a iGprImmTmp and @a iGprBase are the same. Will assert / throw if caller
3312 * does not heed this.
3313 */
3314DECL_FORCE_INLINE_THROW(uint32_t)
3315iemNativeEmitStoreImm16ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint16_t uImm, uint8_t iGprBase,
3316 uint8_t iGprImmTmp = UINT8_MAX, int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3317{
3318#ifdef RT_ARCH_AMD64
3319 /* mov mem16, imm16 */
3320 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
3321 if (iGprBase >= 8)
3322 pCodeBuf[off++] = X86_OP_REX_B;
3323 pCodeBuf[off++] = 0xc7;
3324 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, 0, iGprBase, offDisp);
3325 pCodeBuf[off++] = RT_BYTE1(uImm);
3326 pCodeBuf[off++] = RT_BYTE2(uImm);
3327 RT_NOREF(iGprImmTmp, iGprTmp);
3328
3329#elif defined(RT_ARCH_ARM64)
3330 if (uImm == 0)
3331 iGprImmTmp = ARMV8_A64_REG_XZR;
3332 else
3333 {
3334 Assert(iGprImmTmp < 31);
3335 pCodeBuf[off++] = Armv8A64MkInstrMovZ(iGprImmTmp, uImm);
3336 }
3337 off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprImmTmp, iGprBase, offDisp,
3338 kArmv8A64InstrLdStType_St_Half, sizeof(uint16_t), iGprTmp);
3339
3340#else
3341# error "port me"
3342#endif
3343 return off;
3344}
3345
3346
3347/**
3348 * Emits an 8-bit immediate store via a GPR base address with a displacement.
3349 *
3350 * @note This will always require @a iGprImmTmp on ARM64 (except for uImm=0).
3351 *
3352 * @note ARM64: @a offDisp values not in the -0xfff...0xfff range will require a
3353 * temporary register (@a iGprTmp) if @a iGprImmTmp and @a iGprBase are the
3354 * same. Will assert / throw if caller does not heed this.
3355 */
3356DECL_FORCE_INLINE_THROW(uint32_t)
3357iemNativeEmitStoreImm8ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t uImm, uint8_t iGprBase,
3358 uint8_t iGprImmTmp = UINT8_MAX, int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3359{
3360#ifdef RT_ARCH_AMD64
3361 /* mov mem8, imm8 (no operand size prefix for byte-sized stores) */
3363 if (iGprBase >= 8)
3364 pCodeBuf[off++] = X86_OP_REX_B;
3365 pCodeBuf[off++] = 0xc6;
3366 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, 0, iGprBase, offDisp);
3367 pCodeBuf[off++] = uImm;
3368 RT_NOREF(iGprImmTmp, iGprTmp);
3369
3370#elif defined(RT_ARCH_ARM64)
3371 if (uImm == 0)
3372 iGprImmTmp = ARMV8_A64_REG_XZR;
3373 else
3374 {
3375 Assert(iGprImmTmp < 31);
3376 pCodeBuf[off++] = Armv8A64MkInstrMovZ(iGprImmTmp, uImm);
3377 }
3378 off = iemNativeEmitGprByGprLdStEx(pCodeBuf, off, iGprImmTmp, iGprBase, offDisp,
3379 kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t), iGprTmp);
3380
3381#else
3382# error "port me"
3383#endif
3384 return off;
3385}
3386
3387
3388#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
3389/**
3390 * Emits a 128-bit vector register store via a GPR base address with a displacement.
3391 *
3392 * @note ARM64: Misaligned @a offDisp values and values not in the
3393 * -0x7ff8...0x7ff8 range will require a temporary register (@a iGprTmp) if
3394 * @a iVecRegDst and @a iGprBase are the same. Will assert / throw if caller
3395 * does not heed this.
3396 */
3397DECL_FORCE_INLINE_THROW(uint32_t)
3398iemNativeEmitStoreVecRegByGprU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase,
3399 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3400{
3401#ifdef RT_ARCH_AMD64
3402 /* movdqu mem128, reg128 */
3403 pCodeBuf[off++] = 0xf3;
3404 if (iVecRegDst >= 8 || iGprBase >= 8)
3405 pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
3406 pCodeBuf[off++] = 0x0f;
3407 pCodeBuf[off++] = 0x7f;
3408 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iVecRegDst, iGprBase, offDisp);
3409 RT_NOREF(iGprTmp);
3410
3411#elif defined(RT_ARCH_ARM64)
3412 off = iemNativeEmitVecRegByGprLdStEx(pCodeBuf, off, iVecRegDst, iGprBase, offDisp,
3413 kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U), iGprTmp);
3414
3415#else
3416# error "port me"
3417#endif
3418 return off;
3419}
3420
3421
3422/**
3423 * Emits a 128-bit vector register store via a GPR base address with a displacement.
3424 */
3425DECL_INLINE_THROW(uint32_t)
3426iemNativeEmitStoreVecRegByGprU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase, int32_t offDisp)
3427{
3428#ifdef RT_ARCH_AMD64
3429 off = iemNativeEmitStoreVecRegByGprU128Ex(iemNativeInstrBufEnsure(pReNative, off, 8), off, iVecRegDst, iGprBase, offDisp);
3430 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3431
3432#elif defined(RT_ARCH_ARM64)
3433 off = iemNativeEmitVecRegByGprLdSt(pReNative, off, iVecRegDst, iGprBase, offDisp, kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U));
3434
3435#else
3436# error "port me"
3437#endif
3438 return off;
3439}
3440
3441
3442/**
3443 * Emits a 256-bit vector register store via a GPR base address with a displacement.
3444 *
3445 * @note ARM64: Misaligned @a offDisp values and values not in the
3446 * -0x7ff8...0x7ff8 range will require a temporary register (@a iGprTmp) if
3447 * @a iVecRegDst and @a iGprBase are the same. Will assert / throw if caller
3448 * does not heed this.
3449 */
3450DECL_FORCE_INLINE_THROW(uint32_t)
3451iemNativeEmitStoreVecRegByGprU256Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase,
3452 int32_t offDisp = 0, uint8_t iGprTmp = UINT8_MAX)
3453{
3454#ifdef RT_ARCH_AMD64
3455 /* vmovdqu mem256, reg256 */
3456 pCodeBuf[off++] = X86_OP_VEX3;
3457 pCodeBuf[off++] = (iVecRegDst < 8 ? X86_OP_VEX3_BYTE1_R : 0)
3458 | X86_OP_VEX3_BYTE1_X
3459 | (iGprBase < 8 ? X86_OP_VEX3_BYTE1_B : 0)
3460 | UINT8_C(0x01);
3461 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false /*f64BitOpSz*/, true /*f256BitAvx*/, X86_OP_VEX2_BYTE1_P_0F3H);
3462 pCodeBuf[off++] = 0x7f;
3463 off = iemNativeEmitGprByGprDisp(pCodeBuf, off, iVecRegDst, iGprBase, offDisp);
3464 RT_NOREF(iGprTmp);
3465
3466#elif defined(RT_ARCH_ARM64)
3467 Assert(!(iVecRegDst & 0x1));
3468 off = iemNativeEmitVecRegByGprLdStEx(pCodeBuf, off, iVecRegDst, iGprBase, offDisp,
3469 kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U), iGprTmp);
3470 off = iemNativeEmitVecRegByGprLdStEx(pCodeBuf, off, iVecRegDst + 1, iGprBase, offDisp + sizeof(RTUINT128U),
3471 kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U), iGprTmp);
3472#else
3473# error "port me"
3474#endif
3475 return off;
3476}
3477
3478
3479/**
3480 * Emits a 256-bit GPR load via a GPR base address with a displacement.
3481 * Emits a 256-bit vector register store via a GPR base address with a displacement.
3482DECL_INLINE_THROW(uint32_t)
3483iemNativeEmitStoreVecRegByGprU256(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprBase, int32_t offDisp)
3484{
3485#ifdef RT_ARCH_AMD64
3486 off = iemNativeEmitStoreVecRegByGprU256Ex(iemNativeInstrBufEnsure(pReNative, off, 8), off, iVecRegDst, iGprBase, offDisp);
3487 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3488
3489#elif defined(RT_ARCH_ARM64)
3490 Assert(!(iVecRegDst & 0x1));
3491 off = iemNativeEmitVecRegByGprLdSt(pReNative, off, iVecRegDst, iGprBase, offDisp,
3492 kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U));
3493 off = iemNativeEmitVecRegByGprLdSt(pReNative, off, iVecRegDst + 1, iGprBase, offDisp + sizeof(RTUINT128U),
3494 kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U));
3495
3496#else
3497# error "port me"
3498#endif
3499 return off;
3500}
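
/*
 * Encoding note for the AVX paths above: the R, X and B bits in the 3-byte
 * VEX prefix are stored inverted, which is why the code sets them when the
 * register index is *below* 8 - the opposite of the REX logic used elsewhere
 * in this file. For instance, iVecRegDst=1 and iGprBase=2 with a zero
 * displacement should come out as C4 E1 7E 7F 0A (vmovdqu [rdx], ymm1);
 * byte values worked out by hand here, so treat them as illustrative.
 */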
3501#endif
3502
3503
3504
3505/*********************************************************************************************************************************
3506 * Subtractions and Additions *
3507*********************************************************************************************************************************/
3508
3509/**
3510 * Emits subtracting a 64-bit GPR from another, storing the result in the first.
3511 * @note The AMD64 version sets flags.
3512 */
3513DECL_INLINE_THROW(uint32_t)
3514iemNativeEmitSubTwoGprs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSubtrahend)
3515{
3516#if defined(RT_ARCH_AMD64)
3517 /* sub Gv,Ev */
3518 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
3519 pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
3520 | (iGprSubtrahend < 8 ? 0 : X86_OP_REX_B);
3521 pbCodeBuf[off++] = 0x2b;
3522 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSubtrahend & 7);
3523
3524#elif defined(RT_ARCH_ARM64)
3525 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
3526 pu32CodeBuf[off++] = Armv8A64MkInstrSubReg(iGprDst, iGprDst, iGprSubtrahend);
3527
3528#else
3529# error "Port me"
3530#endif
3531 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3532 return off;
3533}
3534
3535
3536/**
3537 * Emits subtracting a 32-bit GPR from another, storing the result in the first.
3538 * @note The AMD64 version sets flags.
3539 */
3540DECL_FORCE_INLINE(uint32_t)
3541iemNativeEmitSubTwoGprs32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSubtrahend)
3542{
3543#if defined(RT_ARCH_AMD64)
3544 /* sub Gv,Ev */
3545 if (iGprDst >= 8 || iGprSubtrahend >= 8)
3546 pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R)
3547 | (iGprSubtrahend < 8 ? 0 : X86_OP_REX_B);
3548 pCodeBuf[off++] = 0x2b;
3549 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSubtrahend & 7);
3550
3551#elif defined(RT_ARCH_ARM64)
3552 pCodeBuf[off++] = Armv8A64MkInstrSubReg(iGprDst, iGprDst, iGprSubtrahend, false /*f64Bit*/);
3553
3554#else
3555# error "Port me"
3556#endif
3557 return off;
3558}
3559
3560
3561/**
3562 * Emits subtracting a 32-bit GPR from another, storing the result in the first.
3563 * @note The AMD64 version sets flags.
3564 */
3565DECL_INLINE_THROW(uint32_t)
3566iemNativeEmitSubTwoGprs32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSubtrahend)
3567{
3568#if defined(RT_ARCH_AMD64)
3569 off = iemNativeEmitSubTwoGprs32Ex(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSubtrahend);
3570#elif defined(RT_ARCH_ARM64)
3571 off = iemNativeEmitSubTwoGprs32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSubtrahend);
3572#else
3573# error "Port me"
3574#endif
3575 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3576 return off;
3577}
3578
3579
3580/**
3581 * Emits a 64-bit GPR subtract with a signed immediate subtrahend.
3582 *
3583 * This will optimize using DEC/INC/whatever, so try to avoid flag dependencies.
3584 *
3585 * @note Larger constants will require a temporary register. Failing to specify
3586 * one when needed will trigger fatal assertion / throw.
3587 */
3588DECL_FORCE_INLINE_THROW(uint32_t)
3589iemNativeEmitSubGprImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int64_t iSubtrahend,
3590 uint8_t iGprTmp = UINT8_MAX)
3591{
3592#ifdef RT_ARCH_AMD64
3593 pCodeBuf[off++] = iGprDst >= 8 ? X86_OP_REX_W | X86_OP_REX_B : X86_OP_REX_W;
3594 if (iSubtrahend == 1)
3595 {
3596 /* dec r/m64 */
3597 pCodeBuf[off++] = 0xff;
3598 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
3599 }
3600 else if (iSubtrahend == -1)
3601 {
3602 /* inc r/m64 */
3603 pCodeBuf[off++] = 0xff;
3604 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
3605 }
3606 else if ((int8_t)iSubtrahend == iSubtrahend)
3607 {
3608 /* sub r/m64, imm8 */
3609 pCodeBuf[off++] = 0x83;
3610 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
3611 pCodeBuf[off++] = (uint8_t)iSubtrahend;
3612 }
3613 else if ((int32_t)iSubtrahend == iSubtrahend)
3614 {
3615 /* sub r/m64, imm32 */
3616 pCodeBuf[off++] = 0x81;
3617 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
3618 pCodeBuf[off++] = RT_BYTE1((uint64_t)iSubtrahend);
3619 pCodeBuf[off++] = RT_BYTE2((uint64_t)iSubtrahend);
3620 pCodeBuf[off++] = RT_BYTE3((uint64_t)iSubtrahend);
3621 pCodeBuf[off++] = RT_BYTE4((uint64_t)iSubtrahend);
3622 }
3623 else if (iGprTmp != UINT8_MAX)
3624 {
3625 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off - 1 /* overwrite the REX.W emitted above */, iGprTmp, (uint64_t)iSubtrahend);
3626 /* sub r/m64, r64 */
3627 pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B) | (iGprTmp < 8 ? 0 : X86_OP_REX_R);
3628 pCodeBuf[off++] = 0x29;
3629 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprTmp & 7, iGprDst & 7);
3630 }
3631 else
3632# ifdef IEM_WITH_THROW_CATCH
3633 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
3634# else
3635 AssertReleaseFailedStmt(off = UINT32_MAX);
3636# endif
3637
3638#elif defined(RT_ARCH_ARM64)
3639 uint64_t const uAbsSubtrahend = (uint64_t)RT_ABS(iSubtrahend);
3640 if (uAbsSubtrahend < 4096)
3641 {
3642 if (iSubtrahend >= 0)
3643 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsSubtrahend);
3644 else
3645 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsSubtrahend);
3646 }
3647 else if (uAbsSubtrahend <= 0xfff000 && !(uAbsSubtrahend & 0xfff))
3648 {
3649 if (iSubtrahend >= 0)
3650 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsSubtrahend >> 12,
3651 true /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
3652 else
3653 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsSubtrahend >> 12,
3654 true /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
3655 }
3656 else if (iGprTmp != UINT8_MAX)
3657 {
3658 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprTmp, (uint64_t)iSubtrahend);
3659 pCodeBuf[off++] = Armv8A64MkInstrSubReg(iGprDst, iGprDst, iGprTmp);
3660 }
3661 else
3662# ifdef IEM_WITH_THROW_CATCH
3663 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
3664# else
3665 AssertReleaseFailedStmt(off = UINT32_MAX);
3666# endif
3667
3668#else
3669# error "Port me"
3670#endif
3671 return off;
3672}
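
/*
 * Worked example for the ARM64 branches above: a subtrahend of 0x5000 has
 * all twelve low bits clear, so it fits the shifted form and becomes a
 * single "sub xDst, xDst, #0x5, lsl #12"; 0x5001 fits neither the plain nor
 * the shifted 12-bit immediate, so the caller must supply iGprTmp or the
 * emitter asserts / throws.
 */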
3673
3674
3675/**
3676 * Emits a 64-bit GPR subtract with a signed immediate subtrahend.
3677 *
3678 * @note Larger constants will require a temporary register. Failing to specify
3679 * one when needed will trigger fatal assertion / throw.
3680 */
3681DECL_INLINE_THROW(uint32_t)
3682iemNativeEmitSubGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int64_t iSubtrahend,
3683 uint8_t iGprTmp = UINT8_MAX)
3684
3685{
3686#ifdef RT_ARCH_AMD64
3687 off = iemNativeEmitSubGprImmEx(iemNativeInstrBufEnsure(pReNative, off, 13), off, iGprDst, iSubtrahend, iGprTmp);
3688#elif defined(RT_ARCH_ARM64)
3689 off = iemNativeEmitSubGprImmEx(iemNativeInstrBufEnsure(pReNative, off, 5), off, iGprDst, iSubtrahend, iGprTmp);
3690#else
3691# error "Port me"
3692#endif
3693 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3694 return off;
3695}
3696
3697
3698/**
3699 * Emits a 32-bit GPR subtract with a signed immediate subtrahend.
3700 *
3701 * This will optimize using DEC/INC/whatever, so try to avoid flag dependencies.
3702 *
3703 * @note ARM64: Larger constants will require a temporary register. Failing to
3704 * specify one when needed will trigger fatal assertion / throw.
3705 */
3706DECL_FORCE_INLINE_THROW(uint32_t)
3707iemNativeEmitSubGpr32ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int32_t iSubtrahend,
3708 uint8_t iGprTmp = UINT8_MAX)
3709{
3710#ifdef RT_ARCH_AMD64
3711 if (iGprDst >= 8)
3712 pCodeBuf[off++] = X86_OP_REX_B;
3713 if (iSubtrahend == 1)
3714 {
3715 /* dec r/m32 */
3716 pCodeBuf[off++] = 0xff;
3717 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
3718 }
3719 else if (iSubtrahend == -1)
3720 {
3721 /* inc r/m32 */
3722 pCodeBuf[off++] = 0xff;
3723 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
3724 }
3725 else if (iSubtrahend < 128 && iSubtrahend >= -128)
3726 {
3727 /* sub r/m32, imm8 */
3728 pCodeBuf[off++] = 0x83;
3729 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
3730 pCodeBuf[off++] = (uint8_t)iSubtrahend;
3731 }
3732 else
3733 {
3734 /* sub r/m32, imm32 */
3735 pCodeBuf[off++] = 0x81;
3736 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
3737 pCodeBuf[off++] = RT_BYTE1(iSubtrahend);
3738 pCodeBuf[off++] = RT_BYTE2(iSubtrahend);
3739 pCodeBuf[off++] = RT_BYTE3(iSubtrahend);
3740 pCodeBuf[off++] = RT_BYTE4(iSubtrahend);
3741 }
3742 RT_NOREF(iGprTmp);
3743
3744#elif defined(RT_ARCH_ARM64)
3745 uint32_t uAbsSubtrahend = RT_ABS(iSubtrahend);
3746 if (uAbsSubtrahend < 4096)
3747 {
3748 if (iSubtrahend >= 0)
3749 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsSubtrahend, false /*f64Bit*/);
3750 else
3751 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsSubtrahend, false /*f64Bit*/);
3752 }
3753 else if (uAbsSubtrahend <= 0xfff000 && !(uAbsSubtrahend & 0xfff))
3754 {
3755 if (iSubtrahend >= 0)
3756 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsSubtrahend >> 12,
3757 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
3758 else
3759 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsSubtrahend >> 12,
3760 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
3761 }
3762 else if (iGprTmp != UINT8_MAX)
3763 {
3764 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprTmp, (uint32_t)iSubtrahend);
3765 pCodeBuf[off++] = Armv8A64MkInstrSubReg(iGprDst, iGprDst, iGprTmp, false /*f64Bit*/);
3766 }
3767 else
3768# ifdef IEM_WITH_THROW_CATCH
3769 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
3770# else
3771 AssertReleaseFailedStmt(off = UINT32_MAX);
3772# endif
3773
3774#else
3775# error "Port me"
3776#endif
3777 return off;
3778}
3779
3780
3781/**
3782 * Emits a 32-bit GPR subtract with a signed immediate subtrahend.
3783 *
3784 * @note ARM64: Larger constants will require a temporary register. Failing to
3785 * specify one when needed will trigger fatal assertion / throw.
3786 */
3787DECL_INLINE_THROW(uint32_t)
3788iemNativeEmitSubGpr32Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iSubtrahend,
3789 uint8_t iGprTmp = UINT8_MAX)
3790
3791{
3792#ifdef RT_ARCH_AMD64
3793 off = iemNativeEmitSubGpr32ImmEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprDst, iSubtrahend, iGprTmp);
3794#elif defined(RT_ARCH_ARM64)
3795 off = iemNativeEmitSubGpr32ImmEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iSubtrahend, iGprTmp);
3796#else
3797# error "Port me"
3798#endif
3799 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3800 return off;
3801}
3802
3803
3804/**
3805 * Emits a 16-bit GPR subtract with a signed immediate subtrahend.
3806 *
3807 * This will optimize using DEC/INC/whatever; the ARM64 version will not set
3808 * flags, so it is not suitable as a basis for conditional jumps.
3809 *
3810 * @note AMD64: Will only update the lower 16 bits of the register.
3811 * @note ARM64: Will update the entire register.
3812 * @note ARM64: Larger constants will require a temporary register. Failing to
3813 * specify one when needed will trigger fatal assertion / throw.
3814 */
3815DECL_FORCE_INLINE_THROW(uint32_t)
3816iemNativeEmitSubGpr16ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int16_t iSubtrahend,
3817 uint8_t iGprTmp = UINT8_MAX)
3818{
3819#ifdef RT_ARCH_AMD64
3820 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
3821 if (iGprDst >= 8)
3822 pCodeBuf[off++] = X86_OP_REX_B;
3823 if (iSubtrahend == 1)
3824 {
3825 /* dec r/m16 */
3826 pCodeBuf[off++] = 0xff;
3827 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
3828 }
3829 else if (iSubtrahend == -1)
3830 {
3831 /* inc r/m16 */
3832 pCodeBuf[off++] = 0xff;
3833 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
3834 }
3835 else if ((int8_t)iSubtrahend == iSubtrahend)
3836 {
3837 /* sub r/m16, imm8 */
3838 pCodeBuf[off++] = 0x83;
3839 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
3840 pCodeBuf[off++] = (uint8_t)iSubtrahend;
3841 }
3842 else
3843 {
3844 /* sub r/m16, imm16 */
3845 pCodeBuf[off++] = 0x81;
3846 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
3847 pCodeBuf[off++] = RT_BYTE1((uint16_t)iSubtrahend);
3848 pCodeBuf[off++] = RT_BYTE2((uint16_t)iSubtrahend);
3849 }
3850 RT_NOREF(iGprTmp);
3851
3852#elif defined(RT_ARCH_ARM64)
3853 uint32_t uAbsSubtrahend = RT_ABS(iSubtrahend);
3854 if (uAbsSubtrahend < 4096)
3855 {
3856 if (iSubtrahend >= 0)
3857 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsSubtrahend, false /*f64Bit*/);
3858 else
3859 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsSubtrahend, false /*f64Bit*/);
3860 }
3861 else if (uAbsSubtrahend <= 0xfff000 && !(uAbsSubtrahend & 0xfff))
3862 {
3863 if (iSubtrahend >= 0)
3864 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsSubtrahend >> 12,
3865 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
3866 else
3867 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsSubtrahend >> 12,
3868 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
3869 }
3870 else if (iGprTmp != UINT8_MAX)
3871 {
3872 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprTmp, (uint32_t)iSubtrahend);
3873 pCodeBuf[off++] = Armv8A64MkInstrSubReg(iGprDst, iGprDst, iGprTmp, false /*f64Bit*/);
3874 }
3875 else
3876# ifdef IEM_WITH_THROW_CATCH
3877 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
3878# else
3879 AssertReleaseFailedStmt(off = UINT32_MAX);
3880# endif
3881 pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 15, 0, false /*f64Bit*/);
3882
3883#else
3884# error "Port me"
3885#endif
3886 return off;
3887}
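
/*
 * Behavioural note on the 16-bit subtract above: per the @notes, on AMD64 a
 * "sub ax, imm" leaves bits 16 thru 63 untouched, while the ARM64 sequence
 * ends with an AND that zeroes everything above bit 15. Cross-host code
 * must therefore not rely on the upper bits of @a iGprDst afterwards.
 */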
3888
3889
3890/**
3891 * Emits adding a 64-bit GPR to another, storing the result in the first.
3892 * @note The AMD64 version sets flags.
3893 */
3894DECL_FORCE_INLINE(uint32_t)
3895iemNativeEmitAddTwoGprsEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend)
3896{
3897#if defined(RT_ARCH_AMD64)
3898 /* add Gv,Ev */
3899 pCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
3900 | (iGprAddend < 8 ? 0 : X86_OP_REX_B);
3901 pCodeBuf[off++] = 0x03;
3902 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprAddend & 7);
3903
3904#elif defined(RT_ARCH_ARM64)
3905 pCodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iGprAddend);
3906
3907#else
3908# error "Port me"
3909#endif
3910 return off;
3911}
3912
3913
3914/**
3915 * Emits adding a 64-bit GPR to another, storing the result in the first.
3916 * @note The AMD64 version sets flags.
3917 */
3918DECL_INLINE_THROW(uint32_t)
3919iemNativeEmitAddTwoGprs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend)
3920{
3921#if defined(RT_ARCH_AMD64)
3922 off = iemNativeEmitAddTwoGprsEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprAddend);
3923#elif defined(RT_ARCH_ARM64)
3924 off = iemNativeEmitAddTwoGprsEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprAddend);
3925#else
3926# error "Port me"
3927#endif
3928 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3929 return off;
3930}
3931
3932
3933/**
3934 * Emits adding a 32-bit GPR to another, storing the result in the first.
3935 * @note The AMD64 version sets flags.
3936 */
3937DECL_FORCE_INLINE(uint32_t)
3938iemNativeEmitAddTwoGprs32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend)
3939{
3940#if defined(RT_ARCH_AMD64)
3941 /* add Gv,Ev */
3942 if (iGprDst >= 8 || iGprAddend >= 8)
3943 pCodeBuf[off++] = (iGprDst >= 8 ? X86_OP_REX_R : 0)
3944 | (iGprAddend >= 8 ? X86_OP_REX_B : 0);
3945 pCodeBuf[off++] = 0x03;
3946 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprAddend & 7);
3947
3948#elif defined(RT_ARCH_ARM64)
3949 pCodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iGprAddend, false /*f64Bit*/);
3950
3951#else
3952# error "Port me"
3953#endif
3954 return off;
3955}
3956
3957
3958/**
3959 * Emits adding a 32-bit GPR to another, storing the result in the first.
3960 * @note The AMD64 version sets flags.
3961 */
3962DECL_INLINE_THROW(uint32_t)
3963iemNativeEmitAddTwoGprs32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend)
3964{
3965#if defined(RT_ARCH_AMD64)
3966 off = iemNativeEmitAddTwoGprs32Ex(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprAddend);
3967#elif defined(RT_ARCH_ARM64)
3968 off = iemNativeEmitAddTwoGprs32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprAddend);
3969#else
3970# error "Port me"
3971#endif
3972 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3973 return off;
3974}
3975
3976
3977/**
3978 * Emits a 64-bit GPR addition with an 8-bit signed immediate.
3979 */
3980DECL_INLINE_THROW(uint32_t)
3981iemNativeEmitAddGprImm8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int8_t iImm8)
3982{
3983#if defined(RT_ARCH_AMD64)
3984 /* add or inc */
3985 pCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
3986 if (iImm8 != 1)
3987 {
3988 pCodeBuf[off++] = 0x83;
3989 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
3990 pCodeBuf[off++] = (uint8_t)iImm8;
3991 }
3992 else
3993 {
3994 pCodeBuf[off++] = 0xff;
3995 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
3996 }
3997
3998#elif defined(RT_ARCH_ARM64)
3999 if (iImm8 >= 0)
4000 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, (uint8_t)iImm8);
4001 else
4002 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, (uint8_t)-iImm8);
4003
4004#else
4005# error "Port me"
4006#endif
4007 return off;
4008}
4009
4010
4011/**
4012 * Emits a 64-bit GPR addition with an 8-bit signed immediate.
4013 */
4014DECL_INLINE_THROW(uint32_t)
4015iemNativeEmitAddGprImm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8)
4016{
4017#if defined(RT_ARCH_AMD64)
4018 off = iemNativeEmitAddGprImm8Ex(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, iImm8);
4019#elif defined(RT_ARCH_ARM64)
4020 off = iemNativeEmitAddGprImm8Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iImm8);
4021#else
4022# error "Port me"
4023#endif
4024 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4025 return off;
4026}
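
/*
 * Worked example for the 8-bit-immediate add above: with iGprDst=0 (xAX) and
 * iImm8=1 the AMD64 path takes the INC shortcut and emits 48 FF C0
 * (inc rax); any other immediate uses the four byte 48 83 C0 ib form
 * (add rax, imm8). Byte values worked out by hand, shown for illustration.
 */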
4027
4028
4029/**
4030 * Emits a 32-bit GPR addition with an 8-bit signed immediate.
4031 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
4032 */
4033DECL_FORCE_INLINE(uint32_t)
4034iemNativeEmitAddGpr32Imm8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int8_t iImm8)
4035{
4036#if defined(RT_ARCH_AMD64)
4037 /* add or inc */
4038 if (iGprDst >= 8)
4039 pCodeBuf[off++] = X86_OP_REX_B;
4040 if (iImm8 != 1)
4041 {
4042 pCodeBuf[off++] = 0x83;
4043 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4044 pCodeBuf[off++] = (uint8_t)iImm8;
4045 }
4046 else
4047 {
4048 pCodeBuf[off++] = 0xff;
4049 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4050 }
4051
4052#elif defined(RT_ARCH_ARM64)
4053 if (iImm8 >= 0)
4054 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint8_t)iImm8, false /*f64Bit*/);
4055 else
4056 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint8_t)-iImm8, false /*f64Bit*/);
4057
4058#else
4059# error "Port me"
4060#endif
4061 return off;
4062}
4063
4064
4065/**
4066 * Emits a 32-bit GPR addition with an 8-bit signed immediate.
4067 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
4068 */
4069DECL_INLINE_THROW(uint32_t)
4070iemNativeEmitAddGpr32Imm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8)
4071{
4072#if defined(RT_ARCH_AMD64)
4073 off = iemNativeEmitAddGpr32Imm8Ex(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, iImm8);
4074#elif defined(RT_ARCH_ARM64)
4075 off = iemNativeEmitAddGpr32Imm8Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iImm8);
4076#else
4077# error "Port me"
4078#endif
4079 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4080 return off;
4081}
4082
4083
4084/**
4085 * Emits a 64-bit GPR addition with a 64-bit signed addend.
4086 *
4087 * @note Will assert / throw if @a iGprTmp is not specified when needed.
4088 */
4089DECL_FORCE_INLINE_THROW(uint32_t)
4090iemNativeEmitAddGprImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int64_t iAddend, uint8_t iGprTmp = UINT8_MAX)
4091{
4092#if defined(RT_ARCH_AMD64)
4093 if ((int8_t)iAddend == iAddend)
4094 return iemNativeEmitAddGprImm8Ex(pCodeBuf, off, iGprDst, (int8_t)iAddend);
4095
4096 if ((int32_t)iAddend == iAddend)
4097 {
4098 /* add grp, imm32 */
4099 pCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
4100 pCodeBuf[off++] = 0x81;
4101 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4102 pCodeBuf[off++] = RT_BYTE1((uint32_t)iAddend);
4103 pCodeBuf[off++] = RT_BYTE2((uint32_t)iAddend);
4104 pCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
4105 pCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
4106 }
4107 else if (iGprTmp != UINT8_MAX)
4108 {
4109 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprTmp, iAddend);
4110
4111 /* add dst, tmpreg */
4112 pCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
4113 | (iGprTmp < 8 ? 0 : X86_OP_REX_B);
4114 pCodeBuf[off++] = 0x03;
4115 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprTmp & 7);
4116 }
4117 else
4118# ifdef IEM_WITH_THROW_CATCH
4119 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4120# else
4121 AssertReleaseFailedStmt(off = UINT32_MAX);
4122# endif
4123
4124#elif defined(RT_ARCH_ARM64)
4125 uint64_t const uAbsAddend = (uint64_t)RT_ABS(iAddend);
4126 if (uAbsAddend < 4096)
4127 {
4128 if (iAddend >= 0)
4129 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend);
4130 else
4131 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend);
4132 }
4133 else if (uAbsAddend <= 0xfff000 && !(uAbsAddend & 0xfff))
4134 {
4135 if (iAddend >= 0)
4136 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend >> 12,
4137 true /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4138 else
4139 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, (uint32_t)uAbsAddend >> 12,
4140 true /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4141 }
4142 else if (iGprTmp != UINT8_MAX)
4143 {
4144 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprTmp, iAddend);
4145 pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprDst, iGprTmp);
4146 }
4147 else
4148# ifdef IEM_WITH_THROW_CATCH
4149 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4150# else
4151 AssertReleaseFailedStmt(off = UINT32_MAX);
4152# endif
4153
4154#else
4155# error "Port me"
4156#endif
4157 return off;
4158}
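
/*
 * Worked example for the ARM64 immediate forms above: an addend of 0x123000
 * is encodable as a single "add xDst, xDst, #0x123, lsl #12", while
 * 0x123456 fits neither 12-bit form, so iGprTmp must be supplied or the
 * emitter asserts / throws.
 */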
4159
4160
4161/**
4162 * Emits a 64-bit GPR addition with a 64-bit signed addend.
4163 */
4164DECL_INLINE_THROW(uint32_t)
4165iemNativeEmitAddGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int64_t iAddend)
4166{
4167#if defined(RT_ARCH_AMD64)
4168 if (iAddend <= INT8_MAX && iAddend >= INT8_MIN)
4169 return iemNativeEmitAddGprImm8(pReNative, off, iGprDst, (int8_t)iAddend);
4170
4171 if (iAddend <= INT32_MAX && iAddend >= INT32_MIN)
4172 {
4173 /* add grp, imm32 */
4174 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
4175 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
4176 pbCodeBuf[off++] = 0x81;
4177 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4178 pbCodeBuf[off++] = RT_BYTE1((uint32_t)iAddend);
4179 pbCodeBuf[off++] = RT_BYTE2((uint32_t)iAddend);
4180 pbCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
4181 pbCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
4182 }
4183 else
4184 {
4185 /* Best to use a temporary register to deal with this in the simplest way: */
4186 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend);
4187
4188 /* add dst, tmpreg */
4189 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
4190 pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
4191 | (iTmpReg < 8 ? 0 : X86_OP_REX_B);
4192 pbCodeBuf[off++] = 0x03;
4193 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iTmpReg & 7);
4194
4195 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
4196 }
4197
4198#elif defined(RT_ARCH_ARM64)
4199 if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
4200 {
4201 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
4202 if (iAddend >= 0)
4203 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend);
4204 else
4205 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend);
4206 }
4207 else
4208 {
4209 /* Use temporary register for the immediate. */
4210 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend);
4211
4212 /* add gprdst, gprdst, tmpreg */
4213 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
4214 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg);
4215
4216 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
4217 }
4218
4219#else
4220# error "Port me"
4221#endif
4222 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4223 return off;
4224}
4225
4226
4227/**
4228 * Emits a 32-bit GPR addition with a 32-bit signed immediate.
4229 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
4230 * @note For ARM64 the iAddend value must be in the range 0x000..0xfff,
4231 * or that range shifted 12 bits to the left (e.g. 0x1000..0xfff000 with
4232 * the lower 12 bits always zero). The negative ranges are also allowed,
4233 * making it behave like a subtraction. If the constant does not conform,
4234 * the emitter will assert / throw.
4235 */
4236DECL_FORCE_INLINE_THROW(uint32_t)
4237iemNativeEmitAddGpr32ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int32_t iAddend)
4238{
4239#if defined(RT_ARCH_AMD64)
4240 if (iAddend <= INT8_MAX && iAddend >= INT8_MIN)
4241 return iemNativeEmitAddGpr32Imm8Ex(pCodeBuf, off, iGprDst, (int8_t)iAddend);
4242
4243 /* add grp, imm32 */
4244 if (iGprDst >= 8)
4245 pCodeBuf[off++] = X86_OP_REX_B;
4246 pCodeBuf[off++] = 0x81;
4247 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4248 pCodeBuf[off++] = RT_BYTE1((uint32_t)iAddend);
4249 pCodeBuf[off++] = RT_BYTE2((uint32_t)iAddend);
4250 pCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
4251 pCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
4252
4253#elif defined(RT_ARCH_ARM64)
4254 uint32_t const uAbsAddend = (uint32_t)RT_ABS(iAddend);
4255 if (uAbsAddend <= 0xfff)
4256 {
4257 if (iAddend >= 0)
4258 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
4259 else
4260 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
4261 }
4262 else if (uAbsAddend <= 0xfff000 && !(uAbsAddend & 0xfff))
4263 {
4264 if (iAddend >= 0)
4265 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, uAbsAddend >> 12,
4266 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4267 else
4268 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, uAbsAddend >> 12,
4269 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4270 }
4271 else
4272# ifdef IEM_WITH_THROW_CATCH
4273 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4274# else
4275 AssertReleaseFailedStmt(off = UINT32_MAX);
4276# endif
4277
4278#else
4279# error "Port me"
4280#endif
4281 return off;
4282}
4283
4284
4285/**
4286 * Emits a 32-bit GPR addition with a 32-bit signed immediate.
4287 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
4288 */
4289DECL_INLINE_THROW(uint32_t)
4290iemNativeEmitAddGpr32Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iAddend)
4291{
4292#if defined(RT_ARCH_AMD64)
4293 off = iemNativeEmitAddGpr32ImmEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprDst, iAddend);
4294
4295#elif defined(RT_ARCH_ARM64)
4296 if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
4297 {
4298 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
4299 if (iAddend >= 0)
4300 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend, false /*f64Bit*/);
4301 else
4302 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend, false /*f64Bit*/);
4303 }
4304 else
4305 {
4306 /* Use temporary register for the immediate. */
4307 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint32_t)iAddend);
4308
4309 /* add gprdst, gprdst, tmpreg */
4310 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
4311 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg, false /*f64Bit*/);
4312
4313 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
4314 }
4315
4316#else
4317# error "Port me"
4318#endif
4319 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4320 return off;
4321}
4322
4323
4324/**
4325 * Emits a 16-bit GPR add with a signed immediate addend.
4326 *
4327 * This will optimize using INC/DEC/whatever; the ARM64 version will not set
4328 * flags, so it is not suitable as a basis for conditional jumps.
4329 *
4330 * @note AMD64: Will only update the lower 16 bits of the register.
4331 * @note ARM64: Will update the entire register.
4332 * @note ARM64: Larger constants will require a temporary register. Failing to
4333 * specify one when needed will trigger fatal assertion / throw.
4334 * @sa iemNativeEmitSubGpr16ImmEx
4335 */
4336DECL_FORCE_INLINE_THROW(uint32_t)
4337iemNativeEmitAddGpr16ImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, int16_t iAddend,
4338 uint8_t iGprTmp = UINT8_MAX)
4339{
4340#ifdef RT_ARCH_AMD64
4341 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
4342 if (iGprDst >= 8)
4343 pCodeBuf[off++] = X86_OP_REX_B;
4344 if (iAddend == 1)
4345 {
4346 /* inc r/m16 */
4347 pCodeBuf[off++] = 0xff;
4348 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4349 }
4350 else if (iAddend == -1)
4351 {
4352 /* dec r/m16 */
4353 pCodeBuf[off++] = 0xff;
4354 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
4355 }
4356 else if ((int8_t)iAddend == iAddend)
4357 {
4358 /* add r/m16, imm8 */
4359 pCodeBuf[off++] = 0x83;
4360 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4361 pCodeBuf[off++] = (uint8_t)iAddend;
4362 }
4363 else
4364 {
4365 /* add r/m16, imm16 */
4366 pCodeBuf[off++] = 0x81;
4367 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
4368 pCodeBuf[off++] = RT_BYTE1((uint16_t)iAddend);
4369 pCodeBuf[off++] = RT_BYTE2((uint16_t)iAddend);
4370 }
4371 RT_NOREF(iGprTmp);
4372
4373#elif defined(RT_ARCH_ARM64)
4374 uint32_t uAbsAddend = RT_ABS(iAddend);
4375 if (uAbsAddend < 4096)
4376 {
4377 if (iAddend >= 0)
4378 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
4379 else
4380 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsAddend, false /*f64Bit*/);
4381 }
4382 else if (uAbsAddend <= 0xfff000 && !(uAbsAddend & 0xfff))
4383 {
4384 if (iAddend >= 0)
4385 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprDst, uAbsAddend >> 12,
4386 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
4387 else
4388 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprDst, uAbsAddend >> 12,
4389 false /*f64Bit*/, false /*fSetFlags*/, true /*fShift*/);
4390 }
4391 else if (iGprTmp != UINT8_MAX)
4392 {
4393 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprTmp, (uint32_t)iAddend);
4394 pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprDst, iGprTmp, false /*f64Bit*/);
4395 }
4396 else
4397# ifdef IEM_WITH_THROW_CATCH
4398 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4399# else
4400 AssertReleaseFailedStmt(off = UINT32_MAX);
4401# endif
4402 pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 15, 0, false /*f64Bit*/);
4403
4404#else
4405# error "Port me"
4406#endif
4407 return off;
4408}
4409
4410
4411
4412/**
4413 * Adds two 64-bit GPRs together, storing the result in a third register.
4414 */
4415DECL_FORCE_INLINE(uint32_t)
4416iemNativeEmitGprEqGprPlusGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend1, uint8_t iGprAddend2)
4417{
4418#ifdef RT_ARCH_AMD64
4419 if (iGprDst != iGprAddend1 && iGprDst != iGprAddend2)
4420 {
4421 /** @todo consider LEA */
4422 off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprAddend1);
4423 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend2);
4424 }
4425 else
4426 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprDst != iGprAddend1 ? iGprAddend1 : iGprAddend2);
4427
4428#elif defined(RT_ARCH_ARM64)
4429 pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprAddend1, iGprAddend2);
4430
4431#else
4432# error "Port me!"
4433#endif
4434 return off;
4435}
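
/*
 * Sketch for the LEA @todo above (untested, uses only macros already seen in
 * this file): when iGprDst differs from both addends, a single LEA could
 * replace the mov+add pair:
 *
 *      pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R)
 *                      | (iGprAddend2 < 8 ? 0 : X86_OP_REX_X)
 *                      | (iGprAddend1 < 8 ? 0 : X86_OP_REX_B);
 *      pCodeBuf[off++] = 0x8d;                                          // lea
 *      pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, iGprDst & 7, 4);  // SIB follows
 *      pCodeBuf[off++] = (uint8_t)((iGprAddend1 & 7) | ((iGprAddend2 & 7) << 3)); // scale 1
 *
 * rSP cannot be the index register and rBP/r13 as base needs a zero disp8,
 * which is probably why the simpler mov+add pairing was kept.
 */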
4436
4437
4438
4439/**
4440 * Adds two 32-bit GPRs together, storing the result in a third register.
4441 * @note Bits 32 thru 63 in @a iGprDst will be zero after the operation.
4442 */
4443DECL_FORCE_INLINE(uint32_t)
4444iemNativeEmitGpr32EqGprPlusGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend1, uint8_t iGprAddend2)
4445{
4446#ifdef RT_ARCH_AMD64
4447 if (iGprDst != iGprAddend1 && iGprDst != iGprAddend2)
4448 {
4449 /** @todo consider LEA */
4450 off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprAddend1);
4451 off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprAddend2);
4452 }
4453 else
4454 off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprDst != iGprAddend1 ? iGprAddend1 : iGprAddend2);
4455
4456#elif defined(RT_ARCH_ARM64)
4457 pCodeBuf[off++] = Armv8A64MkInstrAddReg(iGprDst, iGprAddend1, iGprAddend2, false /*f64Bit*/);
4458
4459#else
4460# error "Port me!"
4461#endif
4462 return off;
4463}
4464
4465
4466/**
4467 * Adds a 64-bit GPR and a 64-bit signed constant, storing the result in a
4468 * third register.
4469 *
4470 * @note The ARM64 version does not work for non-trivial constants if the
4471 * two registers are the same. Will assert / throw exception.
4472 */
4473DECL_FORCE_INLINE_THROW(uint32_t)
4474iemNativeEmitGprEqGprPlusImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend, int64_t iImmAddend)
4475{
4476#ifdef RT_ARCH_AMD64
4477 /** @todo consider LEA */
4478 if ((int8_t)iImmAddend == iImmAddend)
4479 {
4480 off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprAddend);
4481 off = iemNativeEmitAddGprImm8Ex(pCodeBuf, off, iGprDst, (int8_t)iImmAddend);
4482 }
4483 else
4484 {
4485 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, iImmAddend);
4486 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend);
4487 }
4488
4489#elif defined(RT_ARCH_ARM64)
4490 uint64_t const uAbsImmAddend = RT_ABS(iImmAddend);
4491 if (uAbsImmAddend < 4096)
4492 {
4493 if (iImmAddend >= 0)
4494 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprAddend, uAbsImmAddend);
4495 else
4496 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprAddend, uAbsImmAddend);
4497 }
4498 else if (uAbsImmAddend <= 0xfff000 && !(uAbsImmAddend & 0xfff))
4499 {
4500 if (iImmAddend >= 0)
4501 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprAddend, uAbsImmAddend >> 12, true /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4502 else
4503 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprAddend, uAbsImmAddend >> 12, true /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4504 }
4505 else if (iGprDst != iGprAddend)
4506 {
4507 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, (uint64_t)iImmAddend);
4508 off = iemNativeEmitAddTwoGprsEx(pCodeBuf, off, iGprDst, iGprAddend);
4509 }
4510 else
4511# ifdef IEM_WITH_THROW_CATCH
4512 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4513# else
4514 AssertReleaseFailedStmt(off = UINT32_MAX);
4515# endif
4516
4517#else
4518# error "Port me!"
4519#endif
4520 return off;
4521}
4522
4523
4524/**
4525 * Adds a 32-bit GPR and a 32-bit signed constant, storing the result in a
4526 * third register.
4527 *
4528 * @note Bits 32 thru 63 in @a iGprDst will be zero after the operation.
4529 *
4530 * @note The ARM64 version does not work for non-trivial constants if the
4531 * two registers are the same. Will assert / throw exception.
4532 */
4533DECL_FORCE_INLINE_THROW(uint32_t)
4534iemNativeEmitGpr32EqGprPlusImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend, int32_t iImmAddend)
4535{
4536#ifdef RT_ARCH_AMD64
4537 /** @todo consider LEA */
4538 if ((int8_t)iImmAddend == iImmAddend)
4539 {
4540 off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprAddend);
4541 off = iemNativeEmitAddGpr32Imm8Ex(pCodeBuf, off, iGprDst, (int8_t)iImmAddend);
4542 }
4543 else
4544 {
4545 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprDst, iImmAddend);
4546 off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprAddend);
4547 }
4548
4549#elif defined(RT_ARCH_ARM64)
4550 uint32_t const uAbsImmAddend = RT_ABS(iImmAddend);
4551 if (uAbsImmAddend < 4096)
4552 {
4553 if (iImmAddend >= 0)
4554 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprAddend, uAbsImmAddend, false /*f64Bit*/);
4555 else
4556 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprAddend, uAbsImmAddend, false /*f64Bit*/);
4557 }
4558 else if (uAbsImmAddend <= 0xfff000 && !(uAbsImmAddend & 0xfff))
4559 {
4560 if (iImmAddend >= 0)
4561 pCodeBuf[off++] = Armv8A64MkInstrAddUImm12(iGprDst, iGprAddend, uAbsImmAddend >> 12, false /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4562 else
4563 pCodeBuf[off++] = Armv8A64MkInstrSubUImm12(iGprDst, iGprAddend, uAbsImmAddend >> 12, false /*f64Bit*/, false /*fSetFlags*/, true /*fShift12*/);
4564 }
4565 else if (iGprDst != iGprAddend)
4566 {
4567 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprDst, (uint32_t)iImmAddend);
4568 off = iemNativeEmitAddTwoGprs32Ex(pCodeBuf, off, iGprDst, iGprAddend);
4569 }
4570 else
4571# ifdef IEM_WITH_THROW_CATCH
4572 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4573# else
4574 AssertReleaseFailedStmt(off = UINT32_MAX);
4575# endif
4576
4577#else
4578# error "Port me!"
4579#endif
4580 return off;
4581}
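
/*
 * A minimal usage sketch of the emitter above, computing a 32-bit
 * "effective address" style result dst = src + 16; the function name and
 * constant are illustrative assumptions, not part of the IEM API.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitExampleEffAddr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
    off = iemNativeEmitGpr32EqGprPlusImmEx(iemNativeInstrBufEnsure(pReNative, off, 8), off, iGprDst, iGprSrc, 16);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}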
4582
4583
4584/*********************************************************************************************************************************
4585* Unary Operations *
4586*********************************************************************************************************************************/
4587
4588/**
4589 * Emits code for two's complement negation of a 64-bit GPR.
4590 */
4591DECL_FORCE_INLINE_THROW(uint32_t)
4592iemNativeEmitNegGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst)
4593{
4594#if defined(RT_ARCH_AMD64)
4595 /* neg Ev */
4596 pCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
4597 pCodeBuf[off++] = 0xf7;
4598 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 3, iGprDst & 7);
4599
4600#elif defined(RT_ARCH_ARM64)
4601 /* sub dst, xzr, dst */
4602 pCodeBuf[off++] = Armv8A64MkInstrNeg(iGprDst);
4603
4604#else
4605# error "Port me"
4606#endif
4607 return off;
4608}
4609
4610
4611/**
4612 * Emits code for two's complement negation of a 64-bit GPR.
4613 */
4614DECL_INLINE_THROW(uint32_t)
4615iemNativeEmitNegGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
4616{
4617#if defined(RT_ARCH_AMD64)
4618 off = iemNativeEmitNegGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst);
4619#elif defined(RT_ARCH_ARM64)
4620 off = iemNativeEmitNegGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst);
4621#else
4622# error "Port me"
4623#endif
4624 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4625 return off;
4626}
4627
4628
4629/**
4630 * Emits code for two's complement negation of a 32-bit GPR.
4631 * @note Bits 32 thru 63 are set to zero.
4632 */
4633DECL_FORCE_INLINE_THROW(uint32_t)
4634iemNativeEmitNegGpr32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst)
4635{
4636#if defined(RT_ARCH_AMD64)
4637 /* neg Ev */
4638 if (iGprDst >= 8)
4639 pCodeBuf[off++] = X86_OP_REX_B;
4640 pCodeBuf[off++] = 0xf7;
4641 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 3, iGprDst & 7);
4642
4643#elif defined(RT_ARCH_ARM64)
4644 /* sub dst, xzr, dst */
4645 pCodeBuf[off++] = Armv8A64MkInstrNeg(iGprDst, false /*f64Bit*/);
4646
4647#else
4648# error "Port me"
4649#endif
4650 return off;
4651}
4652
4653
4654/**
4655 * Emits code for two's complement negation of a 32-bit GPR.
4656 * @note Bits 32 thru 63 are set to zero.
4657 */
4658DECL_INLINE_THROW(uint32_t)
4659iemNativeEmitNegGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
4660{
4661#if defined(RT_ARCH_AMD64)
4662 off = iemNativeEmitNegGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst);
4663#elif defined(RT_ARCH_ARM64)
4664 off = iemNativeEmitNegGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst);
4665#else
4666# error "Port me"
4667#endif
4668 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4669 return off;
4670}
4671
4672
4673
4674/*********************************************************************************************************************************
4675* Bit Operations *
4676*********************************************************************************************************************************/
4677
4678/**
4679 * Emits code for clearing bits 16 thru 63 in the GPR.
4680 */
4681DECL_INLINE_THROW(uint32_t)
4682iemNativeEmitClear16UpGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
4683{
4684#if defined(RT_ARCH_AMD64)
4685 /* movzx Gv,Ew */
4686 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
4687 if (iGprDst >= 8)
4688 pbCodeBuf[off++] = X86_OP_REX_B | X86_OP_REX_R;
4689 pbCodeBuf[off++] = 0x0f;
4690 pbCodeBuf[off++] = 0xb7;
4691 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprDst & 7);
4692
4693#elif defined(RT_ARCH_ARM64)
4694 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
4695# if 1
4696 pu32CodeBuf[off++] = Armv8A64MkInstrUxth(iGprDst, iGprDst);
4697# else
4698 ///* This produces 0xffff; 0x4f: N=1 imms=001111 (immr=0) => size=64 length=15 */
4699 //pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 0x4f);
4700# endif
4701#else
4702# error "Port me"
4703#endif
4704 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4705 return off;
4706}
4707
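/**
 * Example (editor's illustrative sketch): a 16-bit zero-extending register
 * copy, i.e. iGprDst = (uint16_t)iGprSrc, composed from emitters in this
 * file.  iemNativeEmitLoadGprFromGpr is assumed to be the plain register
 * copy emitter defined earlier in the file.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitLoadGprU16Example(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, iGprDst, iGprSrc);
    return iemNativeEmitClear16UpGpr(pReNative, off, iGprDst);
}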
4708
4709/**
4710 * Emits code for AND'ing two 64-bit GPRs.
4711 *
4712 * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
4713 * and ARM64 hosts.
4714 */
4715DECL_FORCE_INLINE(uint32_t)
4716iemNativeEmitAndGprByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, bool fSetFlags = false)
4717{
4718#if defined(RT_ARCH_AMD64)
4719 /* and Gv, Ev */
4720 pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
4721 pCodeBuf[off++] = 0x23;
4722 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
4723 RT_NOREF(fSetFlags);
4724
4725#elif defined(RT_ARCH_ARM64)
4726 if (!fSetFlags)
4727 pCodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc);
4728 else
4729 pCodeBuf[off++] = Armv8A64MkInstrAnds(iGprDst, iGprDst, iGprSrc);
4730
4731#else
4732# error "Port me"
4733#endif
4734 return off;
4735}
4736
4737
4738/**
4739 * Emits code for AND'ing two 64-bit GPRs.
4740 *
4741 * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
4742 * and ARM64 hosts.
4743 */
4744DECL_INLINE_THROW(uint32_t)
4745iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, bool fSetFlags = false)
4746{
4747#if defined(RT_ARCH_AMD64)
4748 off = iemNativeEmitAndGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc, fSetFlags);
4749#elif defined(RT_ARCH_ARM64)
4750 off = iemNativeEmitAndGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc, fSetFlags);
4751#else
4752# error "Port me"
4753#endif
4754 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4755 return off;
4756}
4757
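/**
 * Example (editor's illustrative sketch): a TEST-like emission - AND'ing a
 * scratch copy of the left operand with flag setting, so that JZ/JNZ (or
 * B.EQ/B.NE) emitted afterwards can key off a zero result.
 * iemNativeEmitLoadGprFromGpr is assumed to be the register copy emitter
 * defined earlier in the file; iGprTmp must be a scratch register.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitTestGprByGprExample(PIEMRECOMPILERSTATE pReNative, uint32_t off,
                                 uint8_t iGprTmp, uint8_t iGprLeft, uint8_t iGprRight)
{
    off = iemNativeEmitLoadGprFromGpr(pReNative, off, iGprTmp, iGprLeft);
    return iemNativeEmitAndGprByGpr(pReNative, off, iGprTmp, iGprRight, true /*fSetFlags*/);
}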
4758
4759/**
4760 * Emits code for AND'ing two 32-bit GPRs (bits 63:32 of the destination are cleared).
4761 */
4762DECL_FORCE_INLINE(uint32_t)
4763iemNativeEmitAndGpr32ByGpr32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, bool fSetFlags = false)
4764{
4765#if defined(RT_ARCH_AMD64)
4766 /* and Gv, Ev */
4767 if (iGprDst >= 8 || iGprSrc >= 8)
4768 pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
4769 pCodeBuf[off++] = 0x23;
4770 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
4771 RT_NOREF(fSetFlags);
4772
4773#elif defined(RT_ARCH_ARM64)
4774 if (!fSetFlags)
4775 pCodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
4776 else
4777 pCodeBuf[off++] = Armv8A64MkInstrAnds(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
4778
4779#else
4780# error "Port me"
4781#endif
4782 return off;
4783}
4784
4785
4786/**
4787 * Emits code for AND'ing two 32-bit GPRs (bits 63:32 of the destination are cleared).
4788 */
4789DECL_INLINE_THROW(uint32_t)
4790iemNativeEmitAndGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, bool fSetFlags = false)
4791{
4792#if defined(RT_ARCH_AMD64)
4793 off = iemNativeEmitAndGpr32ByGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc, fSetFlags);
4794#elif defined(RT_ARCH_ARM64)
4795 off = iemNativeEmitAndGpr32ByGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc, fSetFlags);
4796#else
4797# error "Port me"
4798#endif
4799 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4800 return off;
4801}
4802
4803
4804/**
4805 * Emits code for AND'ing a 64-bit GPR with a constant.
4806 *
4807 * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
4808 * and ARM64 hosts.
4809 */
4810DECL_INLINE_THROW(uint32_t)
4811iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm, bool fSetFlags = false)
4812{
4813#if defined(RT_ARCH_AMD64)
4814 if ((int64_t)uImm == (int8_t)uImm)
4815 {
4816 /* and Ev, imm8 */
4817 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
4818 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
4819 pbCodeBuf[off++] = 0x83;
4820 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
4821 pbCodeBuf[off++] = (uint8_t)uImm;
4822 }
4823 else if ((int64_t)uImm == (int32_t)uImm)
4824 {
4825 /* and Ev, imm32 */
4826 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
4827 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
4828 pbCodeBuf[off++] = 0x81;
4829 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
4830 pbCodeBuf[off++] = RT_BYTE1(uImm);
4831 pbCodeBuf[off++] = RT_BYTE2(uImm);
4832 pbCodeBuf[off++] = RT_BYTE3(uImm);
4833 pbCodeBuf[off++] = RT_BYTE4(uImm);
4834 }
4835 else
4836 {
4837 /* Use temporary register for the 64-bit immediate. */
4838 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
4839 off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg);
4840 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
4841 }
4842 RT_NOREF(fSetFlags);
4843
4844#elif defined(RT_ARCH_ARM64)
4845 uint32_t uImmR = 0;
4846 uint32_t uImmNandS = 0;
4847 if (Armv8A64ConvertMask64ToImmRImmS(uImm, &uImmNandS, &uImmR))
4848 {
4849 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
4850 if (!fSetFlags)
4851 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR);
4852 else
4853 pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR);
4854 }
4855 else
4856 {
4857 /* Use temporary register for the 64-bit immediate. */
4858 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
4859 off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg, fSetFlags);
4860 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
4861 }
4862
4863#else
4864# error "Port me"
4865#endif
4866 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4867 return off;
4868}
4869
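/**
 * Example (editor's illustrative sketch): masking a register down to its low
 * 48 bits.  The mask is a contiguous run of ones, so the ARM64 path folds it
 * into a single AND with a logical immediate, while the AMD64 path goes
 * through a temporary register because the value does not fit a
 * sign-extended imm32.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitKeepLow48BitsExample(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
{
    return iemNativeEmitAndGprByImm(pReNative, off, iGprDst, UINT64_C(0x0000ffffffffffff));
}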
4870
4871/**
4872 * Emits code for AND'ing a 32-bit GPR with a constant.
4873 * @note Bits 32 thru 63 in the destination will be zero after the operation.
4874 * @note For ARM64 this only supports @a uImm values that can be expressed using
4875 * the two 6-bit immediates of the AND/ANDS instructions. The caller must
4876 * make sure this is possible!
4877 */
4878DECL_FORCE_INLINE_THROW(uint32_t)
4879iemNativeEmitAndGpr32ByImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint32_t uImm, bool fSetFlags = false)
4880{
4881#if defined(RT_ARCH_AMD64)
4882 /* and Ev, imm */
4883 if (iGprDst >= 8)
4884 pCodeBuf[off++] = X86_OP_REX_B;
4885 if ((int32_t)uImm == (int8_t)uImm)
4886 {
4887 pCodeBuf[off++] = 0x83;
4888 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
4889 pCodeBuf[off++] = (uint8_t)uImm;
4890 }
4891 else
4892 {
4893 pCodeBuf[off++] = 0x81;
4894 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
4895 pCodeBuf[off++] = RT_BYTE1(uImm);
4896 pCodeBuf[off++] = RT_BYTE2(uImm);
4897 pCodeBuf[off++] = RT_BYTE3(uImm);
4898 pCodeBuf[off++] = RT_BYTE4(uImm);
4899 }
4900 RT_NOREF(fSetFlags);
4901
4902#elif defined(RT_ARCH_ARM64)
4903 uint32_t uImmR = 0;
4904 uint32_t uImmNandS = 0;
4905 if (Armv8A64ConvertMask32ToImmRImmS(uImm, &uImmNandS, &uImmR))
4906 {
4907 if (!fSetFlags)
4908 pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
4909 else
4910 pCodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
4911 }
4912 else
4913# ifdef IEM_WITH_THROW_CATCH
4914 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4915# else
4916 AssertReleaseFailedStmt(off = UINT32_MAX);
4917# endif
4918
4919#else
4920# error "Port me"
4921#endif
4922 return off;
4923}
4924
4925
4926/**
4927 * Emits code for AND'ing a 32-bit GPR with a constant.
4928 *
4929 * @note Bits 32 thru 63 in the destination will be zero after the operation.
4930 */
4931DECL_INLINE_THROW(uint32_t)
4932iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm, bool fSetFlags = false)
4933{
4934#if defined(RT_ARCH_AMD64)
4935 off = iemNativeEmitAndGpr32ByImmEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprDst, uImm, fSetFlags);
4936
4937#elif defined(RT_ARCH_ARM64)
4938 uint32_t uImmR = 0;
4939 uint32_t uImmNandS = 0;
4940 if (Armv8A64ConvertMask32ToImmRImmS(uImm, &uImmNandS, &uImmR))
4941 {
4942 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
4943 if (!fSetFlags)
4944 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
4945 else
4946 pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
4947 }
4948 else
4949 {
4950 /* Use temporary register for the 64-bit immediate. */
4951 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
4952 off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg, fSetFlags);
4953 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
4954 }
4955
4956#else
4957# error "Port me"
4958#endif
4959 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
4960 return off;
4961}
4962
4963
4964/**
4965 * Emits code for AND'ing a 64-bit GPR with a constant, storing the result in @a iGprDst.
4966 *
4967 * @note For ARM64, immediates w/o an AND/ANDS compatible encoding will assert /
4968 * throw an exception if @a iGprDst and @a iGprSrc are the same.  The AMD64
4969 * variant also requires distinct registers, as the immediate is loaded into @a iGprDst first.
4970 */
4971DECL_FORCE_INLINE_THROW(uint32_t)
4972iemNativeEmitGprEqGprAndImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, uint64_t uImm,
4973 bool fSetFlags = false)
4974{
4975#if defined(RT_ARCH_AMD64)
4976 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, uImm);
4977 off = iemNativeEmitAndGprByGprEx(pCodeBuf, off, iGprDst, iGprSrc);
4978 RT_NOREF(fSetFlags);
4979
4980#elif defined(RT_ARCH_ARM64)
4981 uint32_t uImmR = 0;
4982 uint32_t uImmNandS = 0;
4983 if (Armv8A64ConvertMask64ToImmRImmS(uImm, &uImmNandS, &uImmR))
4984 {
4985 if (!fSetFlags)
4986 pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprSrc, uImmNandS, uImmR);
4987 else
4988 pCodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprSrc, uImmNandS, uImmR);
4989 }
4990 else if (iGprDst != iGprSrc)
4991 {
4992 off = iemNativeEmitLoadGprImmEx(pCodeBuf, off, iGprDst, uImm);
4993 off = iemNativeEmitAndGprByGprEx(pCodeBuf, off, iGprDst, iGprSrc, fSetFlags);
4994 }
4995 else
4996# ifdef IEM_WITH_THROW_CATCH
4997 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
4998# else
4999 AssertReleaseFailedStmt(off = UINT32_MAX);
5000# endif
5001
5002#else
5003# error "Port me"
5004#endif
5005 return off;
5006}
5007
5008/**
5009 * Emits code for AND'ing a 32-bit GPR with a constant, storing the result in @a iGprDst.
5010 *
5011 * @note For ARM64, immediates w/o an AND/ANDS compatible
5012 * encoding will assert / throw exception if @a iGprDst and @a iGprSrc are
5013 * the same.
5014 *
5015 * @note Bits 32 thru 63 in the destination will be zero after the operation.
5016 */
5017DECL_FORCE_INLINE_THROW(uint32_t)
5018iemNativeEmitGpr32EqGprAndImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, uint32_t uImm,
5019 bool fSetFlags = false)
5020{
5021#if defined(RT_ARCH_AMD64)
5022 /* Copy the source first so this also works when iGprDst == iGprSrc. */
5023 off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprSrc);
5024 off = iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, iGprDst, uImm, fSetFlags);
5025
5026#elif defined(RT_ARCH_ARM64)
5027 uint32_t uImmR = 0;
5028 uint32_t uImmNandS = 0;
5029 if (Armv8A64ConvertMask32ToImmRImmS(uImm, &uImmNandS, &uImmR))
5030 {
5031 if (!fSetFlags)
5032 pCodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprSrc, uImmNandS, uImmR, false /*f64Bit*/);
5033 else
5034 pCodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprSrc, uImmNandS, uImmR, false /*f64Bit*/);
5035 }
5036 else if (iGprDst != iGprSrc)
5037 {
5038 off = iemNativeEmitLoadGpr32ImmEx(pCodeBuf, off, iGprDst, uImm);
5039 off = iemNativeEmitAndGpr32ByGpr32Ex(pCodeBuf, off, iGprDst, iGprSrc, fSetFlags);
5040 }
5041 else
5042# ifdef IEM_WITH_THROW_CATCH
5043 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
5044# else
5045 AssertReleaseFailedStmt(off = UINT32_MAX);
5046# endif
5047
5048#else
5049# error "Port me"
5050#endif
5051 return off;
5052}
5053
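/**
 * Example (editor's illustrative sketch): a masked register copy, i.e.
 * iGprDst = iGprSrc & 0xff.  Since 0xff is a valid ARM64 logical immediate,
 * this is safe even when the two registers are the same.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitGpr32EqGprAndU8Example(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
{
    return iemNativeEmitGpr32EqGprAndImmEx(pCodeBuf, off, iGprDst, iGprSrc, UINT32_C(0xff));
}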
5054
5055/**
5056 * Emits code for OR'ing two 64-bit GPRs.
5057 */
5058DECL_FORCE_INLINE(uint32_t)
5059iemNativeEmitOrGprByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5060{
5061#if defined(RT_ARCH_AMD64)
5062 /* or Gv, Ev */
5063 pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
5064 pCodeBuf[off++] = 0x0b;
5065 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
5066
5067#elif defined(RT_ARCH_ARM64)
5068 pCodeBuf[off++] = Armv8A64MkInstrOrr(iGprDst, iGprDst, iGprSrc);
5069
5070#else
5071# error "Port me"
5072#endif
5073 return off;
5074}
5075
5076
5077/**
5078 * Emits code for OR'ing two 64-bit GPRs.
5079 */
5080DECL_INLINE_THROW(uint32_t)
5081iemNativeEmitOrGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5082{
5083#if defined(RT_ARCH_AMD64)
5084 off = iemNativeEmitOrGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc);
5085#elif defined(RT_ARCH_ARM64)
5086 off = iemNativeEmitOrGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
5087#else
5088# error "Port me"
5089#endif
5090 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5091 return off;
5092}
5093
5094
5095/**
5096 * Emits code for OR'ing two 32-bit GPRs.
5097 * @note Bits 63:32 of the destination GPR will be cleared.
5098 */
5099DECL_FORCE_INLINE(uint32_t)
5100iemNativeEmitOrGpr32ByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5101{
5102#if defined(RT_ARCH_AMD64)
5103 /* or Gv, Ev */
5104 if (iGprDst >= 8 || iGprSrc >= 8)
5105 pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
5106 pCodeBuf[off++] = 0x0b;
5107 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
5108
5109#elif defined(RT_ARCH_ARM64)
5110 pCodeBuf[off++] = Armv8A64MkInstrOrr(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
5111
5112#else
5113# error "Port me"
5114#endif
5115 return off;
5116}
5117
5118
5119/**
5120 * Emits code for OR'ing two 32-bit GPRs.
5121 * @note Bits 63:32 of the destination GPR will be cleared.
5122 */
5123DECL_INLINE_THROW(uint32_t)
5124iemNativeEmitOrGpr32ByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5125{
5126#if defined(RT_ARCH_AMD64)
5127 off = iemNativeEmitOrGpr32ByGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc);
5128#elif defined(RT_ARCH_ARM64)
5129 off = iemNativeEmitOrGpr32ByGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
5130#else
5131# error "Port me"
5132#endif
5133 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5134 return off;
5135}
5136
5137
5138/**
5139 * Emits code for OR'ing a 64-bit GPR with a constant.
5140 */
5141DECL_INLINE_THROW(uint32_t)
5142iemNativeEmitOrGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm)
5143{
5144#if defined(RT_ARCH_AMD64)
5145 if ((int64_t)uImm == (int8_t)uImm)
5146 {
5147 /* or Ev, imm8 */
5148 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
5149 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
5150 pbCodeBuf[off++] = 0x83;
5151 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
5152 pbCodeBuf[off++] = (uint8_t)uImm;
5153 }
5154 else if ((int64_t)uImm == (int32_t)uImm)
5155 {
5156 /* or Ev, imm32 */
5157 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
5158 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
5159 pbCodeBuf[off++] = 0x81;
5160 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
5161 pbCodeBuf[off++] = RT_BYTE1(uImm);
5162 pbCodeBuf[off++] = RT_BYTE2(uImm);
5163 pbCodeBuf[off++] = RT_BYTE3(uImm);
5164 pbCodeBuf[off++] = RT_BYTE4(uImm);
5165 }
5166 else
5167 {
5168 /* Use temporary register for the 64-bit immediate. */
5169 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
5170 off = iemNativeEmitOrGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iTmpReg);
5171 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5172 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
5173 }
5174
5175#elif defined(RT_ARCH_ARM64)
5176 uint32_t uImmR = 0;
5177 uint32_t uImmNandS = 0;
5178 if (Armv8A64ConvertMask64ToImmRImmS(uImm, &uImmNandS, &uImmR))
5179 {
5180 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
5181 pu32CodeBuf[off++] = Armv8A64MkInstrOrrImm(iGprDst, iGprDst, uImmNandS, uImmR);
5182 }
5183 else
5184 {
5185 /* Use temporary register for the 64-bit immediate. */
5186 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
5187 off = iemNativeEmitOrGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iTmpReg);
5188 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5189 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
5190 }
5191
5192#else
5193# error "Port me"
5194#endif
5195 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5196 return off;
5197}
5198
5199
5200/**
5201 * Emits code for OR'ing a 32-bit GPR with a constant.
5202 * @note Bits 32 thru 63 in the destination will be zero after the operation.
5203 * @note For ARM64 this only supports @a uImm values that can be expressed using
5204 * the two 6-bit immediates of the OR instructions. The caller must make
5205 * sure this is possible!
5206 */
5207DECL_FORCE_INLINE_THROW(uint32_t)
5208iemNativeEmitOrGpr32ByImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint32_t uImm)
5209{
5210#if defined(RT_ARCH_AMD64)
5211 /* or Ev, imm */
5212 if (iGprDst >= 8)
5213 pCodeBuf[off++] = X86_OP_REX_B;
5214 if ((int32_t)uImm == (int8_t)uImm)
5215 {
5216 pCodeBuf[off++] = 0x83;
5217 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
5218 pCodeBuf[off++] = (uint8_t)uImm;
5219 }
5220 else
5221 {
5222 pCodeBuf[off++] = 0x81;
5223 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 1, iGprDst & 7);
5224 pCodeBuf[off++] = RT_BYTE1(uImm);
5225 pCodeBuf[off++] = RT_BYTE2(uImm);
5226 pCodeBuf[off++] = RT_BYTE3(uImm);
5227 pCodeBuf[off++] = RT_BYTE4(uImm);
5228 }
5229
5230#elif defined(RT_ARCH_ARM64)
5231 uint32_t uImmR = 0;
5232 uint32_t uImmNandS = 0;
5233 if (Armv8A64ConvertMask32ToImmRImmS(uImm, &uImmNandS, &uImmR))
5234 pCodeBuf[off++] = Armv8A64MkInstrOrrImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
5235 else
5236# ifdef IEM_WITH_THROW_CATCH
5237 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
5238# else
5239 AssertReleaseFailedStmt(off = UINT32_MAX);
5240# endif
5241
5242#else
5243# error "Port me"
5244#endif
5245 return off;
5246}
5247
5248
5249/**
5250 * Emits code for OR'ing a 32-bit GPR with a constant.
5251 *
5252 * @note Bits 32 thru 63 in the destination will be zero after the operation.
5253 */
5254DECL_INLINE_THROW(uint32_t)
5255iemNativeEmitOrGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm)
5256{
5257#if defined(RT_ARCH_AMD64)
5258 off = iemNativeEmitOrGpr32ByImmEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprDst, uImm);
5259
5260#elif defined(RT_ARCH_ARM64)
5261 uint32_t uImmR = 0;
5262 uint32_t uImmNandS = 0;
5263 if (Armv8A64ConvertMask32ToImmRImmS(uImm, &uImmNandS, &uImmR))
5264 {
5265 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
5266 pu32CodeBuf[off++] = Armv8A64MkInstrOrrImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
5267 }
5268 else
5269 {
5270 /* Use temporary register for the 64-bit immediate. */
5271 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
5272 off = iemNativeEmitOrGpr32ByGpr(pReNative, off, iGprDst, iTmpReg);
5273 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
5274 }
5275
5276#else
5277# error "Port me"
5278#endif
5279 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5280 return off;
5281}
5282
5283
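/**
 * Example (editor's illustrative sketch): setting a single flag bit, here
 * bit 31.  A lone bit is always a valid ARM64 logical immediate, so neither
 * host needs a temporary register for this.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitSetBit31Example(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
{
    return iemNativeEmitOrGpr32ByImm(pReNative, off, iGprDst, RT_BIT_32(31));
}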
5284
5285/**
5286 * ORs two 64-bit GPRs together, storing the result in a third register.
5287 */
5288DECL_FORCE_INLINE(uint32_t)
5289iemNativeEmitGprEqGprOrGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc1, uint8_t iGprSrc2)
5290{
5291#ifdef RT_ARCH_AMD64
5292 if (iGprDst != iGprSrc1 && iGprDst != iGprSrc2)
5293 {
5294 /* Move the first source into the destination, then OR in the second. */
5295 off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprSrc1);
5296 off = iemNativeEmitOrGprByGprEx(pCodeBuf, off, iGprDst, iGprSrc2);
5297 }
5298 else
5299 off = iemNativeEmitOrGprByGprEx(pCodeBuf, off, iGprDst, iGprDst != iGprSrc1 ? iGprSrc1 : iGprSrc2);
5300
5301#elif defined(RT_ARCH_ARM64)
5302 pCodeBuf[off++] = Armv8A64MkInstrOrr(iGprDst, iGprSrc1, iGprSrc2);
5303
5304#else
5305# error "Port me!"
5306#endif
5307 return off;
5308}
5309
5310
5311
5312/**
5313 * Ors two 32-bit GPRs together, storing the result in a third register.
5314 * @note Bits 32 thru 63 in @a iGprDst will be zero after the operation.
5315 */
5316DECL_FORCE_INLINE(uint32_t)
5317iemNativeEmitGpr32EqGprOrGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc1, uint8_t iGprSrc2)
5318{
5319#ifdef RT_ARCH_AMD64
5320 if (iGprDst != iGprSrc1 && iGprDst != iGprSrc2)
5321 {
5322 off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprSrc1);
5323 off = iemNativeEmitOrGpr32ByGprEx(pCodeBuf, off, iGprDst, iGprSrc2);
5324 }
5325 else
5326 off = iemNativeEmitOrGpr32ByGprEx(pCodeBuf, off, iGprDst, iGprDst != iGprSrc1 ? iGprSrc1 : iGprSrc2);
5327
5328#elif defined(RT_ARCH_ARM64)
5329 pCodeBuf[off++] = Armv8A64MkInstrOrr(iGprDst, iGprSrc1, iGprSrc2, false /*f64Bit*/);
5330
5331#else
5332# error "Port me!"
5333#endif
5334 return off;
5335}
5336
5337
5338/**
5339 * Emits code for XOR'ing two 64-bit GPRs.
5340 */
5341DECL_INLINE_THROW(uint32_t)
5342iemNativeEmitXorGprByGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5343{
5344#if defined(RT_ARCH_AMD64)
5345 /* xor Gv, Ev */
5346 pCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
5347 pCodeBuf[off++] = 0x33;
5348 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
5349
5350#elif defined(RT_ARCH_ARM64)
5351 pCodeBuf[off++] = Armv8A64MkInstrEor(iGprDst, iGprDst, iGprSrc);
5352
5353#else
5354# error "Port me"
5355#endif
5356 return off;
5357}
5358
5359
5360/**
5361 * Emits code for XOR'ing two 64-bit GPRs.
5362 */
5363DECL_INLINE_THROW(uint32_t)
5364iemNativeEmitXorGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5365{
5366#if defined(RT_ARCH_AMD64)
5367 off = iemNativeEmitXorGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc);
5368#elif defined(RT_ARCH_ARM64)
5369 off = iemNativeEmitXorGprByGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
5370#else
5371# error "Port me"
5372#endif
5373 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5374 return off;
5375}
5376
5377
5378/**
5379 * Emits code for XOR'ing two 32-bit GPRs (bits 63:32 of the destination are cleared).
5380 */
5381DECL_INLINE_THROW(uint32_t)
5382iemNativeEmitXorGpr32ByGpr32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5383{
5384#if defined(RT_ARCH_AMD64)
5385 /* xor Gv, Ev */
5386 if (iGprDst >= 8 || iGprSrc >= 8)
5387 pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
5388 pCodeBuf[off++] = 0x33;
5389 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
5390
5391#elif defined(RT_ARCH_ARM64)
5392 pCodeBuf[off++] = Armv8A64MkInstrEor(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
5393
5394#else
5395# error "Port me"
5396#endif
5397 return off;
5398}
5399
5400
5401/**
5402 * Emits code for XOR'ing two 32-bit GPRs (bits 63:32 of the destination are cleared).
5403 */
5404DECL_INLINE_THROW(uint32_t)
5405iemNativeEmitXorGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
5406{
5407#if defined(RT_ARCH_AMD64)
5408 off = iemNativeEmitXorGpr32ByGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprDst, iGprSrc);
5409#elif defined(RT_ARCH_ARM64)
5410 off = iemNativeEmitXorGpr32ByGpr32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc);
5411#else
5412# error "Port me"
5413#endif
5414 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5415 return off;
5416}
5417
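/**
 * Example (editor's illustrative sketch): zeroing a register with the classic
 * AMD64 xor-with-self idiom.  On ARM64 this emits EOR Wd,Wd,Wd, which also
 * yields zero, though a move from WZR would be the more customary encoding
 * there.
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitZeroGpr32Example(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
{
    return iemNativeEmitXorGpr32ByGpr32(pReNative, off, iGprDst, iGprDst);
}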
5418
5419/**
5420 * Emits code for XOR'ing a 32-bit GPR with a constant.
5421 * @note Bits 32 thru 63 in the destination will be zero after the operation.
5422 * @note For ARM64 this only supports @a uImm values that can be expressed using
5423 * the two 6-bit immediates of the EOR instructions. The caller must make
5424 * sure this is possible!
5425 */
5426DECL_FORCE_INLINE_THROW(uint32_t)
5427iemNativeEmitXorGpr32ByImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint32_t uImm)
5428{
5429#if defined(RT_ARCH_AMD64)
5430 /* xor Ev, imm */
5431 if (iGprDst >= 8)
5432 pCodeBuf[off++] = X86_OP_REX_B;
5433 if ((int32_t)uImm == (int8_t)uImm)
5434 {
5435 pCodeBuf[off++] = 0x83;
5436 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 6, iGprDst & 7);
5437 pCodeBuf[off++] = (uint8_t)uImm;
5438 }
5439 else
5440 {
5441 pCodeBuf[off++] = 0x81;
5442 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 6, iGprDst & 7);
5443 pCodeBuf[off++] = RT_BYTE1(uImm);
5444 pCodeBuf[off++] = RT_BYTE2(uImm);
5445 pCodeBuf[off++] = RT_BYTE3(uImm);
5446 pCodeBuf[off++] = RT_BYTE4(uImm);
5447 }
5448
5449#elif defined(RT_ARCH_ARM64)
5450 uint32_t uImmR = 0;
5451 uint32_t uImmNandS = 0;
5452 if (Armv8A64ConvertMask32ToImmRImmS(uImm, &uImmNandS, &uImmR))
5453 pCodeBuf[off++] = Armv8A64MkInstrEorImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
5454 else
5455# ifdef IEM_WITH_THROW_CATCH
5456 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
5457# else
5458 AssertReleaseFailedStmt(off = UINT32_MAX);
5459# endif
5460
5461#else
5462# error "Port me"
5463#endif
5464 return off;
5465}
5466
5467
5468/**
5469 * Emits code for XOR'ing a 32-bit GPR with a constant.
5470 */
5471DECL_INLINE_THROW(uint32_t)
5472iemNativeEmitXorGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm)
5473{
5474#if defined(RT_ARCH_AMD64)
5475 off = iemNativeEmitXorGpr32ByImmEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprDst, uImm);
5476#elif defined(RT_ARCH_ARM64)
5477 off = iemNativeEmitXorGpr32ByImmEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, uImm);
5478#else
5479# error "Port me"
5480#endif
5481 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5482 return off;
5483}
5484
5485
5486/*********************************************************************************************************************************
5487* Shifting *
5488*********************************************************************************************************************************/
5489
5490/**
5491 * Emits code for shifting a GPR a fixed number of bits to the left.
5492 */
5493DECL_FORCE_INLINE(uint32_t)
5494iemNativeEmitShiftGprLeftEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5495{
5496 Assert(cShift > 0 && cShift < 64);
5497
5498#if defined(RT_ARCH_AMD64)
5499 /* shl dst, cShift */
5500 pCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
5501 if (cShift != 1)
5502 {
5503 pCodeBuf[off++] = 0xc1;
5504 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
5505 pCodeBuf[off++] = cShift;
5506 }
5507 else
5508 {
5509 pCodeBuf[off++] = 0xd1;
5510 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
5511 }
5512
5513#elif defined(RT_ARCH_ARM64)
5514 pCodeBuf[off++] = Armv8A64MkInstrLslImm(iGprDst, iGprDst, cShift);
5515
5516#else
5517# error "Port me"
5518#endif
5519 return off;
5520}
5521
5522
5523/**
5524 * Emits code for shifting a GPR a fixed number of bits to the left.
5525 */
5526DECL_INLINE_THROW(uint32_t)
5527iemNativeEmitShiftGprLeft(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5528{
5529#if defined(RT_ARCH_AMD64)
5530 off = iemNativeEmitShiftGprLeftEx(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, cShift);
5531#elif defined(RT_ARCH_ARM64)
5532 off = iemNativeEmitShiftGprLeftEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, cShift);
5533#else
5534# error "Port me"
5535#endif
5536 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5537 return off;
5538}
5539
5540
5541/**
5542 * Emits code for shifting a 32-bit GPR a fixed number of bits to the left.
5543 */
5544DECL_FORCE_INLINE(uint32_t)
5545iemNativeEmitShiftGpr32LeftEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5546{
5547 Assert(cShift > 0 && cShift < 32);
5548
5549#if defined(RT_ARCH_AMD64)
5550 /* shl dst, cShift */
5551 if (iGprDst >= 8)
5552 pCodeBuf[off++] = X86_OP_REX_B;
5553 if (cShift != 1)
5554 {
5555 pCodeBuf[off++] = 0xc1;
5556 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
5557 pCodeBuf[off++] = cShift;
5558 }
5559 else
5560 {
5561 pCodeBuf[off++] = 0xd1;
5562 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
5563 }
5564
5565#elif defined(RT_ARCH_ARM64)
5566 pCodeBuf[off++] = Armv8A64MkInstrLslImm(iGprDst, iGprDst, cShift, false /*64Bit*/);
5567
5568#else
5569# error "Port me"
5570#endif
5571 return off;
5572}
5573
5574
5575/**
5576 * Emits code for shifting a 32-bit GPR a fixed number of bits to the left.
5577 */
5578DECL_INLINE_THROW(uint32_t)
5579iemNativeEmitShiftGpr32Left(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5580{
5581#if defined(RT_ARCH_AMD64)
5582 off = iemNativeEmitShiftGpr32LeftEx(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, cShift);
5583#elif defined(RT_ARCH_ARM64)
5584 off = iemNativeEmitShiftGpr32LeftEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, cShift);
5585#else
5586# error "Port me"
5587#endif
5588 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5589 return off;
5590}
5591
5592
5593/**
5594 * Emits code for (unsigned) shifting a GPR a fixed number of bits to the right.
5595 */
5596DECL_FORCE_INLINE(uint32_t)
5597iemNativeEmitShiftGprRightEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5598{
5599 Assert(cShift > 0 && cShift < 64);
5600
5601#if defined(RT_ARCH_AMD64)
5602 /* shr dst, cShift */
5603 pCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
5604 if (cShift != 1)
5605 {
5606 pCodeBuf[off++] = 0xc1;
5607 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
5608 pCodeBuf[off++] = cShift;
5609 }
5610 else
5611 {
5612 pCodeBuf[off++] = 0xd1;
5613 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
5614 }
5615
5616#elif defined(RT_ARCH_ARM64)
5617 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprDst, cShift);
5618
5619#else
5620# error "Port me"
5621#endif
5622 return off;
5623}
5624
5625
5626/**
5627 * Emits code for (unsigned) shifting a GPR a fixed number of bits to the right.
5628 */
5629DECL_INLINE_THROW(uint32_t)
5630iemNativeEmitShiftGprRight(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5631{
5632#if defined(RT_ARCH_AMD64)
5633 off = iemNativeEmitShiftGprRightEx(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, cShift);
5634#elif defined(RT_ARCH_ARM64)
5635 off = iemNativeEmitShiftGprRightEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, cShift);
5636#else
5637# error "Port me"
5638#endif
5639 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5640 return off;
5641}
5642
5643
5644/**
5645 * Emits code for (unsigned) shifting a 32-bit GPR a fixed number of bits to the
5646 * right.
5647 */
5648DECL_FORCE_INLINE(uint32_t)
5649iemNativeEmitShiftGpr32RightEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5650{
5651 Assert(cShift > 0 && cShift < 32);
5652
5653#if defined(RT_ARCH_AMD64)
5654 /* shr dst, cShift */
5655 if (iGprDst >= 8)
5656 pCodeBuf[off++] = X86_OP_REX_B;
5657 if (cShift != 1)
5658 {
5659 pCodeBuf[off++] = 0xc1;
5660 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
5661 pCodeBuf[off++] = cShift;
5662 }
5663 else
5664 {
5665 pCodeBuf[off++] = 0xd1;
5666 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
5667 }
5668
5669#elif defined(RT_ARCH_ARM64)
5670 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprDst, cShift, false /*64Bit*/);
5671
5672#else
5673# error "Port me"
5674#endif
5675 return off;
5676}
5677
5678
5679/**
5680 * Emits code for (unsigned) shifting a 32-bit GPR a fixed number of bits to the
5681 * right.
5682 */
5683DECL_INLINE_THROW(uint32_t)
5684iemNativeEmitShiftGpr32Right(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5685{
5686#if defined(RT_ARCH_AMD64)
5687 off = iemNativeEmitShiftGpr32RightEx(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, cShift);
5688#elif defined(RT_ARCH_ARM64)
5689 off = iemNativeEmitShiftGpr32RightEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, cShift);
5690#else
5691# error "Port me"
5692#endif
5693 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5694 return off;
5695}
5696
5697
5698/**
5699 * Emits code for (unsigned) shifting a 32-bit GPR a fixed number of bits to the
5700 * right and assigning it to a different GPR.
5701 */
5702DECL_INLINE_THROW(uint32_t)
5703iemNativeEmitGpr32EqGprShiftRightImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, uint8_t cShift)
5704{
5705 Assert(cShift > 0); Assert(cShift < 32);
5706#if defined(RT_ARCH_AMD64)
5707 off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprSrc);
5708 off = iemNativeEmitShiftGpr32RightEx(pCodeBuf, off, iGprDst, cShift);
5709
5710#elif defined(RT_ARCH_ARM64)
5711 pCodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprSrc, cShift, false /*64Bit*/);
5712
5713#else
5714# error "Port me"
5715#endif
5716 return off;
5717}
5718
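/**
 * Example (editor's illustrative sketch): extracting an 8-bit field, i.e.
 * iGprDst = (iGprSrc >> cShift) & 0xff, from the Ex emitters above.  cShift
 * must be 1..31 and the caller must have ensured roughly 14 bytes of buffer
 * space on AMD64 / 2 instructions on ARM64.  A single UBFX would also do on
 * ARM64; the two-instruction form below just reuses what is already here.
 */
DECL_FORCE_INLINE_THROW(uint32_t)
iemNativeEmitExtractU8FieldExample(PIEMNATIVEINSTR pCodeBuf, uint32_t off,
                                   uint8_t iGprDst, uint8_t iGprSrc, uint8_t cShift)
{
    off = iemNativeEmitGpr32EqGprShiftRightImmEx(pCodeBuf, off, iGprDst, iGprSrc, cShift);
    return iemNativeEmitAndGpr32ByImmEx(pCodeBuf, off, iGprDst, UINT32_C(0xff));
}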
5719
5720/**
5721 * Emits code for (signed) shifting a GPR a fixed number of bits to the right.
5722 */
5723DECL_FORCE_INLINE(uint32_t)
5724iemNativeEmitArithShiftGprRightEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5725{
5726 Assert(cShift > 0 && cShift < 64);
5727
5728#if defined(RT_ARCH_AMD64)
5729 /* sar dst, cShift */
5730 pCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
5731 if (cShift != 1)
5732 {
5733 pCodeBuf[off++] = 0xc1;
5734 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprDst & 7);
5735 pCodeBuf[off++] = cShift;
5736 }
5737 else
5738 {
5739 pCodeBuf[off++] = 0xd1;
5740 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprDst & 7);
5741 }
5742
5743#elif defined(RT_ARCH_ARM64)
5744 pCodeBuf[off++] = Armv8A64MkInstrAsrImm(iGprDst, iGprDst, cShift);
5745
5746#else
5747# error "Port me"
5748#endif
5749 return off;
5750}
5751
5752
5753/**
5754 * Emits code for (signed) shifting a GPR a fixed number of bits to the right.
5755 */
5756DECL_INLINE_THROW(uint32_t)
5757iemNativeEmitArithShiftGprRight(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5758{
5759#if defined(RT_ARCH_AMD64)
5760 off = iemNativeEmitArithShiftGprRightEx(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, cShift);
5761#elif defined(RT_ARCH_ARM64)
5762 off = iemNativeEmitArithShiftGprRightEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, cShift);
5763#else
5764# error "Port me"
5765#endif
5766 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5767 return off;
5768}
5769
5770
5771/**
5772 * Emits code for (signed) shifting a 32-bit GPR a fixed number of bits to the right.
5773 */
5774DECL_FORCE_INLINE(uint32_t)
5775iemNativeEmitArithShiftGpr32RightEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5776{
5777 Assert(cShift > 0 && cShift < 32);
5778
5779#if defined(RT_ARCH_AMD64)
5780 /* sar dst, cShift */
5781 if (iGprDst >= 8)
5782 pCodeBuf[off++] = X86_OP_REX_B;
5783 if (cShift != 1)
5784 {
5785 pCodeBuf[off++] = 0xc1;
5786 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprDst & 7);
5787 pCodeBuf[off++] = cShift;
5788 }
5789 else
5790 {
5791 pCodeBuf[off++] = 0xd1;
5792 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprDst & 7);
5793 }
5794
5795#elif defined(RT_ARCH_ARM64)
5796 pCodeBuf[off++] = Armv8A64MkInstrAsrImm(iGprDst, iGprDst, cShift, false /*f64Bit*/);
5797
5798#else
5799# error "Port me"
5800#endif
5801 return off;
5802}
5803
5804
5805/**
5806 * Emits code for (signed) shifting a 32-bit GPR a fixed number of bits to the right.
5807 */
5808DECL_INLINE_THROW(uint32_t)
5809iemNativeEmitArithShiftGpr32Right(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5810{
5811#if defined(RT_ARCH_AMD64)
5812 off = iemNativeEmitArithShiftGpr32RightEx(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprDst, cShift);
5813#elif defined(RT_ARCH_ARM64)
5814 off = iemNativeEmitArithShiftGpr32RightEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, cShift);
5815#else
5816# error "Port me"
5817#endif
5818 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5819 return off;
5820}
5821
5822
5823/**
5824 * Emits code for rotating a GPR a fixed number of bits to the left.
5825 */
5826DECL_FORCE_INLINE(uint32_t)
5827iemNativeEmitRotateGprLeftEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5828{
5829 Assert(cShift > 0 && cShift < 64);
5830
5831#if defined(RT_ARCH_AMD64)
5832 /* rol dst, cShift */
5833 pCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
5834 if (cShift != 1)
5835 {
5836 pCodeBuf[off++] = 0xc1;
5837 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
5838 pCodeBuf[off++] = cShift;
5839 }
5840 else
5841 {
5842 pCodeBuf[off++] = 0xd1;
5843 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
5844 }
5845
5846#elif defined(RT_ARCH_ARM64)
5847 pCodeBuf[off++] = Armv8A64MkInstrRorImm(iGprDst, iGprDst, cShift);
5848
5849#else
5850# error "Port me"
5851#endif
5852 return off;
5853}
5854
5855
5856#if defined(RT_ARCH_AMD64)
5857/**
5858 * Emits code for rotating a 32-bit GPR a fixed number of bits to the left via carry.
5859 */
5860DECL_FORCE_INLINE(uint32_t)
5861iemNativeEmitAmd64RotateGpr32LeftViaCarryEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t cShift)
5862{
5863 Assert(cShift > 0 && cShift < 32);
5864
5865 /* rcl dst, cShift */
5866 if (iGprDst >= 8)
5867 pCodeBuf[off++] = X86_OP_REX_B;
5868 if (cShift != 1)
5869 {
5870 pCodeBuf[off++] = 0xc1;
5871 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, iGprDst & 7);
5872 pCodeBuf[off++] = cShift;
5873 }
5874 else
5875 {
5876 pCodeBuf[off++] = 0xd1;
5877 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, iGprDst & 7);
5878 }
5879
5880 return off;
5881}
5882#endif /* RT_ARCH_AMD64 */
5883
5884
5885
5886/**
5887 * Emits code for reversing the byte order for a 16-bit value in a 32-bit GPR.
5888 * @note Only bits 15:0 of the result are well defined: ARM64 clears bits 63:32, while AMD64 leaves bits 63:16 untouched.
5889 */
5890DECL_FORCE_INLINE(uint32_t)
5891iemNativeEmitBswapGpr16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr)
5892{
5893#if defined(RT_ARCH_AMD64)
5894 /*
5895 * There is no bswap r16 on x86 (the encoding exists but does not work).
5896 * So just use a rol instead (gcc -O2 does the same).
5897 *
5898 * rol r16, 0x8
5899 */
5900 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
5901 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
5902 if (iGpr >= 8)
5903 pbCodeBuf[off++] = X86_OP_REX_B;
5904 pbCodeBuf[off++] = 0xc1;
5905 pbCodeBuf[off++] = 0xc0 | (iGpr & 7);
5906 pbCodeBuf[off++] = 0x08;
5907#elif defined(RT_ARCH_ARM64)
5908 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
5909
5910 pu32CodeBuf[off++] = Armv8A64MkInstrRev16(iGpr, iGpr, false /*f64Bit*/);
5911#else
5912# error "Port me"
5913#endif
5914
5915 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5916 return off;
5917}
5918
5919
5920/**
5921 * Emits code for reversing the byte order in a 32-bit GPR.
5922 * @note Bits 63:32 of the destination GPR will be cleared.
5923 */
5924DECL_FORCE_INLINE(uint32_t)
5925iemNativeEmitBswapGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr)
5926{
5927#if defined(RT_ARCH_AMD64)
5928 /* bswap r32 */
5929 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
5930
5931 if (iGpr >= 8)
5932 pbCodeBuf[off++] = X86_OP_REX_B;
5933 pbCodeBuf[off++] = 0x0f;
5934 pbCodeBuf[off++] = 0xc8 | (iGpr & 7);
5935#elif defined(RT_ARCH_ARM64)
5936 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
5937
5938 pu32CodeBuf[off++] = Armv8A64MkInstrRev(iGpr, iGpr, false /*f64Bit*/);
5939#else
5940# error "Port me"
5941#endif
5942
5943 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5944 return off;
5945}
5946
5947
5948/**
5949 * Emits code for reversing the byte order in a 64-bit GPR.
5950 */
5951DECL_FORCE_INLINE(uint32_t)
5952iemNativeEmitBswapGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr)
5953{
5954#if defined(RT_ARCH_AMD64)
5955 /* bswap r64 */
5956 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
5957
5958 if (iGpr >= 8)
5959 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_B;
5960 else
5961 pbCodeBuf[off++] = X86_OP_REX_W;
5962 pbCodeBuf[off++] = 0x0f;
5963 pbCodeBuf[off++] = 0xc8 | (iGpr & 7);
5964#elif defined(RT_ARCH_ARM64)
5965 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
5966
5967 pu32CodeBuf[off++] = Armv8A64MkInstrRev(iGpr, iGpr, true /*f64Bit*/);
5968#else
5969# error "Port me"
5970#endif
5971
5972 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5973 return off;
5974}
5975
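/**
 * Example (editor's illustrative sketch): a 16-bit byte swap with well
 * defined upper bits on both hosts, done by clearing bits 16 thru 63 after
 * the swap (see the note on iemNativeEmitBswapGpr16 above).
 */
DECL_INLINE_THROW(uint32_t)
iemNativeEmitBswapGpr16ZxExample(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr)
{
    off = iemNativeEmitBswapGpr16(pReNative, off, iGpr);
    return iemNativeEmitClear16UpGpr(pReNative, off, iGpr);
}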
5976
5977/*********************************************************************************************************************************
5978* Compare and Testing *
5979*********************************************************************************************************************************/
5980
5981
5982#ifdef RT_ARCH_ARM64
5983/**
5984 * Emits an ARM64 compare instruction.
5985 */
5986DECL_INLINE_THROW(uint32_t)
5987iemNativeEmitCmpArm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight,
5988 bool f64Bit = true, uint32_t cShift = 0, ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsr)
5989{
5990 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
5991 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR /*iRegResult*/, iGprLeft, iGprRight,
5992 f64Bit, true /*fSetFlags*/, cShift, enmShift);
5993 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
5994 return off;
5995}
5996#endif
5997
5998
5999/**
6000 * Emits a compare of two 64-bit GPRs, setting status flags/whatever for use
6001 * with conditional instructions.
6002 */
6003DECL_FORCE_INLINE(uint32_t)
6004iemNativeEmitCmpGprWithGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
6005{
6006#ifdef RT_ARCH_AMD64
6007 /* cmp Gv, Ev */
6008 pCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
6009 pCodeBuf[off++] = 0x3b;
6010 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprLeft & 7, iGprRight & 7);
6011
6012#elif defined(RT_ARCH_ARM64)
6013 pCodeBuf[off++] = Armv8A64MkInstrCmpReg(iGprLeft, iGprRight);
6014
6015#else
6016# error "Port me!"
6017#endif
6018 return off;
6019}
6020
6021
6022/**
6023 * Emits a compare of two 64-bit GPRs, setting status flags/whatever for use
6024 * with conditional instructions.
6025 */
6026DECL_INLINE_THROW(uint32_t)
6027iemNativeEmitCmpGprWithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
6028{
6029#ifdef RT_ARCH_AMD64
6030 off = iemNativeEmitCmpGprWithGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprLeft, iGprRight);
6031#elif defined(RT_ARCH_ARM64)
6032 off = iemNativeEmitCmpGprWithGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprLeft, iGprRight);
6033#else
6034# error "Port me!"
6035#endif
6036 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6037 return off;
6038}
6039
6040
6041/**
6042 * Emits a compare of two 32-bit GPRs, setting status flags/whatever for use
6043 * with conditional instructions.
6044 */
6045DECL_FORCE_INLINE(uint32_t)
6046iemNativeEmitCmpGpr32WithGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
6047{
6048#ifdef RT_ARCH_AMD64
6049 /* cmp Gv, Ev */
6050 if (iGprLeft >= 8 || iGprRight >= 8)
6051 pCodeBuf[off++] = (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
6052 pCodeBuf[off++] = 0x3b;
6053 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprLeft & 7, iGprRight & 7);
6054
6055#elif defined(RT_ARCH_ARM64)
6056 pCodeBuf[off++] = Armv8A64MkInstrCmpReg(iGprLeft, iGprRight, false /*f64Bit*/);
6057
6058#else
6059# error "Port me!"
6060#endif
6061 return off;
6062}
6063
6064
6065/**
6066 * Emits a compare of two 32-bit GPRs, setting status flags/whatever for use
6067 * with conditional instructions.
6068 */
6069DECL_INLINE_THROW(uint32_t)
6070iemNativeEmitCmpGpr32WithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
6071{
6072#ifdef RT_ARCH_AMD64
6073 off = iemNativeEmitCmpGpr32WithGprEx(iemNativeInstrBufEnsure(pReNative, off, 3), off, iGprLeft, iGprRight);
6074#elif defined(RT_ARCH_ARM64)
6075 off = iemNativeEmitCmpGpr32WithGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprLeft, iGprRight);
6076#else
6077# error "Port me!"
6078#endif
6079 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6080 return off;
6081}
6082
6083
6084/**
6085 * Emits a compare of a 64-bit GPR with a constant value, setting status
6086 * flags/whatever for use with conditional instructions.
6087 */
6088DECL_INLINE_THROW(uint32_t)
6089iemNativeEmitCmpGprWithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint64_t uImm)
6090{
6091#ifdef RT_ARCH_AMD64
6092 if (uImm <= UINT32_C(0x7f))
6093 {
6094 /* cmp Ev, Ib */
6095 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
6096 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_B : 0);
6097 pbCodeBuf[off++] = 0x83;
6098 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
6099 pbCodeBuf[off++] = (uint8_t)uImm;
6100 }
6101 else if ((int64_t)uImm == (int32_t)uImm)
6102 {
6103 /* cmp Ev, imm */
6104 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
6105 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_B : 0);
6106 pbCodeBuf[off++] = 0x81;
6107 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
6108
6109 pbCodeBuf[off++] = RT_BYTE1(uImm);
6110 pbCodeBuf[off++] = RT_BYTE2(uImm);
6111 pbCodeBuf[off++] = RT_BYTE3(uImm);
6112 pbCodeBuf[off++] = RT_BYTE4(uImm);
6113 }
6114 else
6115 {
6116 /* Use temporary register for the immediate. */
6117 uint8_t const iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
6118 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iTmpReg);
6119 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
6120 }
6121
6122#elif defined(RT_ARCH_ARM64)
6123 /** @todo guess there are cleverer things we can do here... */
6124 if (uImm < _4K)
6125 {
6126 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
6127 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
6128 true /*64Bit*/, true /*fSetFlags*/);
6129 }
6130 else if (uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
6131 {
6132 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
6133 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm >> 12,
6134 true /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);
6135 }
6136 else
6137 {
6138 /* Use temporary register for the immediate. */
6139 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
6140 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iTmpReg);
6141 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
6142 }
6143
6144#else
6145# error "Port me!"
6146#endif
6147
6148 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6149 return off;
6150}
6151
6152
6153/**
6154 * Emits a compare of a 32-bit GPR with a constant value, setting status
6155 * flags/whatever for use with conditional instructions.
6156 *
6157 * @note On ARM64 the @a uImm value must be in the range 0x000..0xfff or that
6158 * shifted 12 bits to the left (e.g. 0x1000..0xfff0000 with the lower 12
6159 * bits all zero). Will release assert or throw exception if the caller
6160 * violates this restriction.
6161 */
6162DECL_FORCE_INLINE_THROW(uint32_t)
6163iemNativeEmitCmpGpr32WithImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprLeft, uint32_t uImm)
6164{
6165#ifdef RT_ARCH_AMD64
6166 if (iGprLeft >= 8)
6167 pCodeBuf[off++] = X86_OP_REX_B;
6168 if (uImm <= UINT32_C(0x7f))
6169 {
6170 /* cmp Ev, Ib */
6171 pCodeBuf[off++] = 0x83;
6172 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
6173 pCodeBuf[off++] = (uint8_t)uImm;
6174 }
6175 else
6176 {
6177 /* cmp Ev, imm */
6178 pCodeBuf[off++] = 0x81;
6179 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
6180 pCodeBuf[off++] = RT_BYTE1(uImm);
6181 pCodeBuf[off++] = RT_BYTE2(uImm);
6182 pCodeBuf[off++] = RT_BYTE3(uImm);
6183 pCodeBuf[off++] = RT_BYTE4(uImm);
6184 }
6185
6186#elif defined(RT_ARCH_ARM64)
6187 /** @todo guess there are cleverer things we can do here... */
6188 if (uImm < _4K)
6189 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
6190 false /*64Bit*/, true /*fSetFlags*/);
6191 else if (uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
6192 pCodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm >> 12,
6193 false /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);
6194 else
6195# ifdef IEM_WITH_THROW_CATCH
6196 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
6197# else
6198 AssertReleaseFailedStmt(off = UINT32_MAX);
6199# endif
6200
6201#else
6202# error "Port me!"
6203#endif
6204 return off;
6205}
6206
6207
6208/**
6209 * Emits a compare of a 32-bit GPR with a constant value, setting status
6210 * flags/whatever for use with conditional instructions.
6211 */
6212DECL_INLINE_THROW(uint32_t)
6213iemNativeEmitCmpGpr32WithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint32_t uImm)
6214{
6215#ifdef RT_ARCH_AMD64
6216 off = iemNativeEmitCmpGpr32WithImmEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprLeft, uImm);
6217
6218#elif defined(RT_ARCH_ARM64)
6219 /** @todo guess there are cleverer things we can do here... */
6220 if (uImm < _4K)
6221 {
6222 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
6223 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
6224 false /*64Bit*/, true /*fSetFlags*/);
6225 }
6226 else if (uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
6227 {
6228 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
6229 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm >> 12,
6230 false /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);
6231 }
6232 else
6233 {
6234 /* Use temporary register for the immediate. */
6235 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
6236 off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, iGprLeft, iTmpReg);
6237 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
6238 }
6239
6240#else
6241# error "Port me!"
6242#endif
6243
6244 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6245 return off;
6246}
6247
6248
6249/**
6250 * Emits a compare of a 16-bit GPR with a constant value, setting status
6251 * flags/whatever for use with conditional instructions.
6252 *
6253 * @note ARM64: Helper register is required (@a idxTmpReg) for isolating the
6254 * 16-bit value from @a iGprLeft.
6255 * @note On ARM64 the @a uImm value must be in the range 0x000..0xfff or that
6256 * shifted 12 bits to the left (e.g. 0x1000..0xfff0000 with the lower 12
6257 * bits all zero). Will release assert or throw exception if the caller
6258 * violates this restriction.
6259 */
6260DECL_FORCE_INLINE_THROW(uint32_t)
6261iemNativeEmitCmpGpr16WithImmEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprLeft, uint16_t uImm,
6262 uint8_t idxTmpReg = UINT8_MAX)
6263{
6264#ifdef RT_ARCH_AMD64
6265 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
6266 if (iGprLeft >= 8)
6267 pCodeBuf[off++] = X86_OP_REX_B;
6268 if (uImm <= UINT32_C(0x7f))
6269 {
6270 /* cmp Ev, Ib */
6271 pCodeBuf[off++] = 0x83;
6272 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
6273 pCodeBuf[off++] = (uint8_t)uImm;
6274 }
6275 else
6276 {
6277 /* cmp Ev, imm */
6278 pCodeBuf[off++] = 0x81;
6279 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
6280 pCodeBuf[off++] = RT_BYTE1(uImm);
6281 pCodeBuf[off++] = RT_BYTE2(uImm);
6282 }
6283 RT_NOREF(idxTmpReg);
6284
6285#elif defined(RT_ARCH_ARM64)
6286# ifdef IEM_WITH_THROW_CATCH
6287 AssertStmt(idxTmpReg < 32, IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
6288# else
6289 AssertReleaseStmt(idxTmpReg < 32, off = UINT32_MAX);
6290# endif
6291 Assert(Armv8A64ConvertImmRImmS2Mask32(15, 0) == 0xffff);
6292 pCodeBuf[off++] = Armv8A64MkInstrAndImm(idxTmpReg, iGprLeft, 15, 0, false /*f64Bit*/);
6293 off = iemNativeEmitCmpGpr32WithImmEx(pCodeBuf, off, idxTmpReg, uImm);
6294
6295#else
6296# error "Port me!"
6297#endif
6298 return off;
6299}
6300
6301
6302/**
6303 * Emits a compare of a 16-bit GPR with a constant value, setting status
6304 * flags/whatever for use with conditional instructions.
6305 *
6306 * @note ARM64: Helper register is required (idxTmpReg).
6307 */
6308DECL_INLINE_THROW(uint32_t)
6309iemNativeEmitCmpGpr16WithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint16_t uImm,
6310 uint8_t idxTmpReg = UINT8_MAX)
6311{
6312#ifdef RT_ARCH_AMD64
6313 off = iemNativeEmitCmpGpr16WithImmEx(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprLeft, uImm, idxTmpReg);
6314#elif defined(RT_ARCH_ARM64)
6315 off = iemNativeEmitCmpGpr16WithImmEx(iemNativeInstrBufEnsure(pReNative, off, 2), off, iGprLeft, uImm, idxTmpReg);
6316#else
6317# error "Port me!"
6318#endif
6319 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6320 return off;
6321}
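
/*
 * Illustrative sketch (not compiled) of the Ex vs. plain emitter convention
 * used throughout this file: the Ex variants write into a caller-ensured
 * buffer, while the plain variants ensure the buffer themselves and assert
 * the estimate afterwards.  iemNativeEmitExampleCmp16 is a hypothetical
 * helper; idxTmpReg is assumed to be a free host register from the caller.
 */
#if 0
DECL_INLINE_THROW(uint32_t)
iemNativeEmitExampleCmp16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t idxTmpReg)
{
    /* Ensure space for the worst case (7 covers both host architectures),
       then emit via the Ex variant - equivalent to the plain wrapper above. */
    PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
    off = iemNativeEmitCmpGpr16WithImmEx(pCodeBuf, off, iGprLeft, UINT16_C(42), idxTmpReg);
    IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
    return off;
}
#endif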
6322
6323
6324
6325/*********************************************************************************************************************************
6326* Branching *
6327*********************************************************************************************************************************/
6328
6329/**
6330 * Emits a JMP rel32 / B imm26 to the given label.
6331 */
6332DECL_FORCE_INLINE_THROW(uint32_t)
6333iemNativeEmitJmpToLabelEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint32_t idxLabel)
6334{
6335 Assert(idxLabel < pReNative->cLabels);
6336
6337#ifdef RT_ARCH_AMD64
6338 if (pReNative->paLabels[idxLabel].off != UINT32_MAX)
6339 {
6340 uint32_t offRel = pReNative->paLabels[idxLabel].off - (off + 2);
6341 if ((int32_t)offRel < 128 && (int32_t)offRel >= -128)
6342 {
6343 pCodeBuf[off++] = 0xeb; /* jmp rel8 */
6344 pCodeBuf[off++] = (uint8_t)offRel;
6345 }
6346 else
6347 {
6348 offRel -= 3;
6349 pCodeBuf[off++] = 0xe9; /* jmp rel32 */
6350 pCodeBuf[off++] = RT_BYTE1(offRel);
6351 pCodeBuf[off++] = RT_BYTE2(offRel);
6352 pCodeBuf[off++] = RT_BYTE3(offRel);
6353 pCodeBuf[off++] = RT_BYTE4(offRel);
6354 }
6355 }
6356 else
6357 {
6358 pCodeBuf[off++] = 0xe9; /* jmp rel32 */
6359 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4);
6360 pCodeBuf[off++] = 0xfe;
6361 pCodeBuf[off++] = 0xff;
6362 pCodeBuf[off++] = 0xff;
6363 pCodeBuf[off++] = 0xff;
6364 }
6365 pCodeBuf[off++] = 0xcc; /* int3 poison */
6366
6367#elif defined(RT_ARCH_ARM64)
6368 if (pReNative->paLabels[idxLabel].off != UINT32_MAX)
6369 {
6370 pCodeBuf[off] = Armv8A64MkInstrB(pReNative->paLabels[idxLabel].off - off);
6371 off++;
6372 }
6373 else
6374 {
6375 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm26At0);
6376 pCodeBuf[off++] = Armv8A64MkInstrB(-1);
6377 }
6378
6379#else
6380# error "Port me!"
6381#endif
6382 return off;
6383}
6384
6385
6386/**
6387 * Emits a JMP rel32 / B imm26 to the given label.
6388 */
6389DECL_INLINE_THROW(uint32_t)
6390iemNativeEmitJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
6391{
6392#ifdef RT_ARCH_AMD64
6393 off = iemNativeEmitJmpToLabelEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 6), off, idxLabel);
6394#elif defined(RT_ARCH_ARM64)
6395 off = iemNativeEmitJmpToLabelEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 1), off, idxLabel);
6396#else
6397# error "Port me!"
6398#endif
6399 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6400 return off;
6401}
6402
6403
6404/**
6405 * Emits a JMP rel32 / B imm26 to a new undefined label.
6406 */
6407DECL_INLINE_THROW(uint32_t)
6408iemNativeEmitJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
6409{
6410 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
6411 return iemNativeEmitJmpToLabel(pReNative, off, idxLabel);
6412}
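
/*
 * Illustrative sketch (not compiled): a forward jump to a label that is
 * defined once the target offset is known, assuming iemNativeLabelDefine()
 * as declared in IEMN8veRecompiler.h; enmLabelType and uData are placeholders.
 */
#if 0
    uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
    off = iemNativeEmitJmpToLabel(pReNative, off, idxLabel);    /* records a fixup */
    /* ... emit the code being jumped over ... */
    iemNativeLabelDefine(pReNative, idxLabel, off);             /* resolves the fixup */
#endif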
6413
6414/** Condition type. */
6415#ifdef RT_ARCH_AMD64
6416typedef enum IEMNATIVEINSTRCOND : uint8_t
6417{
6418 kIemNativeInstrCond_o = 0,
6419 kIemNativeInstrCond_no,
6420 kIemNativeInstrCond_c,
6421 kIemNativeInstrCond_nc,
6422 kIemNativeInstrCond_e,
6423 kIemNativeInstrCond_z = kIemNativeInstrCond_e,
6424 kIemNativeInstrCond_ne,
6425 kIemNativeInstrCond_nz = kIemNativeInstrCond_ne,
6426 kIemNativeInstrCond_be,
6427 kIemNativeInstrCond_nbe,
6428 kIemNativeInstrCond_s,
6429 kIemNativeInstrCond_ns,
6430 kIemNativeInstrCond_p,
6431 kIemNativeInstrCond_np,
6432 kIemNativeInstrCond_l,
6433 kIemNativeInstrCond_nl,
6434 kIemNativeInstrCond_le,
6435 kIemNativeInstrCond_nle
6436} IEMNATIVEINSTRCOND;
6437#elif defined(RT_ARCH_ARM64)
6438typedef ARMV8INSTRCOND IEMNATIVEINSTRCOND;
6439# define kIemNativeInstrCond_o todo_conditional_codes
6440# define kIemNativeInstrCond_no todo_conditional_codes
6441# define kIemNativeInstrCond_c todo_conditional_codes
6442# define kIemNativeInstrCond_nc todo_conditional_codes
6443# define kIemNativeInstrCond_e kArmv8InstrCond_Eq
6444# define kIemNativeInstrCond_ne kArmv8InstrCond_Ne
6445# define kIemNativeInstrCond_be kArmv8InstrCond_Ls
6446# define kIemNativeInstrCond_nbe kArmv8InstrCond_Hi
6447# define kIemNativeInstrCond_s todo_conditional_codes
6448# define kIemNativeInstrCond_ns todo_conditional_codes
6449# define kIemNativeInstrCond_p todo_conditional_codes
6450# define kIemNativeInstrCond_np todo_conditional_codes
6451# define kIemNativeInstrCond_l kArmv8InstrCond_Lt
6452# define kIemNativeInstrCond_nl kArmv8InstrCond_Ge
6453# define kIemNativeInstrCond_le kArmv8InstrCond_Le
6454# define kIemNativeInstrCond_nle kArmv8InstrCond_Gt
6455#else
6456# error "Port me!"
6457#endif
6458
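/* Note: On AMD64 the enumerators above equal the x86 condition code nibble,
   which is why the emitters below can form Jcc encodings directly as
   0x70 | enmCond (rel8) and 0x0f, 0x80 | enmCond (rel32). */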
6459
6460/**
6461 * Emits a Jcc rel32 / B.cc imm19 to the given label (ASSUMED requiring fixup).
6462 */
6463DECL_FORCE_INLINE_THROW(uint32_t)
6464iemNativeEmitJccToLabelEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off,
6465 uint32_t idxLabel, IEMNATIVEINSTRCOND enmCond)
6466{
6467 Assert(idxLabel < pReNative->cLabels);
6468
6469 uint32_t const offLabel = pReNative->paLabels[idxLabel].off;
6470#ifdef RT_ARCH_AMD64
6471 if (offLabel >= off)
6472 {
6473 /* jcc rel32 */
6474 pCodeBuf[off++] = 0x0f;
6475 pCodeBuf[off++] = (uint8_t)enmCond | 0x80;
6476 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4);
6477 pCodeBuf[off++] = 0x00;
6478 pCodeBuf[off++] = 0x00;
6479 pCodeBuf[off++] = 0x00;
6480 pCodeBuf[off++] = 0x00;
6481 }
6482 else
6483 {
6484 int32_t offDisp = offLabel - (off + 2);
6485 if ((int8_t)offDisp == offDisp)
6486 {
6487 /* jcc rel8 */
6488 pCodeBuf[off++] = (uint8_t)enmCond | 0x70;
6489 pCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
6490 }
6491 else
6492 {
6493 /* jcc rel32 */
6494 offDisp -= 4;
6495 pCodeBuf[off++] = 0x0f;
6496 pCodeBuf[off++] = (uint8_t)enmCond | 0x80;
6497 pCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
6498 pCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
6499 pCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
6500 pCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
6501 }
6502 }
6503
6504#elif defined(RT_ARCH_ARM64)
6505 if (offLabel >= off)
6506 {
6507 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
6508 pCodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, -1);
6509 }
6510 else
6511 {
6512 Assert(off - offLabel <= 0x3ffffU);
6513 pCodeBuf[off] = Armv8A64MkInstrBCond(enmCond, offLabel - off);
6514 off++;
6515 }
6516
6517#else
6518# error "Port me!"
6519#endif
6520 return off;
6521}
6522
6523
6524/**
6525 * Emits a Jcc rel32 / B.cc imm19 to the given label (ASSUMED requiring fixup).
6526 */
6527DECL_INLINE_THROW(uint32_t)
6528iemNativeEmitJccToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel, IEMNATIVEINSTRCOND enmCond)
6529{
6530#ifdef RT_ARCH_AMD64
6531 off = iemNativeEmitJccToLabelEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 6), off, idxLabel, enmCond);
6532#elif defined(RT_ARCH_ARM64)
6533 off = iemNativeEmitJccToLabelEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 1), off, idxLabel, enmCond);
6534#else
6535# error "Port me!"
6536#endif
6537 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6538 return off;
6539}
6540
6541
6542/**
6543 * Emits a Jcc rel32 / B.cc imm19 to a new label.
6544 */
6545DECL_INLINE_THROW(uint32_t)
6546iemNativeEmitJccToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
6547 IEMNATIVELABELTYPE enmLabelType, uint16_t uData, IEMNATIVEINSTRCOND enmCond)
6548{
6549 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
6550 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, enmCond);
6551}
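
/*
 * Illustrative sketch (not compiled): the compare emitters earlier in this
 * file pair with these conditional branch emitters.  iGprValue and idxLabel
 * are placeholders; this emits the equivalent of
 * "if (gpr32 != 0x42) goto label".
 */
#if 0
    off = iemNativeEmitCmpGpr32WithImm(pReNative, off, iGprValue, UINT32_C(0x42));
    off = iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_ne);
#endif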
6552
6553
6554/**
6555 * Emits a JZ/JE rel32 / B.EQ imm19 to the given label.
6556 */
6557DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
6558{
6559#ifdef RT_ARCH_AMD64
6560 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_e);
6561#elif defined(RT_ARCH_ARM64)
6562 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Eq);
6563#else
6564# error "Port me!"
6565#endif
6566}
6567
6568/**
6569 * Emits a JZ/JE rel32 / B.EQ imm19 to a new label.
6570 */
6571DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
6572 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
6573{
6574#ifdef RT_ARCH_AMD64
6575 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_e);
6576#elif defined(RT_ARCH_ARM64)
6577 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Eq);
6578#else
6579# error "Port me!"
6580#endif
6581}
6582
6583
6584/**
6585 * Emits a JNZ/JNE rel32 / B.NE imm19 to the given label.
6586 */
6587DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
6588{
6589#ifdef RT_ARCH_AMD64
6590 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_ne);
6591#elif defined(RT_ARCH_ARM64)
6592 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Ne);
6593#else
6594# error "Port me!"
6595#endif
6596}
6597
6598/**
6599 * Emits a JNZ/JNE rel32 / B.NE imm19 to a new label.
6600 */
6601DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
6602 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
6603{
6604#ifdef RT_ARCH_AMD64
6605 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_ne);
6606#elif defined(RT_ARCH_ARM64)
6607 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Ne);
6608#else
6609# error "Port me!"
6610#endif
6611}
6612
6613
6614/**
6615 * Emits a JBE/JNA rel32 / B.LS imm19 to the given label.
6616 */
6617DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
6618{
6619#ifdef RT_ARCH_AMD64
6620 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_be);
6621#elif defined(RT_ARCH_ARM64)
6622 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Ls);
6623#else
6624# error "Port me!"
6625#endif
6626}
6627
6628/**
6629 * Emits a JBE/JNA rel32 / B.LS imm19 to a new label.
6630 */
6631DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
6632 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
6633{
6634#ifdef RT_ARCH_AMD64
6635 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_be);
6636#elif defined(RT_ARCH_ARM64)
6637 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Ls);
6638#else
6639# error "Port me!"
6640#endif
6641}
6642
6643
6644/**
6645 * Emits a JA/JNBE rel32 / B.HI imm19 to the given label.
6646 */
6647DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
6648{
6649#ifdef RT_ARCH_AMD64
6650 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_nbe);
6651#elif defined(RT_ARCH_ARM64)
6652 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Hi);
6653#else
6654# error "Port me!"
6655#endif
6656}
6657
6658/**
6659 * Emits a JA/JNBE rel32 / B.HI imm19 to a new label.
6660 */
6661DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
6662 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
6663{
6664#ifdef RT_ARCH_AMD64
6665 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_nbe);
6666#elif defined(RT_ARCH_ARM64)
6667 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Hi);
6668#else
6669# error "Port me!"
6670#endif
6671}
6672
6673
6674/**
6675 * Emits a JL/JNGE rel32 / B.LT imm19 to the given label.
6676 */
6677DECL_INLINE_THROW(uint32_t) iemNativeEmitJlToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
6678{
6679#ifdef RT_ARCH_AMD64
6680 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_l);
6681#elif defined(RT_ARCH_ARM64)
6682 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Lt);
6683#else
6684# error "Port me!"
6685#endif
6686}
6687
6688/**
6689 * Emits a JL/JNGE rel32 / B.LT imm19 to a new label.
6690 */
6691DECL_INLINE_THROW(uint32_t) iemNativeEmitJlToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
6692 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
6693{
6694#ifdef RT_ARCH_AMD64
6695 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_l);
6696#elif defined(RT_ARCH_ARM64)
6697 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Lt);
6698#else
6699# error "Port me!"
6700#endif
6701}
6702
6703
6704/**
6705 * Emits a Jcc rel32 / B.cc imm19 with a fixed displacement.
6706 *
6707 * @note The @a offTarget is the absolute jump target (unit is IEMNATIVEINSTR).
6708 *
6709 * Only use hardcoded jumps forward when emitting for exactly one
6710 * platform, otherwise apply iemNativeFixupFixedJump() to ensure hitting
6711 * the right target address on all platforms!
6712 *
6713 *       Please also note that on x86 it is necessary to pass off + 256 or
6714 *       higher for @a offTarget if one believes the intervening code is more
6715 *       than 127 bytes long.
6716 */
6717DECL_FORCE_INLINE(uint32_t)
6718iemNativeEmitJccToFixedEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint32_t offTarget, IEMNATIVEINSTRCOND enmCond)
6719{
6720#ifdef RT_ARCH_AMD64
6721 /* jcc rel8 / rel32 */
6722 int32_t offDisp = (int32_t)(offTarget - (off + 2));
6723 if (offDisp < 128 && offDisp >= -128)
6724 {
6725 pCodeBuf[off++] = (uint8_t)enmCond | 0x70;
6726 pCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
6727 }
6728 else
6729 {
6730 offDisp -= 4;
6731 pCodeBuf[off++] = 0x0f;
6732 pCodeBuf[off++] = (uint8_t)enmCond | 0x80;
6733 pCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
6734 pCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
6735 pCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
6736 pCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
6737 }
6738
6739#elif defined(RT_ARCH_ARM64)
6740 pCodeBuf[off] = Armv8A64MkInstrBCond(enmCond, (int32_t)(offTarget - off));
6741 off++;
6742#else
6743# error "Port me!"
6744#endif
6745 return off;
6746}
6747
6748
6749/**
6750 * Emits a Jcc rel32 / B.cc imm19 with a fixed displacement.
6751 *
6752 * @note The @a offTarget is the absolute jump target (unit is IEMNATIVEINSTR).
6753 *
6754 * Only use hardcoded jumps forward when emitting for exactly one
6755 * platform, otherwise apply iemNativeFixupFixedJump() to ensure hitting
6756 * the right target address on all platforms!
6757 *
6758 *       Please also note that on x86 it is necessary to pass off + 256 or higher
6759 *       for @a offTarget if one believes the intervening code is more than 127
6760 * bytes long.
6761 */
6762DECL_INLINE_THROW(uint32_t)
6763iemNativeEmitJccToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget, IEMNATIVEINSTRCOND enmCond)
6764{
6765#ifdef RT_ARCH_AMD64
6766 off = iemNativeEmitJccToFixedEx(iemNativeInstrBufEnsure(pReNative, off, 6), off, offTarget, enmCond);
6767#elif defined(RT_ARCH_ARM64)
6768 off = iemNativeEmitJccToFixedEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, offTarget, enmCond);
6769#else
6770# error "Port me!"
6771#endif
6772 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6773 return off;
6774}
6775
6776
6777/**
6778 * Emits a JZ/JE rel32 / B.EQ imm19 with a fixed displacement.
6779 *
6780 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
6781 */
6782DECL_INLINE_THROW(uint32_t) iemNativeEmitJzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
6783{
6784#ifdef RT_ARCH_AMD64
6785 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_e);
6786#elif defined(RT_ARCH_ARM64)
6787 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Eq);
6788#else
6789# error "Port me!"
6790#endif
6791}
6792
6793
6794/**
6795 * Emits a JNZ/JNE rel32 / B.NE imm19 with a fixed displacement.
6796 *
6797 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
6798 */
6799DECL_INLINE_THROW(uint32_t) iemNativeEmitJnzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
6800{
6801#ifdef RT_ARCH_AMD64
6802 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_ne);
6803#elif defined(RT_ARCH_ARM64)
6804 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Ne);
6805#else
6806# error "Port me!"
6807#endif
6808}
6809
6810
6811/**
6812 * Emits a JBE/JNA rel32 / B.LS imm19 with a fixed displacement.
6813 *
6814 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
6815 */
6816DECL_INLINE_THROW(uint32_t) iemNativeEmitJbeToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
6817{
6818#ifdef RT_ARCH_AMD64
6819 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_be);
6820#elif defined(RT_ARCH_ARM64)
6821 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Ls);
6822#else
6823# error "Port me!"
6824#endif
6825}
6826
6827
6828/**
6829 * Emits a JA/JNBE rel32 / B.HI imm19 with a fixed displacement.
6830 *
6831 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
6832 */
6833DECL_INLINE_THROW(uint32_t) iemNativeEmitJaToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
6834{
6835#ifdef RT_ARCH_AMD64
6836 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_nbe);
6837#elif defined(RT_ARCH_ARM64)
6838 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Hi);
6839#else
6840# error "Port me!"
6841#endif
6842}
6843
6844
6845/**
6846 * Emits a JMP rel32/rel8 / B imm26 with a fixed displacement.
6847 *
6848 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
6849 */
6850DECL_FORCE_INLINE(uint32_t) iemNativeEmitJmpToFixedEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint32_t offTarget)
6851{
6852#ifdef RT_ARCH_AMD64
6853 /* jmp rel8 or rel32 */
6854 int32_t offDisp = offTarget - (off + 2);
6855 if (offDisp < 128 && offDisp >= -128)
6856 {
6857 pCodeBuf[off++] = 0xeb;
6858 pCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
6859 }
6860 else
6861 {
6862 offDisp -= 3;
6863 pCodeBuf[off++] = 0xe9;
6864 pCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
6865 pCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
6866 pCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
6867 pCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
6868 }
6869
6870#elif defined(RT_ARCH_ARM64)
6871 pCodeBuf[off] = Armv8A64MkInstrB((int32_t)(offTarget - off));
6872 off++;
6873
6874#else
6875# error "Port me!"
6876#endif
6877 return off;
6878}
6879
6880
6881/**
6882 * Emits a JMP rel32/rel8 / B imm26 with a fixed displacement.
6883 *
6884 * See notes on @a offTarget in the iemNativeEmitJccToFixed() documentation.
6885 */
6886DECL_INLINE_THROW(uint32_t) iemNativeEmitJmpToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t offTarget)
6887{
6888#ifdef RT_ARCH_AMD64
6889 off = iemNativeEmitJmpToFixedEx(iemNativeInstrBufEnsure(pReNative, off, 5), off, offTarget);
6890#elif defined(RT_ARCH_ARM64)
6891 off = iemNativeEmitJmpToFixedEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, offTarget);
6892#else
6893# error "Port me!"
6894#endif
6895 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
6896 return off;
6897}
6898
6899
6900/**
6901 * Fixes up a conditional jump to a fixed label.
6902 * @see iemNativeEmitJmpToFixed, iemNativeEmitJnzToFixed,
6903 * iemNativeEmitJzToFixed, ...
6904 */
6905DECL_INLINE_THROW(void) iemNativeFixupFixedJump(PIEMRECOMPILERSTATE pReNative, uint32_t offFixup, uint32_t offTarget)
6906{
6907#ifdef RT_ARCH_AMD64
6908 uint8_t * const pbCodeBuf = pReNative->pInstrBuf;
6909 uint8_t const bOpcode = pbCodeBuf[offFixup];
6910 if ((uint8_t)(bOpcode - 0x70) < (uint8_t)0x10 || bOpcode == 0xeb)
6911 {
6912 pbCodeBuf[offFixup + 1] = (uint8_t)(offTarget - (offFixup + 2));
6913 AssertStmt(pbCodeBuf[offFixup + 1] == offTarget - (offFixup + 2),
6914 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_FIXED_JUMP_OUT_OF_RANGE));
6915 }
6916 else
6917 {
6918 if (bOpcode != 0x0f)
6919 Assert(bOpcode == 0xe9);
6920 else
6921 {
6922 offFixup += 1;
6923 Assert((uint8_t)(pbCodeBuf[offFixup] - 0x80) <= 0x10);
6924 }
6925 uint32_t const offRel32 = offTarget - (offFixup + 5);
6926 pbCodeBuf[offFixup + 1] = RT_BYTE1(offRel32);
6927 pbCodeBuf[offFixup + 2] = RT_BYTE2(offRel32);
6928 pbCodeBuf[offFixup + 3] = RT_BYTE3(offRel32);
6929 pbCodeBuf[offFixup + 4] = RT_BYTE4(offRel32);
6930 }
6931
6932#elif defined(RT_ARCH_ARM64)
6933 uint32_t * const pu32CodeBuf = pReNative->pInstrBuf;
6934 if ((pu32CodeBuf[offFixup] & UINT32_C(0xff000000)) == UINT32_C(0x54000000))
6935 {
6936 /* B.COND + BC.COND */
6937 int32_t const offDisp = offTarget - offFixup;
6938 Assert(offDisp >= -262144 && offDisp < 262144);
6939 pu32CodeBuf[offFixup] = (pu32CodeBuf[offFixup] & UINT32_C(0xff00001f))
6940 | (((uint32_t)offDisp & UINT32_C(0x0007ffff)) << 5);
6941 }
6942 else
6943 {
6944 /* B imm26 */
6945 Assert((pu32CodeBuf[offFixup] & UINT32_C(0xfc000000)) == UINT32_C(0x14000000));
6946 int32_t const offDisp = offTarget - offFixup;
6947 Assert(offDisp >= -33554432 && offDisp < 33554432);
6948 pu32CodeBuf[offFixup] = (pu32CodeBuf[offFixup] & UINT32_C(0xfc000000))
6949 | ((uint32_t)offDisp & UINT32_C(0x03ffffff));
6950 }
6951
6952#else
6953# error "Port me!"
6954#endif
6955}
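
/*
 * Illustrative sketch (not compiled) of the fixed-jump pattern described in
 * the notes above: emit a conditional jump with a dummy forward target, then
 * patch it once the real target is known.  The off + 256 dummy forces the
 * rel32 encoding on x86 so the patched displacement is guaranteed to fit.
 */
#if 0
    uint32_t const offFixup = off;
    off = iemNativeEmitJccToFixed(pReNative, off, off + 256 /*dummy*/, kIemNativeInstrCond_e);
    /* ... emit the code being jumped over ... */
    iemNativeFixupFixedJump(pReNative, offFixup, off /*offTarget*/);
#endif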
6956
6957
6958#ifdef RT_ARCH_AMD64
6959/**
6960 * Emits a BT instruction for testing bit @a iBitNo in @a iGprSrc.
6961 */
6962DECL_INLINE_THROW(uint32_t)
6963iemNativeEmitAmd64TestBitInGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t iBitNo)
6964{
6965 Assert(iBitNo < 64);
6966 /* bt Ev, imm8 */
6967 if (iBitNo >= 32)
6968 pCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
6969 else if (iGprSrc >= 8)
6970 pCodeBuf[off++] = X86_OP_REX_B;
6971 pCodeBuf[off++] = 0x0f;
6972 pCodeBuf[off++] = 0xba;
6973 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprSrc & 7);
6974 pCodeBuf[off++] = iBitNo;
6975 return off;
6976}
6977#endif /* RT_ARCH_AMD64 */
6978
6979
6980/**
6981 * Internal helper, don't call directly.
6982 */
6983DECL_INLINE_THROW(uint32_t)
6984iemNativeEmitTestBitInGprAndJmpToLabelIfCc(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc,
6985 uint8_t iBitNo, uint32_t idxLabel, bool fJmpIfSet)
6986{
6987 Assert(iBitNo < 64);
6988#ifdef RT_ARCH_AMD64
6989 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
6990 if (iBitNo < 8)
6991 {
6992 /* test Eb, imm8 */
6993 if (iGprSrc >= 4)
6994 pbCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX;
6995 pbCodeBuf[off++] = 0xf6;
6996 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
6997 pbCodeBuf[off++] = (uint8_t)1 << iBitNo;
6998 off = iemNativeEmitJccToLabel(pReNative, off, idxLabel, fJmpIfSet ? kIemNativeInstrCond_ne : kIemNativeInstrCond_e);
6999 }
7000 else
7001 {
7002 /* bt Ev, imm8 */
7003 if (iBitNo >= 32)
7004 pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
7005 else if (iGprSrc >= 8)
7006 pbCodeBuf[off++] = X86_OP_REX_B;
7007 pbCodeBuf[off++] = 0x0f;
7008 pbCodeBuf[off++] = 0xba;
7009 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprSrc & 7);
7010 pbCodeBuf[off++] = iBitNo;
7011 off = iemNativeEmitJccToLabel(pReNative, off, idxLabel, fJmpIfSet ? kIemNativeInstrCond_c : kIemNativeInstrCond_nc);
7012 }
7013
7014#elif defined(RT_ARCH_ARM64)
7015    /* Use the TBZ/TBNZ instructions here. */
7016 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
7017 if (pReNative->paLabels[idxLabel].enmType > kIemNativeLabelType_LastWholeTbBranch)
7018 {
7019 AssertMsg(pReNative->paLabels[idxLabel].off == UINT32_MAX,
7020 ("TODO: Please enable & test commented out code for jumping back to a predefined label.\n"));
7021 //uint32_t offLabel = pReNative->paLabels[idxLabel].off;
7022 //if (offLabel == UINT32_MAX)
7023 {
7024 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm14At5);
7025 pu32CodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, 0, iGprSrc, iBitNo);
7026 }
7027 //else
7028 //{
7029 // RT_BREAKPOINT();
7030 // Assert(off - offLabel <= 0x1fffU);
7031 // pu32CodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, offLabel - off, iGprSrc, iBitNo);
7032 //
7033 //}
7034 }
7035 else
7036 {
7037 Assert(Armv8A64ConvertImmRImmS2Mask64(0x40, (64U - iBitNo) & 63U) == RT_BIT_64(iBitNo));
7038 pu32CodeBuf[off++] = Armv8A64MkInstrTstImm(iGprSrc, 0x40, (64U - iBitNo) & 63U);
7039 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
7040 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(fJmpIfSet ? kArmv8InstrCond_Ne : kArmv8InstrCond_Eq, 0);
7041 }
7042
7043#else
7044# error "Port me!"
7045#endif
7046 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
7047 return off;
7048}
7049
7050
7051/**
7052 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _set_ in
7053 * @a iGprSrc.
7054 *
7055 * @note On ARM64 the range is only +/-8191 instructions.
7056 */
7057DECL_INLINE_THROW(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7058 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
7059{
7060 return iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, iGprSrc, iBitNo, idxLabel, true /*fJmpIfSet*/);
7061}
7062
7063
7064/**
7065 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _not_
7066 * _set_ in @a iGprSrc.
7067 *
7068 * @note On ARM64 the range is only +/-8191 instructions.
7069 */
7070DECL_INLINE_THROW(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7071 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
7072{
7073 return iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, iGprSrc, iBitNo, idxLabel, false /*fJmpIfSet*/);
7074}
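
/*
 * Illustrative sketch (not compiled): branching on a single bit.  On AMD64
 * this becomes TEST/BT + Jcc, on ARM64 a single TBZ/TBNZ when the label is
 * within the +/-8191 instruction range noted above.  iGprFlags and idxLabel
 * are placeholders.
 */
#if 0
    /* if (iGprFlags & RT_BIT_64(18)) goto label; */
    off = iemNativeEmitTestBitInGprAndJmpToLabelIfSet(pReNative, off, iGprFlags, 18 /*iBitNo*/, idxLabel);
#endif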
7075
7076
7077/**
7078 * Emits a test for any of the bits from @a fBits in @a iGprSrc, setting CPU
7079 * flags accordingly.
7080 */
7081DECL_INLINE_THROW(uint32_t)
7082iemNativeEmitTestAnyBitsInGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint64_t fBits)
7083{
7084 Assert(fBits != 0);
7085#ifdef RT_ARCH_AMD64
7086
7087 if (fBits >= UINT32_MAX)
7088 {
7089 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
7090
7091 /* test Ev,Gv */
7092 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
7093 pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R) | (iTmpReg < 8 ? 0 : X86_OP_REX_B);
7094 pbCodeBuf[off++] = 0x85;
7095        pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprSrc & 7, iTmpReg & 7);
7096
7097 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
7098 }
7099 else if (fBits <= UINT32_MAX)
7100 {
7101 /* test Eb, imm8 or test Ev, imm32 */
7102 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
7103 if (fBits <= UINT8_MAX)
7104 {
7105 if (iGprSrc >= 4)
7106 pbCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX;
7107 pbCodeBuf[off++] = 0xf6;
7108 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
7109 pbCodeBuf[off++] = (uint8_t)fBits;
7110 }
7111 else
7112 {
7113 if (iGprSrc >= 8)
7114 pbCodeBuf[off++] = X86_OP_REX_B;
7115 pbCodeBuf[off++] = 0xf7;
7116 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
7117 pbCodeBuf[off++] = RT_BYTE1(fBits);
7118 pbCodeBuf[off++] = RT_BYTE2(fBits);
7119 pbCodeBuf[off++] = RT_BYTE3(fBits);
7120 pbCodeBuf[off++] = RT_BYTE4(fBits);
7121 }
7122 }
7123 /** @todo implement me. */
7124 else
7125 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_EMIT_CASE_NOT_IMPLEMENTED_1));
7126
7127#elif defined(RT_ARCH_ARM64)
7128 uint32_t uImmR = 0;
7129 uint32_t uImmNandS = 0;
7130 if (Armv8A64ConvertMask64ToImmRImmS(fBits, &uImmNandS, &uImmR))
7131 {
7132 /* ands xzr, iGprSrc, #fBits */
7133 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
7134 pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(ARMV8_A64_REG_XZR, iGprSrc, uImmNandS, uImmR);
7135 }
7136 else
7137 {
7138 /* ands xzr, iGprSrc, iTmpReg */
7139 uint8_t const iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
7140 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
7141 pu32CodeBuf[off++] = Armv8A64MkInstrAnds(ARMV8_A64_REG_XZR, iGprSrc, iTmpReg);
7142 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
7143 }
7144
7145#else
7146# error "Port me!"
7147#endif
7148 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
7149 return off;
7150}
7151
7152
7153/**
7154 * Emits a test for any of the bits from @a fBits in the lower 32 bits of
7155 * @a iGprSrc, setting CPU flags accordingly.
7156 *
7157 * @note For ARM64 this only supports @a fBits values that can be expressed
7158 * using the two 6-bit immediates of the ANDS instruction. The caller
7159 * must make sure this is possible!
7160 */
7161DECL_FORCE_INLINE_THROW(uint32_t)
7162iemNativeEmitTestAnyBitsInGpr32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint32_t fBits)
7163{
7164 Assert(fBits != 0);
7165
7166#ifdef RT_ARCH_AMD64
7167 if (fBits <= UINT8_MAX)
7168 {
7169 /* test Eb, imm8 */
7170 if (iGprSrc >= 4)
7171 pCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX;
7172 pCodeBuf[off++] = 0xf6;
7173 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
7174 pCodeBuf[off++] = (uint8_t)fBits;
7175 }
7176 else
7177 {
7178 /* test Ev, imm32 */
7179 if (iGprSrc >= 8)
7180 pCodeBuf[off++] = X86_OP_REX_B;
7181 pCodeBuf[off++] = 0xf7;
7182 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
7183 pCodeBuf[off++] = RT_BYTE1(fBits);
7184 pCodeBuf[off++] = RT_BYTE2(fBits);
7185 pCodeBuf[off++] = RT_BYTE3(fBits);
7186 pCodeBuf[off++] = RT_BYTE4(fBits);
7187 }
7188
7189#elif defined(RT_ARCH_ARM64)
7190 /* ands xzr, src, #fBits */
7191 uint32_t uImmR = 0;
7192 uint32_t uImmNandS = 0;
7193 if (Armv8A64ConvertMask32ToImmRImmS(fBits, &uImmNandS, &uImmR))
7194 pCodeBuf[off++] = Armv8A64MkInstrAndsImm(ARMV8_A64_REG_XZR, iGprSrc, uImmNandS, uImmR, false /*f64Bit*/);
7195 else
7196# ifdef IEM_WITH_THROW_CATCH
7197 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
7198# else
7199 AssertReleaseFailedStmt(off = UINT32_MAX);
7200# endif
7201
7202#else
7203# error "Port me!"
7204#endif
7205 return off;
7206}
7207
7208
7209
7210/**
7211 * Emits a test for any of the bits from @a fBits in the lower 8 bits of
7212 * @a iGprSrc, setting CPU flags accordingly.
7213 *
7214 * @note For ARM64 this only supports @a fBits values that can be expressed
7215 * using the two 6-bit immediates of the ANDS instruction. The caller
7216 * must make sure this is possible!
7217 */
7218DECL_FORCE_INLINE_THROW(uint32_t)
7219iemNativeEmitTestAnyBitsInGpr8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprSrc, uint8_t fBits)
7220{
7221 Assert(fBits != 0);
7222
7223#ifdef RT_ARCH_AMD64
7224 /* test Eb, imm8 */
7225 if (iGprSrc >= 4)
7226 pCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX;
7227 pCodeBuf[off++] = 0xf6;
7228 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
7229 pCodeBuf[off++] = fBits;
7230
7231#elif defined(RT_ARCH_ARM64)
7232 /* ands xzr, src, #fBits */
7233 uint32_t uImmR = 0;
7234 uint32_t uImmNandS = 0;
7235 if (Armv8A64ConvertMask32ToImmRImmS(fBits, &uImmNandS, &uImmR))
7236 pCodeBuf[off++] = Armv8A64MkInstrAndsImm(ARMV8_A64_REG_XZR, iGprSrc, uImmNandS, uImmR, false /*f64Bit*/);
7237 else
7238# ifdef IEM_WITH_THROW_CATCH
7239 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
7240# else
7241 AssertReleaseFailedStmt(off = UINT32_MAX);
7242# endif
7243
7244#else
7245# error "Port me!"
7246#endif
7247 return off;
7248}
7249
7250
7251/**
7252 * Emits a test for any of the bits from @a fBits in the lower 8 bits of
7253 * @a iGprSrc, setting CPU flags accordingly.
7254 */
7255DECL_INLINE_THROW(uint32_t)
7256iemNativeEmitTestAnyBitsInGpr8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint8_t fBits)
7257{
7258 Assert(fBits != 0);
7259
7260#ifdef RT_ARCH_AMD64
7261 off = iemNativeEmitTestAnyBitsInGpr8Ex(iemNativeInstrBufEnsure(pReNative, off, 4), off, iGprSrc, fBits);
7262
7263#elif defined(RT_ARCH_ARM64)
7264 /* ands xzr, src, [tmp|#imm] */
7265 uint32_t uImmR = 0;
7266 uint32_t uImmNandS = 0;
7267 if (Armv8A64ConvertMask32ToImmRImmS(fBits, &uImmNandS, &uImmR))
7268 {
7269 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
7270 pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(ARMV8_A64_REG_XZR, iGprSrc, uImmNandS, uImmR, false /*f64Bit*/);
7271 }
7272 else
7273 {
7274        /* Use a temporary register for the immediate. */
7275 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
7276 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
7277 pu32CodeBuf[off++] = Armv8A64MkInstrAnds(ARMV8_A64_REG_XZR, iGprSrc, iTmpReg, false /*f64Bit*/);
7278 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
7279 }
7280
7281#else
7282# error "Port me!"
7283#endif
7284 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
7285 return off;
7286}
7287
7288
7289/**
7290 * Emits a jump to @a idxLabel on the condition _any_ of the bits in @a fBits
7291 * are set in @a iGprSrc.
7292 */
7293DECL_INLINE_THROW(uint32_t)
7294iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7295 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
7296{
7297 Assert(fBits); Assert(!RT_IS_POWER_OF_TWO(fBits));
7298
7299 off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, iGprSrc, fBits);
7300 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
7301
7302 return off;
7303}
7304
7305
7306/**
7307 * Emits a jump to @a idxLabel on the condition _none_ of the bits in @a fBits
7308 * are set in @a iGprSrc.
7309 */
7310DECL_INLINE_THROW(uint32_t)
7311iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7312 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
7313{
7314 Assert(fBits); Assert(!RT_IS_POWER_OF_TWO(fBits));
7315
7316 off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, iGprSrc, fBits);
7317 off = iemNativeEmitJzToLabel(pReNative, off, idxLabel);
7318
7319 return off;
7320}
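
/*
 * Illustrative sketch (not compiled): multi-bit test + branch.  Note the
 * asserts above - fBits must not be a power of two; single-bit tests go via
 * the iemNativeEmitTestBitInGpr* variants instead.  iGprFlags and idxLabel
 * are placeholders.
 */
#if 0
    /* if (!(iGprFlags & UINT64_C(0x30))) goto label; */
    off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(pReNative, off, iGprFlags, UINT64_C(0x30), idxLabel);
#endif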
7321
7322
7323/**
7324 * Emits code that jumps to @a idxLabel if @a iGprSrc is not zero.
7325 *
7326 * The operand size is given by @a f64Bit.
7327 */
7328DECL_FORCE_INLINE_THROW(uint32_t)
7329iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabelEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off,
7330 uint8_t iGprSrc, bool f64Bit, bool fJmpIfNotZero, uint32_t idxLabel)
7331{
7332 Assert(idxLabel < pReNative->cLabels);
7333
7334#ifdef RT_ARCH_AMD64
7335 /* test reg32,reg32 / test reg64,reg64 */
7336 if (f64Bit)
7337 pCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R | X86_OP_REX_B);
7338 else if (iGprSrc >= 8)
7339 pCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
7340 pCodeBuf[off++] = 0x85;
7341 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprSrc & 7, iGprSrc & 7);
7342
7343 /* jnz idxLabel */
7344 off = iemNativeEmitJccToLabelEx(pReNative, pCodeBuf, off, idxLabel,
7345 fJmpIfNotZero ? kIemNativeInstrCond_ne : kIemNativeInstrCond_e);
7346
7347#elif defined(RT_ARCH_ARM64)
7348 if (pReNative->paLabels[idxLabel].off != UINT32_MAX)
7349 {
7350 pCodeBuf[off] = Armv8A64MkInstrCbzCbnz(fJmpIfNotZero, (int32_t)(pReNative->paLabels[idxLabel].off - off),
7351 iGprSrc, f64Bit);
7352 off++;
7353 }
7354 else
7355 {
7356 iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5);
7357 pCodeBuf[off++] = Armv8A64MkInstrCbzCbnz(fJmpIfNotZero, 0, iGprSrc, f64Bit);
7358 }
7359
7360#else
7361# error "Port me!"
7362#endif
7363 return off;
7364}
7365
7366
7367/**
7368 * Emits code that jumps to @a idxLabel if @a iGprSrc is not zero.
7369 *
7370 * The operand size is given by @a f64Bit.
7371 */
7372DECL_FORCE_INLINE_THROW(uint32_t)
7373iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc,
7374 bool f64Bit, bool fJmpIfNotZero, uint32_t idxLabel)
7375{
7376#ifdef RT_ARCH_AMD64
7377 off = iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabelEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 3 + 6),
7378 off, iGprSrc, f64Bit, fJmpIfNotZero, idxLabel);
7379#elif defined(RT_ARCH_ARM64)
7380 off = iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabelEx(pReNative, iemNativeInstrBufEnsure(pReNative, off, 1),
7381 off, iGprSrc, f64Bit, fJmpIfNotZero, idxLabel);
7382#else
7383# error "Port me!"
7384#endif
7385 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
7386 return off;
7387}
7388
7389
7390/* if (Gpr1 == 0) Jmp idxLabel; */
7391
7392/**
7393 * Emits code that jumps to @a idxLabel if @a iGprSrc is zero.
7394 *
7395 * The operand size is given by @a f64Bit.
7396 */
7397DECL_FORCE_INLINE_THROW(uint32_t)
7398iemNativeEmitTestIfGprIsZeroAndJmpToLabelEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off,
7399 uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
7400{
7401 return iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabelEx(pReNative, pCodeBuf, off, iGprSrc,
7402 f64Bit, false /*fJmpIfNotZero*/, idxLabel);
7403}
7404
7405
7406/**
7407 * Emits code that jumps to @a idxLabel if @a iGprSrc is zero.
7408 *
7409 * The operand size is given by @a f64Bit.
7410 */
7411DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGprIsZeroAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7412 uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
7413{
7414 return iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabel(pReNative, off, iGprSrc, f64Bit, false /*fJmpIfNotZero*/, idxLabel);
7415}
7416
7417
7418/**
7419 * Emits code that jumps to a new label if @a iGprSrc is zero.
7420 *
7421 * The operand size is given by @a f64Bit.
7422 */
7423DECL_INLINE_THROW(uint32_t)
7424iemNativeEmitTestIfGprIsZeroAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, bool f64Bit,
7425 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7426{
7427 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7428 return iemNativeEmitTestIfGprIsZeroAndJmpToLabel(pReNative, off, iGprSrc, f64Bit, idxLabel);
7429}
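
/*
 * Illustrative sketch (not compiled): zero test + branch.  On ARM64 this
 * folds into a single CBZ instruction rather than a TEST + Jcc pair.
 * iGprValue and idxLabel are placeholders.
 */
#if 0
    /* if (iGprValue == 0) goto label;  (full 64-bit test) */
    off = iemNativeEmitTestIfGprIsZeroAndJmpToLabel(pReNative, off, iGprValue, true /*f64Bit*/, idxLabel);
#endif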
7430
7431
7432/* if (Gpr1 != 0) Jmp idxLabel; */
7433
7434/**
7435 * Emits code that jumps to @a idxLabel if @a iGprSrc is not zero.
7436 *
7437 * The operand size is given by @a f64Bit.
7438 */
7439DECL_FORCE_INLINE_THROW(uint32_t)
7440iemNativeEmitTestIfGprIsNotZeroAndJmpToLabelEx(PIEMRECOMPILERSTATE pReNative, PIEMNATIVEINSTR pCodeBuf, uint32_t off,
7441 uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
7442{
7443 return iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabelEx(pReNative, pCodeBuf, off, iGprSrc,
7444 f64Bit, true /*fJmpIfNotZero*/, idxLabel);
7445}
7446
7447
7448/**
7449 * Emits code that jumps to @a idxLabel if @a iGprSrc is not zero.
7450 *
7451 * The operand size is given by @a f64Bit.
7452 */
7453DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGprIsNotZeroAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7454 uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
7455{
7456 return iemNativeEmitTestIfGprIsZeroOrNotZeroAndJmpToLabel(pReNative, off, iGprSrc, f64Bit, true /*fJmpIfNotZero*/, idxLabel);
7457}
7458
7459
7460/**
7461 * Emits code that jumps to a new label if @a iGprSrc is not zero.
7462 *
7463 * The operand size is given by @a f64Bit.
7464 */
7465DECL_INLINE_THROW(uint32_t)
7466iemNativeEmitTestIfGprIsNotZeroAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, bool f64Bit,
7467 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7468{
7469 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7470 return iemNativeEmitTestIfGprIsNotZeroAndJmpToLabel(pReNative, off, iGprSrc, f64Bit, idxLabel);
7471}
7472
7473
7474/* if (Gpr1 != Gpr2) Jmp idxLabel; */
7475
7476/**
7477 * Emits code that jumps to the given label if @a iGprLeft and @a iGprRight
7478 * differ.
7479 */
7480DECL_INLINE_THROW(uint32_t)
7481iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7482 uint8_t iGprLeft, uint8_t iGprRight, uint32_t idxLabel)
7483{
7484 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iGprRight);
7485 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
7486 return off;
7487}
7488
7489
7490/**
7491 * Emits code that jumps to a new label if @a iGprLeft and @a iGprRight differ.
7492 */
7493DECL_INLINE_THROW(uint32_t)
7494iemNativeEmitTestIfGprNotEqualGprAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7495 uint8_t iGprLeft, uint8_t iGprRight,
7496 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7497{
7498 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7499 return iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(pReNative, off, iGprLeft, iGprRight, idxLabel);
7500}
7501
7502
7503/* if (Gpr != Imm) Jmp idxLabel; */
7504
7505/**
7506 * Emits code that jumps to the given label if @a iGprSrc differs from @a uImm.
7507 */
7508DECL_INLINE_THROW(uint32_t)
7509iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7510 uint8_t iGprSrc, uint64_t uImm, uint32_t idxLabel)
7511{
7512 off = iemNativeEmitCmpGprWithImm(pReNative, off, iGprSrc, uImm);
7513 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
7514 return off;
7515}
7516
7517
7518/**
7519 * Emits code that jumps to a new label if @a iGprSrc differs from @a uImm.
7520 */
7521DECL_INLINE_THROW(uint32_t)
7522iemNativeEmitTestIfGprNotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7523 uint8_t iGprSrc, uint64_t uImm,
7524 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7525{
7526 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7527 return iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
7528}
7529
7530
7531/**
7532 * Emits code that jumps to the given label if 32-bit @a iGprSrc differs from
7533 * @a uImm.
7534 */
7535DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7536 uint8_t iGprSrc, uint32_t uImm, uint32_t idxLabel)
7537{
7538 off = iemNativeEmitCmpGpr32WithImm(pReNative, off, iGprSrc, uImm);
7539 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
7540 return off;
7541}
7542
7543
7544/**
7545 * Emits code that jumps to a new label if 32-bit @a iGprSrc differs from
7546 * @a uImm.
7547 */
7548DECL_INLINE_THROW(uint32_t)
7549iemNativeEmitTestIfGpr32NotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7550 uint8_t iGprSrc, uint32_t uImm,
7551 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7552{
7553 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7554 return iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
7555}
7556
7557
7558/**
7559 * Emits code that jumps to the given label if 16-bit @a iGprSrc differs from
7560 * @a uImm.
7561 */
7562DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGpr16NotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7563 uint8_t iGprSrc, uint16_t uImm, uint32_t idxLabel)
7564{
7565 off = iemNativeEmitCmpGpr16WithImm(pReNative, off, iGprSrc, uImm);
7566 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
7567 return off;
7568}
7569
7570
7571/**
7572 * Emits code that jumps to a new label if 16-bit @a iGprSrc differs from
7573 * @a uImm.
7574 */
7575DECL_INLINE_THROW(uint32_t)
7576iemNativeEmitTestIfGpr16NotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7577 uint8_t iGprSrc, uint16_t uImm,
7578 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7579{
7580 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7581 return iemNativeEmitTestIfGpr16NotEqualImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
7582}
7583
7584
7585/* if (Gpr == Imm) Jmp idxLabel; */
7586
7587/**
7588 * Emits code that jumps to the given label if @a iGprSrc equals @a uImm.
7589 */
7590DECL_INLINE_THROW(uint32_t)
7591iemNativeEmitTestIfGprEqualsImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7592 uint8_t iGprSrc, uint64_t uImm, uint32_t idxLabel)
7593{
7594 off = iemNativeEmitCmpGprWithImm(pReNative, off, iGprSrc, uImm);
7595 off = iemNativeEmitJzToLabel(pReNative, off, idxLabel);
7596 return off;
7597}
7598
7599
7600/**
7601 * Emits code that jumps to a new label if @a iGprSrc equals @a uImm.
7602 */
7603DECL_INLINE_THROW(uint32_t)
7604iemNativeEmitTestIfGprEqualsImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint64_t uImm,
7605 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7606{
7607 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7608 return iemNativeEmitTestIfGprEqualsImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
7609}
7610
7611
7612/**
7613 * Emits code that jumps to the given label if 32-bit @a iGprSrc equals @a uImm.
7614 */
7615DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGpr32EqualsImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7616 uint8_t iGprSrc, uint32_t uImm, uint32_t idxLabel)
7617{
7618 off = iemNativeEmitCmpGpr32WithImm(pReNative, off, iGprSrc, uImm);
7619 off = iemNativeEmitJzToLabel(pReNative, off, idxLabel);
7620 return off;
7621}
7622
7623
7624/**
7625 * Emits code that jumps to a new label if 32-bit @a iGprSrc equals @a uImm.
7626 */
7627DECL_INLINE_THROW(uint32_t)
7628iemNativeEmitTestIfGpr32EqualsImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint32_t uImm,
7629 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
7630{
7631 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7632 return iemNativeEmitTestIfGpr32EqualsImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
7633}
7634
7635
7636/**
7637 * Emits code that jumps to the given label if 16-bit @a iGprSrc equals @a uImm.
7638 *
7639 * @note ARM64: Helper register is required (idxTmpReg).
7640 */
7641DECL_INLINE_THROW(uint32_t) iemNativeEmitTestIfGpr16EqualsImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
7642 uint8_t iGprSrc, uint16_t uImm, uint32_t idxLabel,
7643 uint8_t idxTmpReg = UINT8_MAX)
7644{
7645 off = iemNativeEmitCmpGpr16WithImm(pReNative, off, iGprSrc, uImm, idxTmpReg);
7646 off = iemNativeEmitJzToLabel(pReNative, off, idxLabel);
7647 return off;
7648}
7649
7650
7651/**
7652 * Emits code that jumps to a new label if 16-bit @a iGprSrc equals @a uImm.
7653 *
7654 * @note ARM64: Helper register is required (idxTmpReg).
7655 */
7656DECL_INLINE_THROW(uint32_t)
7657iemNativeEmitTestIfGpr16EqualsImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint16_t uImm,
7658 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0,
7659 uint8_t idxTmpReg = UINT8_MAX)
7660{
7661 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
7662 return iemNativeEmitTestIfGpr16EqualsImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel, idxTmpReg);
7663}
7664
7665
7666/*********************************************************************************************************************************
7667* Calls. *
7668*********************************************************************************************************************************/
7669
7670/**
7671 * Emits a call to a 64-bit address.
7672 */
7673DECL_INLINE_THROW(uint32_t) iemNativeEmitCallImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uintptr_t uPfn)
7674{
7675#ifdef RT_ARCH_AMD64
7676 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, uPfn);
7677
7678 /* call rax */
7679 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
7680 pbCodeBuf[off++] = 0xff;
7681 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
7682
7683#elif defined(RT_ARCH_ARM64)
7684 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, uPfn);
7685
7686 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
7687 pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
7688
7689#else
7690# error "port me"
7691#endif
7692 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
7693 return off;
7694}
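
/*
 * Illustrative sketch (not compiled): calling a C helper from generated code.
 * iemExampleHelper is hypothetical; argument registers must have been loaded
 * beforehand (see the iemNativeEmitLoadArgGreg* helpers below).
 */
#if 0
    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemExampleHelper);
#endif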
7695
7696
7697/**
7698 * Emits code to load a stack variable into an argument GPR.
7699 * @throws VERR_IEM_VAR_NOT_INITIALIZED, VERR_IEM_VAR_UNEXPECTED_KIND
7700 */
7701DECL_FORCE_INLINE_THROW(uint32_t)
7702iemNativeEmitLoadArgGregFromStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
7703 int32_t offAddend = 0, uint32_t fHstVolatileRegsAllowed = UINT32_MAX,
7704 bool fSpilledVarsInVolatileRegs = false)
7705{
7706 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7707 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7708 AssertStmt(pVar->enmKind == kIemNativeVarKind_Stack, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
7709
7710 uint8_t const idxRegVar = pVar->idxReg;
7711 if ( idxRegVar < RT_ELEMENTS(pReNative->Core.aHstRegs)
7712 && ( (RT_BIT_32(idxRegVar) & (~IEMNATIVE_CALL_VOLATILE_GREG_MASK | fHstVolatileRegsAllowed))
7713 || !fSpilledVarsInVolatileRegs ))
7714 {
7715 AssertStmt( !(RT_BIT_32(idxRegVar) & IEMNATIVE_CALL_VOLATILE_GREG_MASK)
7716 || (RT_BIT_32(idxRegVar) & fHstVolatileRegsAllowed),
7717 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_REG_IPE_13));
7718 if (!offAddend)
7719 {
7720 if (idxRegArg != idxRegVar)
7721 off = iemNativeEmitLoadGprFromGpr(pReNative, off, idxRegArg, idxRegVar);
7722 }
7723 else
7724 off = iemNativeEmitLoadGprFromGprWithAddend(pReNative, off, idxRegArg, idxRegVar, offAddend);
7725 }
7726 else
7727 {
7728 uint8_t const idxStackSlot = pVar->idxStackSlot;
7729 AssertStmt(idxStackSlot != UINT8_MAX, IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_NOT_INITIALIZED));
7730 off = iemNativeEmitLoadGprByBp(pReNative, off, idxRegArg, iemNativeStackCalcBpDisp(idxStackSlot));
7731 if (offAddend)
7732 off = iemNativeEmitAddGprImm(pReNative, off, idxRegArg, offAddend);
7733 }
7734 return off;
7735}
7736
7737
7738/**
7739 * Emits code to load a stack or immediate variable value into an argument GPR,
7740 * optionally with an addend.
7741 * @throws VERR_IEM_VAR_NOT_INITIALIZED, VERR_IEM_VAR_UNEXPECTED_KIND
7742 */
7743DECL_FORCE_INLINE_THROW(uint32_t)
7744iemNativeEmitLoadArgGregFromImmOrStackVar(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
7745 int32_t offAddend = 0, uint32_t fHstVolatileRegsAllowed = 0,
7746 bool fSpilledVarsInVolatileRegs = false)
7747{
7748 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7749 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7750 if (pVar->enmKind == kIemNativeVarKind_Immediate)
7751 off = iemNativeEmitLoadGprImm64(pReNative, off, idxRegArg, pVar->u.uValue + offAddend);
7752 else
7753 off = iemNativeEmitLoadArgGregFromStackVar(pReNative, off, idxRegArg, idxVar, offAddend,
7754 fHstVolatileRegsAllowed, fSpilledVarsInVolatileRegs);
7755 return off;
7756}
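
/*
 * Illustrative sketch (not compiled): marshalling one call argument and
 * making the call.  IEMNATIVE_CALL_ARG1_GREG is assumed to be the host ABI
 * register for the second argument as defined in IEMN8veRecompiler.h;
 * idxVar and iemExampleHelper are placeholders.
 */
#if 0
    off = iemNativeEmitLoadArgGregFromImmOrStackVar(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, idxVar);
    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemExampleHelper);
#endif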
7757
7758
7759/**
7760 * Emits code to load the variable address into an argument GPR.
7761 *
7762 * This only works for uninitialized and stack variables.
7763 */
7764DECL_FORCE_INLINE_THROW(uint32_t)
7765iemNativeEmitLoadArgGregWithVarAddr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
7766 bool fFlushShadows)
7767{
7768 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7769 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7770 AssertStmt( pVar->enmKind == kIemNativeVarKind_Invalid
7771 || pVar->enmKind == kIemNativeVarKind_Stack,
7772 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
7773 AssertStmt(!pVar->fSimdReg,
7774 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
7775
7776 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7777 int32_t const offBpDisp = iemNativeStackCalcBpDisp(idxStackSlot);
7778
7779 uint8_t const idxRegVar = pVar->idxReg;
7780 if (idxRegVar < RT_ELEMENTS(pReNative->Core.aHstRegs))
7781 {
7782 off = iemNativeEmitStoreGprByBp(pReNative, off, offBpDisp, idxRegVar);
7783 iemNativeRegFreeVar(pReNative, idxRegVar, fFlushShadows);
7784 Assert(pVar->idxReg == UINT8_MAX);
7785 }
7786 Assert( pVar->idxStackSlot != UINT8_MAX
7787 && pVar->idxReg == UINT8_MAX);
7788
7789 return iemNativeEmitLeaGprByBp(pReNative, off, idxRegArg, offBpDisp);
7790}
7791
7792
7793#ifdef IEMNATIVE_WITH_SIMD_REG_ALLOCATOR
7794/**
7795 * Emits code to load the variable address into an argument GPR.
7796 *
7797 * This is a special variant intended for SIMD variables only.  It is only
7798 * called by the TLB miss path of the memory fetch/store code, where the value
7799 * is passed by reference and both the register and the stack copy are needed,
7800 * depending on which path is taken (TLB hit vs. miss).
7801 */
7802DECL_FORCE_INLINE_THROW(uint32_t)
7803iemNativeEmitLoadArgGregWithSimdVarAddrForMemAccess(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxRegArg, uint8_t idxVar,
7804 bool fSyncRegWithStack = true)
7805{
7806 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7807 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7808 AssertStmt( pVar->enmKind == kIemNativeVarKind_Invalid
7809 || pVar->enmKind == kIemNativeVarKind_Stack,
7810 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
7811 AssertStmt(pVar->fSimdReg,
7812 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
7813 Assert( pVar->idxStackSlot != UINT8_MAX
7814 && pVar->idxReg != UINT8_MAX);
7815
7816 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7817 int32_t const offBpDisp = iemNativeStackCalcBpDisp(idxStackSlot);
7818
7819 uint8_t const idxRegVar = pVar->idxReg;
7820 Assert(idxRegVar < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
7821 Assert(pVar->cbVar == sizeof(RTUINT128U) || pVar->cbVar == sizeof(RTUINT256U));
7822
7823 if (fSyncRegWithStack)
7824 {
7825 if (pVar->cbVar == sizeof(RTUINT128U))
7826 off = iemNativeEmitStoreVecRegByBpU128(pReNative, off, offBpDisp, idxRegVar);
7827 else
7828 off = iemNativeEmitStoreVecRegByBpU256(pReNative, off, offBpDisp, idxRegVar);
7829 }
7830
7831 return iemNativeEmitLeaGprByBp(pReNative, off, idxRegArg, offBpDisp);
7832}
7833
7834
7835/**
7836 * Emits code to sync the host SIMD register assigned to the given SIMD variable.
7837 *
7838 * This is a special helper that is only called by the TLB miss path of the
7839 * memory fetch/store code.  There the value is passed by reference and ends
7840 * up on the stack on a TLB miss, so the assigned host register must be
7841 * re-synced from the stack copy afterwards.
7842 */
7843DECL_FORCE_INLINE_THROW(uint32_t)
7844iemNativeEmitSimdVarSyncStackToRegister(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxVar)
7845{
7846 IEMNATIVE_ASSERT_VAR_IDX(pReNative, idxVar);
7847 PIEMNATIVEVAR const pVar = &pReNative->Core.aVars[IEMNATIVE_VAR_IDX_UNPACK(idxVar)];
7848 AssertStmt( pVar->enmKind == kIemNativeVarKind_Invalid
7849 || pVar->enmKind == kIemNativeVarKind_Stack,
7850 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
7851 AssertStmt(pVar->fSimdReg,
7852 IEMNATIVE_DO_LONGJMP(pReNative, VERR_IEM_VAR_UNEXPECTED_KIND));
7853 Assert( pVar->idxStackSlot != UINT8_MAX
7854 && pVar->idxReg != UINT8_MAX);
7855
7856 uint8_t const idxStackSlot = iemNativeVarGetStackSlot(pReNative, idxVar);
7857 int32_t const offBpDisp = iemNativeStackCalcBpDisp(idxStackSlot);
7858
7859 uint8_t const idxRegVar = pVar->idxReg;
7860 Assert(idxRegVar < RT_ELEMENTS(pReNative->Core.aHstSimdRegs));
7861 Assert(pVar->cbVar == sizeof(RTUINT128U) || pVar->cbVar == sizeof(RTUINT256U));
7862
7863 if (pVar->cbVar == sizeof(RTUINT128U))
7864 off = iemNativeEmitLoadVecRegByBpU128(pReNative, off, idxRegVar, offBpDisp);
7865 else
7866 off = iemNativeEmitLoadVecRegByBpU256(pReNative, off, idxRegVar, offBpDisp);
7867
7868 return off;
7869}
7870
7871
7872/**
7873 * Emits a gprdst = ~gprsrc operation.
7874 */
7875DECL_FORCE_INLINE_THROW(uint32_t)
7876iemNativeEmitInvBitsGprEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, bool f64Bit = true)
7877{
7878#ifdef RT_ARCH_AMD64
7879 if (iGprDst != iGprSrc)
7880 {
7881 /* mov gprdst, gprsrc. */
7882 if (f64Bit)
7883 off = iemNativeEmitLoadGprFromGprEx(pCodeBuf, off, iGprDst, iGprSrc);
7884 else
7885 off = iemNativeEmitLoadGprFromGpr32Ex(pCodeBuf, off, iGprDst, iGprSrc); /* Bits 32:63 are cleared. */
7886 }
7887
7888 /* not gprdst */
7889 if (f64Bit || iGprDst >= 8)
7890 pCodeBuf[off++] = (f64Bit ? X86_OP_REX_W : 0)
7891 | (iGprDst >= 8 ? X86_OP_REX_B : 0);
7892 pCodeBuf[off++] = 0xf7;
7893 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, iGprDst & 7);
7894#elif defined(RT_ARCH_ARM64)
7895 pCodeBuf[off++] = Armv8A64MkInstrOrn(iGprDst, ARMV8_A64_REG_XZR, iGprSrc, f64Bit);
7896#else
7897# error "port me"
7898#endif
7899 return off;
7900}
7901
7902
7903/**
7904 * Emits a gprdst = ~gprsrc store.
7905 */
7906DECL_INLINE_THROW(uint32_t)
7907iemNativeEmitInvBitsGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc, bool f64Bit = true)
7908{
7909#ifdef RT_ARCH_AMD64
7910 off = iemNativeEmitInvBitsGprEx(iemNativeInstrBufEnsure(pReNative, off, 9), off, iGprDst, iGprSrc, f64Bit);
7911#elif defined(RT_ARCH_ARM64)
7912 off = iemNativeEmitInvBitsGprEx(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iGprSrc, f64Bit);
7913#else
7914# error "port me"
7915#endif
7916 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
7917 return off;
7918}
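

/*
 * Example (illustrative): iGprDst = ~iGprSrc costs mov+not on AMD64 but a
 * single instruction on ARM64, because ORN with XZR as the first source is
 * the MVN alias.  With idxRegSrc as a placeholder for an allocated host GPR:
 *
 *     off = iemNativeEmitInvBitsGpr(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, idxRegSrc);
 */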
7919
7920
7921/**
7922 * Emits a 128-bit vector register store to a VCpu value.
7923 */
7924DECL_FORCE_INLINE_THROW(uint32_t)
7925iemNativeEmitSimdStoreVecRegToVCpuLowU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
7926{
7927#ifdef RT_ARCH_AMD64
7928 /* movdqa mem128, reg128 */ /* ASSUMING an aligned location here. */
7929 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
7930 if (iVecReg >= 8)
7931 pCodeBuf[off++] = X86_OP_REX_R;
7932 pCodeBuf[off++] = 0x0f;
7933 pCodeBuf[off++] = 0x7f;
7934 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, iVecReg, offVCpu);
7935#elif defined(RT_ARCH_ARM64)
7936 off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, iVecReg, offVCpu, kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U));
7937
7938#else
7939# error "port me"
7940#endif
7941 return off;
7942}
7943
7944
7945/**
7946 * Emits a 128-bit vector register store to a VCpu value.
7947 */
7948DECL_INLINE_THROW(uint32_t)
7949iemNativeEmitSimdStoreVecRegToVCpuLowU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
7950{
7951#ifdef RT_ARCH_AMD64
7952 off = iemNativeEmitSimdStoreVecRegToVCpuLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 9), off, iVecReg, offVCpu);
7953#elif defined(RT_ARCH_ARM64)
7954 off = iemNativeEmitSimdStoreVecRegToVCpuLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecReg, offVCpu);
7955#else
7956# error "port me"
7957#endif
7958 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
7959 return off;
7960}
7961
7962
7963/**
7964 * Emits a high 128-bit vector register store to a VCpu value.
7965 */
7966DECL_FORCE_INLINE_THROW(uint32_t)
7967iemNativeEmitSimdStoreVecRegToVCpuHighU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
7968{
7969#ifdef RT_ARCH_AMD64
7970 /* vextracti128 mem128, reg128, 1 */ /* ASSUMES AVX2 support. */
7971 pCodeBuf[off++] = X86_OP_VEX3;
7972 if (iVecReg >= 8)
7973 pCodeBuf[off++] = 0x63;
7974 else
7975 pCodeBuf[off++] = 0xe3;
7976 pCodeBuf[off++] = 0x7d;
7977 pCodeBuf[off++] = 0x39;
7978 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, iVecReg, offVCpu);
7979 pCodeBuf[off++] = 0x01; /* Immediate */
7980#elif defined(RT_ARCH_ARM64)
7981 off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, iVecReg, offVCpu, kArmv8A64InstrLdStType_St_Vr_128, sizeof(RTUINT128U));
7982#else
7983# error "port me"
7984#endif
7985 return off;
7986}
7987
7988
7989/**
7990 * Emits a high 128-bit vector register store to a VCpu value.
7991 */
7992DECL_INLINE_THROW(uint32_t)
7993iemNativeEmitSimdStoreVecRegToVCpuHighU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
7994{
7995#ifdef RT_ARCH_AMD64
7996 off = iemNativeEmitSimdStoreVecRegToVCpuHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 10), off, iVecReg, offVCpu);
7997#elif defined(RT_ARCH_ARM64)
7998 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
7999 Assert(!(iVecReg & 0x1));
8000 off = iemNativeEmitSimdStoreVecRegToVCpuHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecReg + 1, offVCpu);
8001#else
8002# error "port me"
8003#endif
8004 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8005 return off;
8006}
8007
8008
8009/**
8010 * Emits a 128-bit vector register load of a VCpu value.
8011 */
8012DECL_FORCE_INLINE_THROW(uint32_t)
8013iemNativeEmitSimdLoadVecRegFromVCpuLowU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
8014{
8015#ifdef RT_ARCH_AMD64
8016 /* movdqa reg128, mem128 */ /* ASSUMING an aligned location here. */
8017 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8018 if (iVecReg >= 8)
8019 pCodeBuf[off++] = X86_OP_REX_R;
8020 pCodeBuf[off++] = 0x0f;
8021 pCodeBuf[off++] = 0x6f;
8022 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, iVecReg, offVCpu);
8023#elif defined(RT_ARCH_ARM64)
8024 off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, iVecReg, offVCpu, kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));
8025
8026#else
8027# error "port me"
8028#endif
8029 return off;
8030}
8031
8032
8033/**
8034 * Emits a 128-bit vector register load of a VCpu value.
8035 */
8036DECL_INLINE_THROW(uint32_t)
8037iemNativeEmitSimdLoadVecRegFromVCpuLowU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
8038{
8039#ifdef RT_ARCH_AMD64
8040 off = iemNativeEmitSimdLoadVecRegFromVCpuLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 9), off, iVecReg, offVCpu);
8041#elif defined(RT_ARCH_ARM64)
8042 off = iemNativeEmitSimdLoadVecRegFromVCpuLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecReg, offVCpu);
8043#else
8044# error "port me"
8045#endif
8046 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8047 return off;
8048}
8049
8050
8051/**
8052 * Emits a high 128-bit vector register load of a VCpu value.
8053 */
8054DECL_FORCE_INLINE_THROW(uint32_t)
8055iemNativeEmitSimdLoadVecRegFromVCpuHighU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
8056{
8057#ifdef RT_ARCH_AMD64
8058 /* vinserti128 ymm, ymm, mem128, 1. */ /* ASSUMES AVX2 support */
8059 pCodeBuf[off++] = X86_OP_VEX3;
8060 if (iVecReg >= 8)
8061 pCodeBuf[off++] = 0x63;
8062 else
8063 pCodeBuf[off++] = 0xe3;
8064 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE(false, iVecReg, true, X86_OP_VEX3_BYTE2_P_066H);
8065 pCodeBuf[off++] = 0x38;
8066 off = iemNativeEmitGprByVCpuDisp(pCodeBuf, off, iVecReg, offVCpu);
8067 pCodeBuf[off++] = 0x01; /* Immediate */
8068#elif defined(RT_ARCH_ARM64)
8069 off = iemNativeEmitGprByVCpuLdStEx(pCodeBuf, off, iVecReg, offVCpu, kArmv8A64InstrLdStType_Ld_Vr_128, sizeof(RTUINT128U));
8070#else
8071# error "port me"
8072#endif
8073 return off;
8074}
8075
8076
8077/**
8078 * Emits a high 128-bit vector register load of a VCpu value.
8079 */
8080DECL_INLINE_THROW(uint32_t)
8081iemNativeEmitSimdLoadVecRegFromVCpuHighU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg, uint32_t offVCpu)
8082{
8083#ifdef RT_ARCH_AMD64
8084 off = iemNativeEmitSimdLoadVecRegFromVCpuHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 10), off, iVecReg, offVCpu);
8085#elif defined(RT_ARCH_ARM64)
8086 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8087 Assert(!(iVecReg & 0x1));
8088 off = iemNativeEmitSimdLoadVecRegFromVCpuHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecReg + 1, offVCpu);
8089#else
8090# error "port me"
8091#endif
8092 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8093 return off;
8094}
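

/*
 * Sketch: a full 256-bit guest value is moved as a low/high pair of 128-bit
 * accesses (offVCpuLo/offVCpuHi are placeholder byte offsets into VMCPU):
 *
 *     off = iemNativeEmitSimdLoadVecRegFromVCpuLowU128( pReNative, off, iVecReg, offVCpuLo);
 *     off = iemNativeEmitSimdLoadVecRegFromVCpuHighU128(pReNative, off, iVecReg, offVCpuHi);
 *
 * The store direction works the same via the StoreVecRegToVCpu helpers; on
 * ARM64 iVecReg must be even since iVecReg + 1 holds the upper 128 bits.
 */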
8095
8096
8097/**
8098 * Emits a vecdst = vecsrc load.
8099 */
8100DECL_FORCE_INLINE(uint32_t)
8101iemNativeEmitSimdLoadVecRegFromVecRegU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8102{
8103#ifdef RT_ARCH_AMD64
8104 /* movdqu vecdst, vecsrc */
8105 pCodeBuf[off++] = 0xf3;
8106
8107 if ((iVecRegDst | iVecRegSrc) >= 8)
8108 pCodeBuf[off++] = iVecRegDst < 8 ? X86_OP_REX_B
8109 : iVecRegSrc >= 8 ? X86_OP_REX_R | X86_OP_REX_B
8110 : X86_OP_REX_R;
8111 pCodeBuf[off++] = 0x0f;
8112 pCodeBuf[off++] = 0x6f;
8113 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegSrc & 7);
8114
8115#elif defined(RT_ARCH_ARM64)
8116 /* mov dst, src; alias for: orr dst, src, src */
8117 pCodeBuf[off++] = Armv8A64MkVecInstrOrr(iVecRegDst, iVecRegSrc, iVecRegSrc);
8118
8119#else
8120# error "port me"
8121#endif
8122 return off;
8123}
8124
8125
8126/**
8127 * Emits a vecdst = vecsrc load, 128-bit.
8128 */
8129DECL_INLINE_THROW(uint32_t)
8130iemNativeEmitSimdLoadVecRegFromVecRegU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8131{
8132#ifdef RT_ARCH_AMD64
8133 off = iemNativeEmitSimdLoadVecRegFromVecRegU128Ex(iemNativeInstrBufEnsure(pReNative, off, 5), off, iVecRegDst, iVecRegSrc);
8134#elif defined(RT_ARCH_ARM64)
8135 off = iemNativeEmitSimdLoadVecRegFromVecRegU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst, iVecRegSrc);
8136#else
8137# error "port me"
8138#endif
8139 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8140 return off;
8141}
8142
8143
8144/**
8145 * Emits a vecdst[128:255] = vecsrc[128:255] load.
8146 */
8147DECL_FORCE_INLINE_THROW(uint32_t)
8148iemNativeEmitSimdLoadVecRegHighU128FromVecRegHighU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8149{
8150#ifdef RT_ARCH_AMD64
8151 /* vperm2i128 dst, dst, src, 0x30. */ /* ASSUMES AVX2 support */
8152 pCodeBuf[off++] = X86_OP_VEX3;
8153 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, iVecRegSrc >= 8, false, iVecRegDst >= 8);
8154 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE(false, iVecRegDst, true, X86_OP_VEX3_BYTE2_P_066H);
8155 pCodeBuf[off++] = 0x46;
8156 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegSrc & 7);
8157 pCodeBuf[off++] = 0x30; /* Immediate, this will leave the low 128 bits of dst untouched and move the high 128 bits from src to dst. */
8158
8159#elif defined(RT_ARCH_ARM64)
8160 RT_NOREF(pCodeBuf, iVecRegDst, iVecRegSrc);
8161
8162 /* Should never be called because we can just use iemNativeEmitSimdLoadVecRegFromVecRegU128(). */
8163# ifdef IEM_WITH_THROW_CATCH
8164 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
8165# else
8166 AssertReleaseFailedStmt(off = UINT32_MAX);
8167# endif
8168#else
8169# error "port me"
8170#endif
8171 return off;
8172}
8173
8174
8175/**
8176 * Emits a vecdst[128:255] = vecsrc[128:255] load, high 128-bit.
8177 */
8178DECL_INLINE_THROW(uint32_t)
8179iemNativeEmitSimdLoadVecRegHighU128FromVecRegHighU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8180{
8181#ifdef RT_ARCH_AMD64
8182 off = iemNativeEmitSimdLoadVecRegHighU128FromVecRegHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 5), off, iVecRegDst, iVecRegSrc);
8183#elif defined(RT_ARCH_ARM64)
8184 Assert(!(iVecRegDst & 0x1) && !(iVecRegSrc & 0x1));
8185 off = iemNativeEmitSimdLoadVecRegFromVecRegU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst + 1, iVecRegSrc + 1);
8186#else
8187# error "port me"
8188#endif
8189 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8190 return off;
8191}
8192
8193
8194/**
8195 * Emits a vecdst[0:127] = vecsrc[128:255] load.
8196 */
8197DECL_FORCE_INLINE_THROW(uint32_t)
8198iemNativeEmitSimdLoadVecRegLowU128FromVecRegHighU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8199{
8200#ifdef RT_ARCH_AMD64
8201 /* vextracti128 dst, src, 1. */ /* ASSUMES AVX2 support */
8202 pCodeBuf[off++] = X86_OP_VEX3;
8203 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, iVecRegDst >= 8, false, iVecRegSrc >= 8);
8204 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, true, X86_OP_VEX3_BYTE2_P_066H);
8205 pCodeBuf[off++] = 0x39;
8206 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegSrc & 7, iVecRegDst & 7);
8207 pCodeBuf[off++] = 0x1;
8208
8209#elif defined(RT_ARCH_ARM64)
8210 RT_NOREF(pCodeBuf, iVecRegDst, iVecRegSrc);
8211
8212 /* Should never be called because we can just use iemNativeEmitSimdLoadVecRegFromVecRegU128Ex(). */
8213# ifdef IEM_WITH_THROW_CATCH
8214 AssertFailedStmt(IEMNATIVE_DO_LONGJMP(NULL, VERR_IEM_IPE_9));
8215# else
8216 AssertReleaseFailedStmt(off = UINT32_MAX);
8217# endif
8218#else
8219# error "port me"
8220#endif
8221 return off;
8222}
8223
8224
8225/**
8226 * Emits a vecdst[0:127] = vecsrc[128:255] load, high 128-bit.
8227 */
8228DECL_INLINE_THROW(uint32_t)
8229iemNativeEmitSimdLoadVecRegLowU128FromVecRegHighU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8230{
8231#ifdef RT_ARCH_AMD64
8232 off = iemNativeEmitSimdLoadVecRegLowU128FromVecRegHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 6), off, iVecRegDst, iVecRegSrc);
8233#elif defined(RT_ARCH_ARM64)
8234 Assert(!(iVecRegDst & 0x1) && !(iVecRegSrc & 0x1));
8235 off = iemNativeEmitSimdLoadVecRegFromVecRegU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst, iVecRegSrc + 1);
8236#else
8237# error "port me"
8238#endif
8239 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8240 return off;
8241}
8242
8243
8244/**
8245 * Emits a vecdst = vecsrc load, 256-bit.
8246 */
8247DECL_INLINE_THROW(uint32_t)
8248iemNativeEmitSimdLoadVecRegFromVecRegU256(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8249{
8250#ifdef RT_ARCH_AMD64
8251 /* vmovdqa ymm, ymm */
8252 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
8253 if (iVecRegDst >= 8 && iVecRegSrc >= 8)
8254 {
8255 pbCodeBuf[off++] = X86_OP_VEX3;
8256 pbCodeBuf[off++] = 0x41;
8257 pbCodeBuf[off++] = 0x7d;
8258 pbCodeBuf[off++] = 0x6f;
8259 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegSrc & 7);
8260 }
8261 else
8262 {
8263 pbCodeBuf[off++] = X86_OP_VEX2;
8264 pbCodeBuf[off++] = (iVecRegSrc >= 8 || iVecRegDst >= 8) ? 0x7d : 0xfd;
8265 pbCodeBuf[off++] = iVecRegSrc >= 8 ? 0x7f : 0x6f;
8266 pbCodeBuf[off++] = iVecRegSrc >= 8
8267 ? X86_MODRM_MAKE(X86_MOD_REG, iVecRegSrc & 7, iVecRegDst & 7)
8268 : X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegSrc & 7);
8269 }
8270#elif defined(RT_ARCH_ARM64)
8271 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8272 Assert(!(iVecRegDst & 0x1)); Assert(!(iVecRegSrc & 0x1));
8273 off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, iVecRegDst, iVecRegSrc );
8274 off = iemNativeEmitSimdLoadVecRegFromVecRegU128(pReNative, off, iVecRegDst + 1, iVecRegSrc + 1);
8275#else
8276# error "port me"
8277#endif
8278 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8279 return off;
8280}
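

/*
 * Note: the VEX2 branch above switches to the store form of vmovdqa (0x7f)
 * when the source is ymm8..15; VEX2 only carries the R bit, so putting the
 * high register in the reg field keeps the two-byte prefix usable instead of
 * falling back to the three-byte VEX3 form.
 */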
8281
8282
8283/**
8284 * Emits a vecdst[128:255] = vecsrc[0:127] load.
8285 */
8286DECL_FORCE_INLINE(uint32_t)
8287iemNativeEmitSimdLoadVecRegHighU128FromVecRegLowU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8288{
8289#ifdef RT_ARCH_AMD64
8290 /* vinserti128 dst, dst, src, 1. */ /* ASSUMES AVX2 support */
8291 pCodeBuf[off++] = X86_OP_VEX3;
8292 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, iVecRegSrc >= 8, false, iVecRegDst >= 8);
8293 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE(false, iVecRegDst, true, X86_OP_VEX3_BYTE2_P_066H);
8294 pCodeBuf[off++] = 0x38;
8295 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegSrc & 7);
8296 pCodeBuf[off++] = 0x01; /* Immediate */
8297
8298#elif defined(RT_ARCH_ARM64)
8299 Assert(!(iVecRegDst & 0x1) && !(iVecRegSrc & 0x1));
8300 /* mov dst, src; alias for: orr dst, src, src */
8301 pCodeBuf[off++] = Armv8A64MkVecInstrOrr(iVecRegDst + 1, iVecRegSrc, iVecRegSrc);
8302
8303#else
8304# error "port me"
8305#endif
8306 return off;
8307}
8308
8309
8310/**
8311 * Emits a vecdst[128:255] = vecsrc[0:127] load, 128-bit.
8312 */
8313DECL_INLINE_THROW(uint32_t)
8314iemNativeEmitSimdLoadVecRegHighU128FromVecRegLowU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
8315{
8316#ifdef RT_ARCH_AMD64
8317 off = iemNativeEmitSimdLoadVecRegHighU128FromVecRegLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 6), off, iVecRegDst, iVecRegSrc);
8318#elif defined(RT_ARCH_ARM64)
8319 off = iemNativeEmitSimdLoadVecRegHighU128FromVecRegLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst, iVecRegSrc);
8320#else
8321# error "port me"
8322#endif
8323 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8324 return off;
8325}
8326
8327
8328/**
8329 * Emits a gprdst = vecsrc[x] load, 64-bit.
8330 */
8331DECL_FORCE_INLINE(uint32_t)
8332iemNativeEmitSimdLoadGprFromVecRegU64Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iQWord)
8333{
8334#ifdef RT_ARCH_AMD64
8335 if (iQWord >= 2)
8336 {
8337 /*
8338 * vpextrq doesn't work on the upper 128-bits.
8339 * So we use the following sequence:
8340 * vextracti128 vectmp0, vecsrc, 1
8341 * pextrq gpr, vectmp0, #(iQWord - 2)
8342 */
8343 /* vextracti128 */
8344 pCodeBuf[off++] = X86_OP_VEX3;
8345 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8, false, iVecRegSrc >= 8);
8346 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, true, X86_OP_VEX3_BYTE2_P_066H);
8347 pCodeBuf[off++] = 0x39;
8348 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegSrc & 7, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7);
8349 pCodeBuf[off++] = 0x1;
8350
8351 /* pextrq */
8352 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8353 pCodeBuf[off++] = X86_OP_REX_W
8354 | (IEMNATIVE_SIMD_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_R)
8355 | (iGprDst < 8 ? 0 : X86_OP_REX_B);
8356 pCodeBuf[off++] = 0x0f;
8357 pCodeBuf[off++] = 0x3a;
8358 pCodeBuf[off++] = 0x16;
8359 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7, iGprDst & 7);
8360 pCodeBuf[off++] = iQWord - 2;
8361 }
8362 else
8363 {
8364 /* pextrq gpr, vecsrc, #iQWord (ASSUMES SSE4.1). */
8365 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8366 pCodeBuf[off++] = X86_OP_REX_W
8367 | (iVecRegSrc < 8 ? 0 : X86_OP_REX_R)
8368 | (iGprDst < 8 ? 0 : X86_OP_REX_B);
8369 pCodeBuf[off++] = 0x0f;
8370 pCodeBuf[off++] = 0x3a;
8371 pCodeBuf[off++] = 0x16;
8372 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegSrc & 7, iGprDst & 7);
8373 pCodeBuf[off++] = iQWord;
8374 }
8375#elif defined(RT_ARCH_ARM64)
8376 /* umov gprdst, vecsrc[iQWord] */
8377 pCodeBuf[off++] = Armv8A64MkVecInstrUmov(iGprDst, iVecRegSrc, iQWord, kArmv8InstrUmovInsSz_U64);
8378#else
8379# error "port me"
8380#endif
8381 return off;
8382}
8383
8384
8385/**
8386 * Emits a gprdst = vecsrc[x] load, 64-bit.
8387 */
8388DECL_INLINE_THROW(uint32_t)
8389iemNativeEmitSimdLoadGprFromVecRegU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iQWord)
8390{
8391 Assert(iQWord <= 3);
8392
8393#ifdef RT_ARCH_AMD64
8394 off = iemNativeEmitSimdLoadGprFromVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, 13), off, iGprDst, iVecRegSrc, iQWord); /* 13 = worst case: vextracti128 + pextrq */
8395#elif defined(RT_ARCH_ARM64)
8396 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8397 Assert(!(iVecRegSrc & 0x1));
8398 /* Need to access the "high" 128-bit vector register. */
8399 if (iQWord >= 2)
8400 off = iemNativeEmitSimdLoadGprFromVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc + 1, iQWord - 2);
8401 else
8402 off = iemNativeEmitSimdLoadGprFromVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc, iQWord);
8403#else
8404# error "port me"
8405#endif
8406 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8407 return off;
8408}
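

/*
 * Example (illustrative): fetching qword #3 of a 256-bit value takes the
 * vextracti128 + pextrq sequence on AMD64 but a single UMOV from the odd
 * register of the pair on ARM64 (idxSimdReg is a placeholder, even on ARM64):
 *
 *     off = iemNativeEmitSimdLoadGprFromVecRegU64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, idxSimdReg, 3);
 */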
8409
8410
8411/**
8412 * Emits a gprdst = vecsrc[x] load, 32-bit.
8413 */
8414DECL_FORCE_INLINE(uint32_t)
8415iemNativeEmitSimdLoadGprFromVecRegU32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iDWord)
8416{
8417#ifdef RT_ARCH_AMD64
8418 if (iDWord >= 4)
8419 {
8420 /*
8421 * vpextrd doesn't work on the upper 128-bits.
8422 * So we use the following sequence:
8423 * vextracti128 vectmp0, vecsrc, 1
8424 * pextrd gpr, vectmp0, #(iDWord - 4)
8425 */
8426 /* vextracti128 */
8427 pCodeBuf[off++] = X86_OP_VEX3;
8428 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8, false, iVecRegSrc >= 8);
8429 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, true, X86_OP_VEX3_BYTE2_P_066H);
8430 pCodeBuf[off++] = 0x39;
8431 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegSrc & 7, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7);
8432 pCodeBuf[off++] = 0x1;
8433
8434 /* pextrd gpr, vectmp0, #(iDWord - 4) (ASSUMES SSE4.1). */
8435 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8436 if (iGprDst >= 8 || IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8)
8437 pCodeBuf[off++] = (IEMNATIVE_SIMD_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_R)
8438 | (iGprDst < 8 ? 0 : X86_OP_REX_B);
8439 pCodeBuf[off++] = 0x0f;
8440 pCodeBuf[off++] = 0x3a;
8441 pCodeBuf[off++] = 0x16;
8442 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7, iGprDst & 7);
8443 pCodeBuf[off++] = iDWord - 4;
8444 }
8445 else
8446 {
8447 /* pextrd gpr, vecsrc, #iDWord (ASSUMES SSE4.1). */
8448 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8449 if (iGprDst >= 8 || iVecRegSrc >= 8)
8450 pCodeBuf[off++] = (iVecRegSrc < 8 ? 0 : X86_OP_REX_R)
8451 | (iGprDst < 8 ? 0 : X86_OP_REX_B);
8452 pCodeBuf[off++] = 0x0f;
8453 pCodeBuf[off++] = 0x3a;
8454 pCodeBuf[off++] = 0x16;
8455 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegSrc & 7, iGprDst & 7);
8456 pCodeBuf[off++] = iDWord;
8457 }
8458#elif defined(RT_ARCH_ARM64)
8459 Assert(iDWord < 4);
8460
8461 /* umov gprdst, vecsrc[iDWord] */
8462 pCodeBuf[off++] = Armv8A64MkVecInstrUmov(iGprDst, iVecRegSrc, iDWord, kArmv8InstrUmovInsSz_U32, false /*fDst64Bit*/);
8463#else
8464# error "port me"
8465#endif
8466 return off;
8467}
8468
8469
8470/**
8471 * Emits a gprdst = vecsrc[x] load, 32-bit.
8472 */
8473DECL_INLINE_THROW(uint32_t)
8474iemNativeEmitSimdLoadGprFromVecRegU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iDWord)
8475{
8476 Assert(iDWord <= 7);
8477
8478#ifdef RT_ARCH_AMD64
8479 off = iemNativeEmitSimdLoadGprFromVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, 15), off, iGprDst, iVecRegSrc, iDWord);
8480#elif defined(RT_ARCH_ARM64)
8481 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8482 Assert(!(iVecRegSrc & 0x1));
8483 /* Need to access the "high" 128-bit vector register. */
8484 if (iDWord >= 4)
8485 off = iemNativeEmitSimdLoadGprFromVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc + 1, iDWord - 4);
8486 else
8487 off = iemNativeEmitSimdLoadGprFromVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc, iDWord);
8488#else
8489# error "port me"
8490#endif
8491 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8492 return off;
8493}
8494
8495
8496/**
8497 * Emits a gprdst = vecsrc[x] load, 16-bit.
8498 */
8499DECL_FORCE_INLINE(uint32_t)
8500iemNativeEmitSimdLoadGprFromVecRegU16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iWord)
8501{
8502#ifdef RT_ARCH_AMD64
8503 if (iWord >= 8)
8504 {
8505 /** @todo Currently not used. */
8506 AssertReleaseFailed();
8507 }
8508 else
8509 {
8510 /* pextrw gpr, vecsrc, #iWord */
8511 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8512 if (iGprDst >= 8 || iVecRegSrc >= 8)
8513 pCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R)
8514 | (iVecRegSrc < 8 ? 0 : X86_OP_REX_B);
8515 pCodeBuf[off++] = 0x0f;
8516 pCodeBuf[off++] = 0xc5;
8517 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iVecRegSrc & 7);
8518 pCodeBuf[off++] = iWord;
8519 }
8520#elif defined(RT_ARCH_ARM64)
8521 /* umov gprdst, vecsrc[iWord] */
8522 pCodeBuf[off++] = Armv8A64MkVecInstrUmov(iGprDst, iVecRegSrc, iWord, kArmv8InstrUmovInsSz_U16, false /*fDst64Bit*/);
8523#else
8524# error "port me"
8525#endif
8526 return off;
8527}
8528
8529
8530/**
8531 * Emits a gprdst = vecsrc[x] load, 16-bit.
8532 */
8533DECL_INLINE_THROW(uint32_t)
8534iemNativeEmitSimdLoadGprFromVecRegU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iWord)
8535{
8536 Assert(iWord <= 15);
8537
8538#ifdef RT_ARCH_AMD64
8539 off = iemNativeEmitSimdLoadGprFromVecRegU16Ex(iemNativeInstrBufEnsure(pReNative, off, 6), off, iGprDst, iVecRegSrc, iWord);
8540#elif defined(RT_ARCH_ARM64)
8541 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8542 Assert(!(iVecRegSrc & 0x1));
8543 /* Need to access the "high" 128-bit vector register. */
8544 if (iWord >= 8)
8545 off = iemNativeEmitSimdLoadGprFromVecRegU16Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc + 1, iWord - 8);
8546 else
8547 off = iemNativeEmitSimdLoadGprFromVecRegU16Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc, iWord);
8548#else
8549# error "port me"
8550#endif
8551 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8552 return off;
8553}
8554
8555
8556/**
8557 * Emits a gprdst = vecsrc[x] load, 8-bit.
8558 */
8559DECL_FORCE_INLINE(uint32_t)
8560iemNativeEmitSimdLoadGprFromVecRegU8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iByte)
8561{
8562#ifdef RT_ARCH_AMD64
8563 if (iByte >= 16)
8564 {
8565 /** @todo Currently not used. */
8566 AssertReleaseFailed();
8567 }
8568 else
8569 {
8570 /* pextrb gpr, vecsrc, #iByte */
8571 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8572 if (iGprDst >= 8 || iVecRegSrc >= 8)
8573 pCodeBuf[off++] = (iVecRegSrc < 8 ? 0 : X86_OP_REX_R)
8574 | (iGprDst < 8 ? 0 : X86_OP_REX_B);
8575 pCodeBuf[off++] = 0x0f;
8576 pCodeBuf[off++] = 0x3a;
8577 pCodeBuf[off++] = 0x14;
8578 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegSrc & 7, iGprDst & 7);
8579 pCodeBuf[off++] = iByte;
8580 }
8581#elif defined(RT_ARCH_ARM64)
8582 /* umov gprdst, vecsrc[iByte] */
8583 pCodeBuf[off++] = Armv8A64MkVecInstrUmov(iGprDst, iVecRegSrc, iByte, kArmv8InstrUmovInsSz_U8, false /*fDst64Bit*/);
8584#else
8585# error "port me"
8586#endif
8587 return off;
8588}
8589
8590
8591/**
8592 * Emits a gprdst = vecsrc[x] load, 8-bit.
8593 */
8594DECL_INLINE_THROW(uint32_t)
8595iemNativeEmitSimdLoadGprFromVecRegU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iVecRegSrc, uint8_t iByte)
8596{
8597 Assert(iByte <= 31);
8598
8599#ifdef RT_ARCH_AMD64
8600 off = iemNativeEmitSimdLoadGprFromVecRegU8Ex(iemNativeInstrBufEnsure(pReNative, off, 7), off, iGprDst, iVecRegSrc, iByte);
8601#elif defined(RT_ARCH_ARM64)
8602 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8603 Assert(!(iVecRegSrc & 0x1));
8604 /* Need to access the "high" 128-bit vector register. */
8605 if (iByte >= 16)
8606 off = iemNativeEmitSimdLoadGprFromVecRegU8Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc + 1, iByte - 16);
8607 else
8608 off = iemNativeEmitSimdLoadGprFromVecRegU8Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iGprDst, iVecRegSrc, iByte);
8609#else
8610# error "port me"
8611#endif
8612 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8613 return off;
8614}
8615
8616
8617/**
8618 * Emits a vecdst[x] = gprsrc store, 64-bit.
8619 */
8620DECL_FORCE_INLINE(uint32_t)
8621iemNativeEmitSimdStoreGprToVecRegU64Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iQWord)
8622{
8623#ifdef RT_ARCH_AMD64
8624 if (iQWord >= 2)
8625 {
8626 /*
8627 * vpinsrq doesn't work on the upper 128-bits.
8628 * So we use the following sequence:
8629 * vextracti128 vectmp0, vecdst, 1
8630 * pinsrq vectmp0, gpr, #(iQWord - 2)
8631 * vinserti128 vecdst, vectmp0, 1
8632 */
8633 /* vextracti128 */
8634 pCodeBuf[off++] = X86_OP_VEX3;
8635 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8, false, iVecRegDst >= 8);
8636 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, true, X86_OP_VEX3_BYTE2_P_066H);
8637 pCodeBuf[off++] = 0x39;
8638 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7);
8639 pCodeBuf[off++] = 0x1;
8640
8641 /* pinsrq */
8642 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8643 pCodeBuf[off++] = X86_OP_REX_W
8644 | (IEMNATIVE_SIMD_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_R)
8645 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
8646 pCodeBuf[off++] = 0x0f;
8647 pCodeBuf[off++] = 0x3a;
8648 pCodeBuf[off++] = 0x22;
8649 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7, iGprSrc & 7);
8650 pCodeBuf[off++] = iQWord - 2;
8651
8652 /* vinserti128 */
8653 pCodeBuf[off++] = X86_OP_VEX3;
8654 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8, false, iVecRegDst >= 8);
8655 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE(false, iVecRegDst, true, X86_OP_VEX3_BYTE2_P_066H);
8656 pCodeBuf[off++] = 0x38;
8657 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7);
8658 pCodeBuf[off++] = 0x01; /* Immediate */
8659 }
8660 else
8661 {
8662 /* pinsrq vecsrc, gpr, #iQWord (ASSUMES SSE4.1). */
8663 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8664 pCodeBuf[off++] = X86_OP_REX_W
8665 | (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
8666 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
8667 pCodeBuf[off++] = 0x0f;
8668 pCodeBuf[off++] = 0x3a;
8669 pCodeBuf[off++] = 0x22;
8670 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
8671 pCodeBuf[off++] = iQWord;
8672 }
8673#elif defined(RT_ARCH_ARM64)
8674 /* ins vecsrc[iQWord], gpr */
8675 pCodeBuf[off++] = Armv8A64MkVecInstrIns(iVecRegDst, iGprSrc, iQWord, kArmv8InstrUmovInsSz_U64);
8676#else
8677# error "port me"
8678#endif
8679 return off;
8680}
8681
8682
8683/**
8684 * Emits a vecdst[x] = gprsrc store, 64-bit.
8685 */
8686DECL_INLINE_THROW(uint32_t)
8687iemNativeEmitSimdStoreGprToVecRegU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iQWord)
8688{
8689 Assert(iQWord <= 3);
8690
8691#ifdef RT_ARCH_AMD64
8692 off = iemNativeEmitSimdStoreGprToVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, 19), off, iVecRegDst, iGprSrc, iQWord);
8693#elif defined(RT_ARCH_ARM64)
8694 Assert(!(iVecRegDst & 0x1));
8695 if (iQWord >= 2)
8696 off = iemNativeEmitSimdStoreGprToVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst + 1, iGprSrc, iQWord - 2);
8697 else
8698 off = iemNativeEmitSimdStoreGprToVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst, iGprSrc, iQWord);
8699#else
8700# error "port me"
8701#endif
8702 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8703 return off;
8704}
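

/*
 * Note / sketch: writing into the upper YMM half on AMD64 is a
 * read-modify-write of the high 128 bits (vextracti128 + pinsrq +
 * vinserti128, 19 bytes worst case), whereas ARM64 needs only one INS:
 *
 *     off = iemNativeEmitSimdStoreGprToVecRegU64(pReNative, off, idxSimdReg, idxRegValue, 2);
 *
 * (idxSimdReg/idxRegValue are placeholders for allocated host registers.)
 */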
8705
8706
8707/**
8708 * Emits a vecdst[x] = gprsrc store, 32-bit.
8709 */
8710DECL_FORCE_INLINE(uint32_t)
8711iemNativeEmitSimdStoreGprToVecRegU32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iDWord)
8712{
8713#ifdef RT_ARCH_AMD64
8714 if (iDWord >= 4)
8715 {
8716 /*
8717 * vpinsrd doesn't work on the upper 128-bits.
8718 * So we use the following sequence:
8719 * vextracti128 vectmp0, vecdst, 1
8720 * pinsrd vectmp0, gpr, #(iDWord - 4)
8721 * vinserti128 vecdst, vectmp0, 1
8722 */
8723 /* vextracti128 */
8724 pCodeBuf[off++] = X86_OP_VEX3;
8725 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8, false, iVecRegDst >= 8);
8726 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, true, X86_OP_VEX3_BYTE2_P_066H);
8727 pCodeBuf[off++] = 0x39;
8728 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7);
8729 pCodeBuf[off++] = 0x1;
8730
8731 /* pinsrd */
8732 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8733 if (IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8 || iGprSrc >= 8)
8734 pCodeBuf[off++] = (IEMNATIVE_SIMD_REG_FIXED_TMP0 < 8 ? 0 : X86_OP_REX_R)
8735 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
8736 pCodeBuf[off++] = 0x0f;
8737 pCodeBuf[off++] = 0x3a;
8738 pCodeBuf[off++] = 0x22;
8739 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7, iGprSrc & 7);
8740 pCodeBuf[off++] = iDWord - 4;
8741
8742 /* vinserti128 */
8743 pCodeBuf[off++] = X86_OP_VEX3;
8744 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, IEMNATIVE_SIMD_REG_FIXED_TMP0 >= 8, false, iVecRegDst >= 8);
8745 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE(false, iVecRegDst, true, X86_OP_VEX3_BYTE2_P_066H);
8746 pCodeBuf[off++] = 0x38;
8747 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, IEMNATIVE_SIMD_REG_FIXED_TMP0 & 7);
8748 pCodeBuf[off++] = 0x01; /* Immediate */
8749 }
8750 else
8751 {
8752 /* pinsrd vecsrc, gpr, #iDWord (ASSUMES SSE4.1). */
8753 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8754 if (iVecRegDst >= 8 || iGprSrc >= 8)
8755 pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
8756 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
8757 pCodeBuf[off++] = 0x0f;
8758 pCodeBuf[off++] = 0x3a;
8759 pCodeBuf[off++] = 0x22;
8760 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
8761 pCodeBuf[off++] = iDWord;
8762 }
8763#elif defined(RT_ARCH_ARM64)
8764 /* ins vecsrc[iDWord], gpr */
8765 pCodeBuf[off++] = Armv8A64MkVecInstrIns(iVecRegDst, iGprSrc, iDWord, kArmv8InstrUmovInsSz_U32);
8766#else
8767# error "port me"
8768#endif
8769 return off;
8770}
8771
8772
8773/**
8774 * Emits a vecdst[x] = gprsrc store, 32-bit.
8775 */
8776DECL_INLINE_THROW(uint32_t)
8777iemNativeEmitSimdStoreGprToVecRegU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iDWord)
8778{
8779 Assert(iDWord <= 7);
8780
8781#ifdef RT_ARCH_AMD64
8782 off = iemNativeEmitSimdStoreGprToVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, 19), off, iVecRegDst, iGprSrc, iDWord);
8783#elif defined(RT_ARCH_ARM64)
8784 Assert(!(iVecRegDst & 0x1));
8785 if (iDWord >= 4)
8786 off = iemNativeEmitSimdStoreGprToVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst + 1, iGprSrc, iDWord - 4);
8787 else
8788 off = iemNativeEmitSimdStoreGprToVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst, iGprSrc, iDWord);
8789#else
8790# error "port me"
8791#endif
8792 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8793 return off;
8794}
8795
8796
8797/**
8798 * Emits a vecdst[x] = gprsrc store, 16-bit.
8799 */
8800DECL_FORCE_INLINE(uint32_t)
8801iemNativeEmitSimdStoreGprToVecRegU16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iWord)
8802{
8803#ifdef RT_ARCH_AMD64
8804 /* pinsrw vecsrc, gpr, #iWord. */
8805 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8806 if (iVecRegDst >= 8 || iGprSrc >= 8)
8807 pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
8808 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
8809 pCodeBuf[off++] = 0x0f;
8810 pCodeBuf[off++] = 0xc4;
8811 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
8812 pCodeBuf[off++] = iWord;
8813#elif defined(RT_ARCH_ARM64)
8814 /* ins vecsrc[iWord], gpr */
8815 pCodeBuf[off++] = Armv8A64MkVecInstrIns(iVecRegDst, iGprSrc, iWord, kArmv8InstrUmovInsSz_U16);
8816#else
8817# error "port me"
8818#endif
8819 return off;
8820}
8821
8822
8823/**
8824 * Emits a vecdst[x] = gprsrc store, 16-bit.
8825 */
8826DECL_INLINE_THROW(uint32_t)
8827iemNativeEmitSimdStoreGprToVecRegU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iWord)
8828{
8829 Assert(iWord <= 7); /* Both paths only handle a single 128-bit register. */
8830
8831#ifdef RT_ARCH_AMD64
8832 off = iemNativeEmitSimdStoreGprToVecRegU16Ex(iemNativeInstrBufEnsure(pReNative, off, 6), off, iVecRegDst, iGprSrc, iWord);
8833#elif defined(RT_ARCH_ARM64)
8834 off = iemNativeEmitSimdStoreGprToVecRegU16Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst, iGprSrc, iWord);
8835#else
8836# error "port me"
8837#endif
8838 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8839 return off;
8840}
8841
8842
8843/**
8844 * Emits a vecdst[x] = gprsrc store, 8-bit.
8845 */
8846DECL_FORCE_INLINE(uint32_t)
8847iemNativeEmitSimdStoreGprToVecRegU8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iByte)
8848{
8849#ifdef RT_ARCH_AMD64
8850 /* pinsrb vecsrc, gpr, #iByte (ASSUMES SSE4.1). */
8851 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8852 if (iVecRegDst >= 8 || iGprSrc >= 8)
8853 pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
8854 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
8855 pCodeBuf[off++] = 0x0f;
8856 pCodeBuf[off++] = 0x3a;
8857 pCodeBuf[off++] = 0x20;
8858 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
8859 pCodeBuf[off++] = iByte;
8860#elif defined(RT_ARCH_ARM64)
8861 /* ins vecsrc[iByte], gpr */
8862 pCodeBuf[off++] = Armv8A64MkVecInstrIns(iVecRegDst, iGprSrc, iByte, kArmv8InstrUmovInsSz_U8);
8863#else
8864# error "port me"
8865#endif
8866 return off;
8867}
8868
8869
8870/**
8871 * Emits a vecdst[x] = gprsrc store, 8-bit.
8872 */
8873DECL_INLINE_THROW(uint32_t)
8874iemNativeEmitSimdStoreGprToVecRegU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, uint8_t iByte)
8875{
8876 Assert(iByte <= 15);
8877
8878#ifdef RT_ARCH_AMD64
8879 off = iemNativeEmitSimdStoreGprToVecRegU8Ex(iemNativeInstrBufEnsure(pReNative, off, 7), off, iVecRegDst, iGprSrc, iByte);
8880#elif defined(RT_ARCH_ARM64)
8881 off = iemNativeEmitSimdStoreGprToVecRegU8Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecRegDst, iGprSrc, iByte);
8882#else
8883# error "port me"
8884#endif
8885 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8886 return off;
8887}
8888
8889
8890/**
8891 * Emits a vecdst.au32[iDWord] = 0 store.
8892 */
8893DECL_FORCE_INLINE(uint32_t)
8894iemNativeEmitSimdZeroVecRegElemU32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg, uint8_t iDWord)
8895{
8896 Assert(iDWord <= 7);
8897
8898#ifdef RT_ARCH_AMD64
8899 /*
8900 * xor tmp0, tmp0
8901 * pinsrd xmm, tmp0, iDword
8902 */
8903 if (IEMNATIVE_REG_FIXED_TMP0 >= 8)
8904 pCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
8905 pCodeBuf[off++] = 0x33;
8906 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, IEMNATIVE_REG_FIXED_TMP0 & 7, IEMNATIVE_REG_FIXED_TMP0 & 7);
8907 off = iemNativeEmitSimdStoreGprToVecRegU32Ex(&pCodeBuf[off], off, iVecReg, IEMNATIVE_REG_FIXED_TMP0, iDWord);
8908#elif defined(RT_ARCH_ARM64)
8909 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8910 Assert(!(iVecReg & 0x1));
8911 /* ins vecsrc[iDWord], wzr */
8912 if (iDWord >= 4)
8913 pCodeBuf[off++] = Armv8A64MkVecInstrIns(iVecReg + 1, ARMV8_A64_REG_WZR, iDWord - 4, kArmv8InstrUmovInsSz_U32);
8914 else
8915 pCodeBuf[off++] = Armv8A64MkVecInstrIns(iVecReg, ARMV8_A64_REG_WZR, iDWord, kArmv8InstrUmovInsSz_U32);
8916#else
8917# error "port me"
8918#endif
8919 return off;
8920}
8921
8922
8923/**
8924 * Emits a vecdst.au32[iDWord] = 0 store.
8925 */
8926DECL_INLINE_THROW(uint32_t)
8927iemNativeEmitSimdZeroVecRegElemU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg, uint8_t iDWord)
8928{
8930#ifdef RT_ARCH_AMD64
8931 off = iemNativeEmitSimdZeroVecRegElemU32Ex(iemNativeInstrBufEnsure(pReNative, off, 22), off, iVecReg, iDWord); /* 22 = worst case: xor + vextracti128 + pinsrd + vinserti128 */
8932#elif defined(RT_ARCH_ARM64)
8933 off = iemNativeEmitSimdZeroVecRegElemU32Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecReg, iDWord);
8934#else
8935# error "port me"
8936#endif
8937 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8938 return off;
8939}
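

/*
 * Sketch: zeroing a single dword element, e.g. dword #5 of a 256-bit value
 * (the helper picks the right 128-bit half itself; idxSimdReg is a
 * placeholder and must be even on ARM64):
 *
 *     off = iemNativeEmitSimdZeroVecRegElemU32(pReNative, off, idxSimdReg, 5);
 */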
8940
8941
8942/**
8943 * Emits a vecdst[0:127] = 0 store.
8944 */
8945DECL_FORCE_INLINE(uint32_t)
8946iemNativeEmitSimdZeroVecRegLowU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg)
8947{
8948#ifdef RT_ARCH_AMD64
8949 /* pxor xmm, xmm */
8950 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
8951 if (iVecReg >= 8)
8952 pCodeBuf[off++] = X86_OP_REX_B | X86_OP_REX_R;
8953 pCodeBuf[off++] = 0x0f;
8954 pCodeBuf[off++] = 0xef;
8955 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecReg & 7, iVecReg & 7);
8956#elif defined(RT_ARCH_ARM64)
8957 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
8958 Assert(!(iVecReg & 0x1));
8959 /* eor vecreg, vecreg, vecreg */
8960 pCodeBuf[off++] = Armv8A64MkVecInstrEor(iVecReg, iVecReg, iVecReg);
8961#else
8962# error "port me"
8963#endif
8964 return off;
8965}
8966
8967
8968/**
8969 * Emits a vecdst[0:127] = 0 store.
8970 */
8971DECL_INLINE_THROW(uint32_t)
8972iemNativeEmitSimdZeroVecRegLowU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg)
8973{
8974#ifdef RT_ARCH_AMD64
8975 off = iemNativeEmitSimdZeroVecRegLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 5), off, iVecReg);
8976#elif defined(RT_ARCH_ARM64)
8977 off = iemNativeEmitSimdZeroVecRegLowU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecReg);
8978#else
8979# error "port me"
8980#endif
8981 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
8982 return off;
8983}
8984
8985
8986/**
8987 * Emits a vecdst[128:255] = 0 store.
8988 */
8989DECL_FORCE_INLINE(uint32_t)
8990iemNativeEmitSimdZeroVecRegHighU128Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg)
8991{
8992#ifdef RT_ARCH_AMD64
8993 /* vmovdqa xmm, xmm. This will clear the upper half of ymm */
8994 if (iVecReg < 8)
8995 {
8996 pCodeBuf[off++] = X86_OP_VEX2;
8997 pCodeBuf[off++] = 0xf9;
8998 }
8999 else
9000 {
9001 pCodeBuf[off++] = X86_OP_VEX3;
9002 pCodeBuf[off++] = 0x41;
9003 pCodeBuf[off++] = 0x79;
9004 }
9005 pCodeBuf[off++] = 0x6f;
9006 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecReg & 7, iVecReg & 7);
9007#elif defined(RT_ARCH_ARM64)
9008 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
9009 Assert(!(iVecReg & 0x1));
9010 /* eor vecreg, vecreg, vecreg */
9011 pCodeBuf[off++] = Armv8A64MkVecInstrEor(iVecReg + 1, iVecReg + 1, iVecReg + 1);
9012#else
9013# error "port me"
9014#endif
9015 return off;
9016}
9017
9018
9019/**
9020 * Emits a vecdst[128:255] = 0 store.
9021 */
9022DECL_INLINE_THROW(uint32_t)
9023iemNativeEmitSimdZeroVecRegHighU128(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg)
9024{
9025#ifdef RT_ARCH_AMD64
9026 off = iemNativeEmitSimdZeroVecRegHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 7), off, iVecReg);
9027#elif defined(RT_ARCH_ARM64)
9028 off = iemNativeEmitSimdZeroVecRegHighU128Ex(iemNativeInstrBufEnsure(pReNative, off, 1), off, iVecReg);
9029#else
9030# error "port me"
9031#endif
9032 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
9033 return off;
9034}
9035
9036
9037/**
9038 * Emits a vecdst[0:255] = 0 store.
9039 */
9040DECL_FORCE_INLINE(uint32_t)
9041iemNativeEmitSimdZeroVecRegU256Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecReg)
9042{
9043#ifdef RT_ARCH_AMD64
9044 /* vpxor ymm, ymm, ymm */
9045 if (iVecReg < 8)
9046 {
9047 pCodeBuf[off++] = X86_OP_VEX2;
9048 pCodeBuf[off++] = X86_OP_VEX2_BYTE1_MAKE(false, iVecReg, true, X86_OP_VEX3_BYTE2_P_066H);
9049 }
9050 else
9051 {
9052 pCodeBuf[off++] = X86_OP_VEX3;
9053 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_X | 0x01;
9054 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE(false, iVecReg, true, X86_OP_VEX3_BYTE2_P_066H);
9055 }
9056 pCodeBuf[off++] = 0xef;
9057 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecReg & 7, iVecReg & 7);
9058#elif defined(RT_ARCH_ARM64)
9059 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
9060 Assert(!(iVecReg & 0x1));
9061 /* eor vecreg, vecreg, vecreg */
9062 pCodeBuf[off++] = Armv8A64MkVecInstrEor(iVecReg, iVecReg, iVecReg);
9063 pCodeBuf[off++] = Armv8A64MkVecInstrEor(iVecReg + 1, iVecReg + 1, iVecReg + 1);
9064#else
9065# error "port me"
9066#endif
9067 return off;
9068}
9069
9070
9071/**
9072 * Emits a vecdst[0:255] = 0 store.
9073 */
9074DECL_INLINE_THROW(uint32_t)
9075iemNativeEmitSimdZeroVecRegU256(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecReg)
9076{
9077#ifdef RT_ARCH_AMD64
9078 off = iemNativeEmitSimdZeroVecRegU256Ex(iemNativeInstrBufEnsure(pReNative, off, 5), off, iVecReg);
9079#elif defined(RT_ARCH_ARM64)
9080 off = iemNativeEmitSimdZeroVecRegU256Ex(iemNativeInstrBufEnsure(pReNative, off, 2), off, iVecReg);
9081#else
9082# error "port me"
9083#endif
9084 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
9085 return off;
9086}
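

/*
 * Sketch: zero the full 256 bits before materializing a narrower result, or
 * clear just the upper half to get VEX.128-style zeroing (idxSimdReg is a
 * placeholder, even on ARM64):
 *
 *     off = iemNativeEmitSimdZeroVecRegU256(pReNative, off, idxSimdReg);
 *     off = iemNativeEmitSimdZeroVecRegHighU128(pReNative, off, idxSimdReg);
 */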
9087
9088
9089/**
9090 * Emits a vecdst = gprsrc broadcast, 8-bit.
9091 */
9092DECL_FORCE_INLINE(uint32_t)
9093iemNativeEmitSimdBroadcastGprToVecRegU8Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9094{
9095#ifdef RT_ARCH_AMD64
9096 /* pinsrb vecdst, gpr, #0 (ASSUMES SSE 4.1) */
9097 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
9098 if (iVecRegDst >= 8 || iGprSrc >= 8)
9099 pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
9100 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
9101 pCodeBuf[off++] = 0x0f;
9102 pCodeBuf[off++] = 0x3a;
9103 pCodeBuf[off++] = 0x20;
9104 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
9105 pCodeBuf[off++] = 0x00;
9106
9107 /* vpbroadcastb {y,x}mm, xmm (ASSUMES AVX2). */
9108 pCodeBuf[off++] = X86_OP_VEX3;
9109 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_X
9110 | 0x02 /* opcode map. */
9111 | ( iVecRegDst >= 8
9112 ? 0
9113 : X86_OP_VEX3_BYTE1_B | X86_OP_VEX3_BYTE1_R);
9114 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, f256Bit, X86_OP_VEX3_BYTE2_P_066H);
9115 pCodeBuf[off++] = 0x78;
9116 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegDst & 7);
9117#elif defined(RT_ARCH_ARM64)
9118 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
9119 Assert(!(iVecRegDst & 0x1) || !f256Bit);
9120
9121 /* dup vecsrc, gpr */
9122 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst, iGprSrc, kArmv8InstrUmovInsSz_U8);
9123 if (f256Bit)
9124 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst + 1, iGprSrc, kArmv8InstrUmovInsSz_U8);
9125#else
9126# error "port me"
9127#endif
9128 return off;
9129}
9130
9131
9132/**
9133 * Emits a vecdst[x] = gprsrc broadcast, 8-bit.
9134 */
9135DECL_INLINE_THROW(uint32_t)
9136iemNativeEmitSimdBroadcastGprToVecRegU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9137{
9138#ifdef RT_ARCH_AMD64
9139 off = iemNativeEmitSimdBroadcastGprToVecRegU8Ex(iemNativeInstrBufEnsure(pReNative, off, 12), off, iVecRegDst, iGprSrc, f256Bit);
9140#elif defined(RT_ARCH_ARM64)
9141 off = iemNativeEmitSimdBroadcastGprToVecRegU8Ex(iemNativeInstrBufEnsure(pReNative, off, f256Bit ? 2 : 1), off, iVecRegDst, iGprSrc, f256Bit);
9142#else
9143# error "port me"
9144#endif
9145 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
9146 return off;
9147}
9148
9149
9150/**
9151 * Emits a vecdst = gprsrc broadcast, 16-bit.
9152 */
9153DECL_FORCE_INLINE(uint32_t)
9154iemNativeEmitSimdBroadcastGprToVecRegU16Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9155{
9156#ifdef RT_ARCH_AMD64
9157 /* pinsrw vecdst, gpr, #0 */
9158 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
9159 if (iVecRegDst >= 8 || iGprSrc >= 8)
9160 pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
9161 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
9162 pCodeBuf[off++] = 0x0f;
9163 pCodeBuf[off++] = 0xc4;
9164 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
9165 pCodeBuf[off++] = 0x00;
9166
9167 /* vpbroadcastw {y,x}mm, xmm (ASSUMES AVX2). */
9168 pCodeBuf[off++] = X86_OP_VEX3;
9169 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_X
9170 | 0x02 /* opcode map. */
9171 | ( iVecRegDst >= 8
9172 ? 0
9173 : X86_OP_VEX3_BYTE1_B | X86_OP_VEX3_BYTE1_R);
9174 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, f256Bit, X86_OP_VEX3_BYTE2_P_066H);
9175 pCodeBuf[off++] = 0x79;
9176 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegDst & 7);
9177#elif defined(RT_ARCH_ARM64)
9178 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
9179 Assert(!(iVecRegDst & 0x1) || !f256Bit);
9180
9181 /* dup vecsrc, gpr */
9182 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst, iGprSrc, kArmv8InstrUmovInsSz_U16);
9183 if (f256Bit)
9184 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst + 1, iGprSrc, kArmv8InstrUmovInsSz_U16);
9185#else
9186# error "port me"
9187#endif
9188 return off;
9189}
9190
9191
9192/**
9193 * Emits a vecdst[x] = gprsrc broadcast, 16-bit.
9194 */
9195DECL_INLINE_THROW(uint32_t)
9196iemNativeEmitSimdBroadcastGprToVecRegU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9197{
9198#ifdef RT_ARCH_AMD64
9199 off = iemNativeEmitSimdBroadcastGprToVecRegU16Ex(iemNativeInstrBufEnsure(pReNative, off, 12), off, iVecRegDst, iGprSrc, f256Bit);
9200#elif defined(RT_ARCH_ARM64)
9201 off = iemNativeEmitSimdBroadcastGprToVecRegU16Ex(iemNativeInstrBufEnsure(pReNative, off, f256Bit ? 2 : 1), off, iVecRegDst, iGprSrc, f256Bit);
9202#else
9203# error "port me"
9204#endif
9205 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
9206 return off;
9207}
9208
9209
9210/**
9211 * Emits a vecdst = gprsrc broadcast, 32-bit.
9212 */
9213DECL_FORCE_INLINE(uint32_t)
9214iemNativeEmitSimdBroadcastGprToVecRegU32Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9215{
9216#ifdef RT_ARCH_AMD64
9217 /** @todo If anyone has a better idea on how to do this more efficiently I'm all ears,
9218 * vbroadcast needs a memory operand or another xmm register to work... */
9219
9220 /* pinsrd vecdst, gpr, #0 (ASSUMES SSE4.1). */
9221 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
9222 if (iVecRegDst >= 8 || iGprSrc >= 8)
9223 pCodeBuf[off++] = (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
9224 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
9225 pCodeBuf[off++] = 0x0f;
9226 pCodeBuf[off++] = 0x3a;
9227 pCodeBuf[off++] = 0x22;
9228 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
9229 pCodeBuf[off++] = 0x00;
9230
9231 /* vpbroadcastd {y,x}mm, xmm (ASSUMES AVX2). */
9232 pCodeBuf[off++] = X86_OP_VEX3;
9233 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_X
9234 | 0x02 /* opcode map. */
9235 | ( iVecRegDst >= 8
9236 ? 0
9237 : X86_OP_VEX3_BYTE1_B | X86_OP_VEX3_BYTE1_R);
9238 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, f256Bit, X86_OP_VEX3_BYTE2_P_066H);
9239 pCodeBuf[off++] = 0x58;
9240 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegDst & 7);
9241#elif defined(RT_ARCH_ARM64)
9242 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
9243 Assert(!(iVecRegDst & 0x1) || !f256Bit);
9244
9245 /* dup vecsrc, gpr */
9246 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst, iGprSrc, kArmv8InstrUmovInsSz_U32);
9247 if (f256Bit)
9248 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst + 1, iGprSrc, kArmv8InstrUmovInsSz_U32);
9249#else
9250# error "port me"
9251#endif
9252 return off;
9253}
9254
9255
9256/**
9257 * Emits a vecdst[x] = gprsrc broadcast, 32-bit.
9258 */
9259DECL_INLINE_THROW(uint32_t)
9260iemNativeEmitSimdBroadcastGprToVecRegU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9261{
9262#ifdef RT_ARCH_AMD64
9263 off = iemNativeEmitSimdBroadcastGprToVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, 12), off, iVecRegDst, iGprSrc, f256Bit);
9264#elif defined(RT_ARCH_ARM64)
9265 off = iemNativeEmitSimdBroadcastGprToVecRegU32Ex(iemNativeInstrBufEnsure(pReNative, off, f256Bit ? 2 : 1), off, iVecRegDst, iGprSrc, f256Bit);
9266#else
9267# error "port me"
9268#endif
9269 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
9270 return off;
9271}
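

/*
 * Usage sketch (illustrative): broadcast a 32-bit value from a GPR into all
 * eight lanes of a 256-bit register, as a VPBROADCASTD emulation would do
 * (idxSimdReg/idxRegValue are placeholders; the final bool is f256Bit):
 *
 *     off = iemNativeEmitSimdBroadcastGprToVecRegU32(pReNative, off, idxSimdReg,
 *                                                    idxRegValue, true);
 */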
9272
9273
9274/**
9275 * Emits a vecdst = gprsrc broadcast, 64-bit.
9276 */
9277DECL_FORCE_INLINE(uint32_t)
9278iemNativeEmitSimdBroadcastGprToVecRegU64Ex(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9279{
9280#ifdef RT_ARCH_AMD64
9281 /** @todo If anyone has a better idea on how to do this more efficiently I'm all ears,
9282 * vbroadcast needs a memory operand or another xmm register to work... */
9283
9284 /* pinsrq vecdst, gpr, #0 (ASSUMES SSE4.1). */
9285 pCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
9286 pCodeBuf[off++] = X86_OP_REX_W
9287 | (iVecRegDst < 8 ? 0 : X86_OP_REX_R)
9288 | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
9289 pCodeBuf[off++] = 0x0f;
9290 pCodeBuf[off++] = 0x3a;
9291 pCodeBuf[off++] = 0x22;
9292 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iGprSrc & 7);
9293 pCodeBuf[off++] = 0x00;
9294
9295 /* vpbroadcastq {y,x}mm, xmm (ASSUMES AVX2). */
9296 pCodeBuf[off++] = X86_OP_VEX3;
9297 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_X
9298 | 0x02 /* opcode map. */
9299 | ( iVecRegDst >= 8
9300 ? 0
9301 : X86_OP_VEX3_BYTE1_B | X86_OP_VEX3_BYTE1_R);
9302 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE_NO_VVVV(false, f256Bit, X86_OP_VEX3_BYTE2_P_066H);
9303 pCodeBuf[off++] = 0x59;
9304 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegDst & 7);
9305#elif defined(RT_ARCH_ARM64)
9306 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
9307 Assert(!(iVecRegDst & 0x1) || !f256Bit);
9308
9309 /* dup vecsrc, gpr */
9310 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst, iGprSrc, kArmv8InstrUmovInsSz_U64);
9311 if (f256Bit)
9312 pCodeBuf[off++] = Armv8A64MkVecInstrDup(iVecRegDst + 1, iGprSrc, kArmv8InstrUmovInsSz_U64);
9313#else
9314# error "port me"
9315#endif
9316 return off;
9317}
9318
9319
9320/**
9321 * Emits a vecdst[x] = gprsrc broadcast, 64-bit.
9322 */
9323DECL_INLINE_THROW(uint32_t)
9324iemNativeEmitSimdBroadcastGprToVecRegU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iGprSrc, bool f256Bit = false)
9325{
9326#ifdef RT_ARCH_AMD64
9327 off = iemNativeEmitSimdBroadcastGprToVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, 14), off, iVecRegDst, iGprSrc, f256Bit);
9328#elif defined(RT_ARCH_ARM64)
9329 off = iemNativeEmitSimdBroadcastGprToVecRegU64Ex(iemNativeInstrBufEnsure(pReNative, off, f256Bit ? 2 : 1), off, iVecRegDst, iGprSrc, f256Bit);
9330#else
9331# error "port me"
9332#endif
9333 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
9334 return off;
9335}
9336
9337
9338/**
9339 * Emits a vecdst[0:127] = vecdst[128:255] = vecsrc[0:127] broadcast, 128-bit.
9340 */
9341DECL_FORCE_INLINE(uint32_t)
9342iemNativeEmitSimdBroadcastVecRegU128ToVecRegEx(PIEMNATIVEINSTR pCodeBuf, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
9343{
9344#ifdef RT_ARCH_AMD64
9345 off = iemNativeEmitSimdLoadVecRegFromVecRegU128Ex(pCodeBuf, off, iVecRegDst, iVecRegSrc);
9346
9347 /* vinserti128 ymm, ymm, xmm, 1. */ /* ASSUMES AVX2 support */
9348 pCodeBuf[off++] = X86_OP_VEX3;
9349 pCodeBuf[off++] = X86_OP_VEX3_BYTE1_MAKE(0x3, iVecRegSrc >= 8, false, iVecRegDst >= 8);
9350 pCodeBuf[off++] = X86_OP_VEX3_BYTE2_MAKE(false, iVecRegDst, true, X86_OP_VEX3_BYTE2_P_066H);
9351 pCodeBuf[off++] = 0x38;
9352 pCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iVecRegDst & 7, iVecRegSrc & 7);
9353 pCodeBuf[off++] = 0x01; /* Immediate */
9354#elif defined(RT_ARCH_ARM64)
9355 /* ASSUMES that there are two adjacent 128-bit registers available for the 256-bit value. */
9356 Assert(!(iVecRegDst & 0x1));
9357
9358 /* mov dst, src; alias for: orr dst, src, src */
9359 pCodeBuf[off++] = Armv8A64MkVecInstrOrr(iVecRegDst, iVecRegSrc, iVecRegSrc);
9360 pCodeBuf[off++] = Armv8A64MkVecInstrOrr(iVecRegDst + 1, iVecRegSrc, iVecRegSrc);
9361#else
9362# error "port me"
9363#endif
9364 return off;
9365}
9366
9367
9368/**
9369 * Emits a vecdst[0:127] = vecdst[128:255] = vecsrc[0:127] broadcast, 128-bit.
9370 */
9371DECL_INLINE_THROW(uint32_t)
9372iemNativeEmitSimdBroadcastVecRegU128ToVecReg(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iVecRegDst, uint8_t iVecRegSrc)
9373{
9374#ifdef RT_ARCH_AMD64
9375 off = iemNativeEmitSimdBroadcastVecRegU128ToVecRegEx(iemNativeInstrBufEnsure(pReNative, off, 11), off, iVecRegDst, iVecRegSrc);
9376#elif defined(RT_ARCH_ARM64)
9377 off = iemNativeEmitSimdBroadcastVecRegU128ToVecRegEx(iemNativeInstrBufEnsure(pReNative, off, 2), off, iVecRegDst, iVecRegSrc);
9378#else
9379# error "port me"
9380#endif
9381 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
9382 return off;
9383}
9384
9385#endif /* IEMNATIVE_WITH_SIMD_REG_ALLOCATOR */
9386
9387/** @} */
9388
9389#endif /* !VMM_INCLUDED_SRC_include_IEMN8veRecompilerEmit_h */
9390