VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMN8veRecompiler.h@101661

Last change on this file since 101661 was 101661, checked in by vboxsync, 16 months ago

VMM/IEM: Windows build recompiler fixes. Fixed handling of 8-bit registers on AMD64 hosts (test instr). bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 118.2 KB
1/* $Id: IEMN8veRecompiler.h 101661 2023-10-30 14:55:00Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Native Recompiler Internals.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
29#define VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/** @defgroup grp_iem_n8ve_re Native Recompiler Internals.
36 * @ingroup grp_iem_int
37 * @{
38 */
39
40/** @def IEMNATIVE_WITH_TB_DEBUG_INFO
41 * Enables generating internal debug info for better TB disassembly dumping. */
42#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
43# define IEMNATIVE_WITH_TB_DEBUG_INFO
44#endif
45
46
47/** @name Stack Frame Layout
48 *
49 * @{ */
50/** The size of the area for stack variables and spills and stuff.
51 * @note This limit is duplicated in the python script(s). */
52#define IEMNATIVE_FRAME_VAR_SIZE 0xc0
53#ifdef RT_ARCH_AMD64
54/** Number of stack arguments slots for calls made from the frame. */
55# define IEMNATIVE_FRAME_STACK_ARG_COUNT 4
56/** A stack alignment adjustment (between the non-volatile register pushes and
57 * the stack variable area, so that the latter is better aligned). */
58# define IEMNATIVE_FRAME_ALIGN_SIZE 8
59/** Number of shadow argument slots (spill area) for calls we make. */
60# ifdef RT_OS_WINDOWS
61# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT 4
62# else
63# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT 0
64# endif
65
66/** Frame pointer (RBP) relative offset of the last push. */
67# ifdef RT_OS_WINDOWS
68# define IEMNATIVE_FP_OFF_LAST_PUSH (7 * -8)
69# else
70# define IEMNATIVE_FP_OFF_LAST_PUSH (5 * -8)
71# endif
72/** Frame pointer (RBP) relative offset of the stack variable area (the lowest
73 * address for it). */
74# define IEMNATIVE_FP_OFF_STACK_VARS (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)
75/** Frame pointer (RBP) relative offset of the first stack argument for calls. */
76# define IEMNATIVE_FP_OFF_STACK_ARG0 (IEMNATIVE_FP_OFF_STACK_VARS - IEMNATIVE_FRAME_STACK_ARG_COUNT * 8)
77/** Frame pointer (RBP) relative offset of the second stack argument for calls. */
78# define IEMNATIVE_FP_OFF_STACK_ARG1 (IEMNATIVE_FP_OFF_STACK_ARG0 + 8)
79/** Frame pointer (RBP) relative offset of the third stack argument for calls. */
80# define IEMNATIVE_FP_OFF_STACK_ARG2 (IEMNATIVE_FP_OFF_STACK_ARG0 + 16)
81/** Frame pointer (RBP) relative offset of the fourth stack argument for calls. */
82# define IEMNATIVE_FP_OFF_STACK_ARG3 (IEMNATIVE_FP_OFF_STACK_ARG0 + 24)
83
84# ifdef RT_OS_WINDOWS
85/** Frame pointer (RBP) relative offset of the first incoming shadow argument. */
86# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG0 (16)
87/** Frame pointer (RBP) relative offset of the second incoming shadow argument. */
88# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG1 (24)
89/** Frame pointer (RBP) relative offset of the third incoming shadow argument. */
90# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG2 (32)
91/** Frame pointer (RBP) relative offset of the fourth incoming shadow argument. */
92# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG3 (40)
93# endif
94
95#elif RT_ARCH_ARM64
97/** No stack argument slots, we have 8 registers for arguments. */
97# define IEMNATIVE_FRAME_STACK_ARG_COUNT 0
99/** There is no argument spill area. */
99# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT 0
100
101/** Number of saved registers at the top of our stack frame.
102 * This includes the return address and old frame pointer, so x19 thru x30. */
103# define IEMNATIVE_FRAME_SAVE_REG_COUNT (12)
104/** The size of the saved register area (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8 bytes). */
105# define IEMNATIVE_FRAME_SAVE_REG_SIZE (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8)
106
107/** Frame pointer (BP) relative offset of the last push. */
108# define IEMNATIVE_FP_OFF_LAST_PUSH (7 * -8)
109
110/** Frame pointer (BP) relative offset of the stack variable area (the lowest
111 * address for it). */
112# define IEMNATIVE_FP_OFF_STACK_VARS (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)
113
114#else
115# error "port me"
116#endif
117/** @} */
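/*
 * Illustrative sketch (not part of the interface): the Windows/AMD64 frame
 * implied by the macro values above, relative to RBP after the prolog has
 * run.  The offsets below are simply computed from the constants; the actual
 * prolog/epilog emitters live in the recompiler source, not in this header.
 *
 *      RBP + 0x10..0x2f    incoming shadow args #0..#3 (IEMNATIVE_FP_OFF_IN_SHADOW_ARG0..3)
 *      RBP + 0x08          return address
 *      RBP + 0x00          saved RBP
 *      RBP - 0x38..-0x08   non-volatile register pushes (IEMNATIVE_FP_OFF_LAST_PUSH = -0x38)
 *      RBP - 0x40          8 byte alignment adjustment (IEMNATIVE_FRAME_ALIGN_SIZE)
 *      RBP - 0x100         variable and spill area, 0xc0 bytes (IEMNATIVE_FP_OFF_STACK_VARS)
 *      RBP - 0x120         outgoing stack args #0..#3 (IEMNATIVE_FP_OFF_STACK_ARG0)
 *      below that          shadow arg area for our callees (IEMNATIVE_FRAME_SHADOW_ARG_COUNT * 8 bytes)
 */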
118
119
120/** @name Fixed Register Allocation(s)
121 * @{ */
122/** @def IEMNATIVE_REG_FIXED_PVMCPU
123 * The number of the register holding the pVCpu pointer. */
124/** @def IEMNATIVE_REG_FIXED_PCPUMCTX
125 * The number of the register holding the &pVCpu->cpum.GstCtx pointer.
126 * @note This is not available on AMD64, only on ARM64. */
127/** @def IEMNATIVE_REG_FIXED_TMP0
128 * Dedicated temporary register.
129 * @todo replace this by a register allocator and content tracker. */
130/** @def IEMNATIVE_REG_FIXED_MASK
131 * Mask of GPRs with fixed assignments, either made by us or dictated by the CPU/OS
132 * architecture. */
133#if defined(RT_ARCH_AMD64) && !defined(DOXYGEN_RUNNING)
134# define IEMNATIVE_REG_FIXED_PVMCPU X86_GREG_xBX
135# define IEMNATIVE_REG_FIXED_TMP0 X86_GREG_x11
136# define IEMNATIVE_REG_FIXED_MASK ( RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
137 | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) \
138 | RT_BIT_32(X86_GREG_xSP) \
139 | RT_BIT_32(X86_GREG_xBP) )
140
141#elif defined(RT_ARCH_ARM64) || defined(DOXYGEN_RUNNING)
142# define IEMNATIVE_REG_FIXED_PVMCPU ARMV8_A64_REG_X28
143# define IEMNATIVE_REG_FIXED_PCPUMCTX ARMV8_A64_REG_X27
144# define IEMNATIVE_REG_FIXED_TMP0 ARMV8_A64_REG_X15
145# define IEMNATIVE_REG_FIXED_MASK ( RT_BIT_32(ARMV8_A64_REG_SP) \
146 | RT_BIT_32(ARMV8_A64_REG_LR) \
147 | RT_BIT_32(ARMV8_A64_REG_BP) \
148 | RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
149 | RT_BIT_32(IEMNATIVE_REG_FIXED_PCPUMCTX) \
150 | RT_BIT_32(ARMV8_A64_REG_X18) \
151 | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) )
152
153#else
154# error "port me"
155#endif
156/** @} */
157
158/** @name Call related registers.
159 * @{ */
160/** @def IEMNATIVE_CALL_RET_GREG
161 * The return value register. */
162/** @def IEMNATIVE_CALL_ARG_GREG_COUNT
163 * Number of arguments in registers. */
164/** @def IEMNATIVE_CALL_ARG0_GREG
165 * The general purpose register carrying argument \#0. */
166/** @def IEMNATIVE_CALL_ARG1_GREG
167 * The general purpose register carrying argument \#1. */
168/** @def IEMNATIVE_CALL_ARG2_GREG
169 * The general purpose register carrying argument \#2. */
170/** @def IEMNATIVE_CALL_ARG3_GREG
171 * The general purpose register carrying argument \#3. */
172/** @def IEMNATIVE_CALL_VOLATILE_GREG_MASK
173 * Mask of registers the callee will not save and may trash. */
174#ifdef RT_ARCH_AMD64
175# define IEMNATIVE_CALL_RET_GREG X86_GREG_xAX
176
177# ifdef RT_OS_WINDOWS
178# define IEMNATIVE_CALL_ARG_GREG_COUNT 4
179# define IEMNATIVE_CALL_ARG0_GREG X86_GREG_xCX
180# define IEMNATIVE_CALL_ARG1_GREG X86_GREG_xDX
181# define IEMNATIVE_CALL_ARG2_GREG X86_GREG_x8
182# define IEMNATIVE_CALL_ARG3_GREG X86_GREG_x9
183# define IEMNATIVE_CALL_VOLATILE_GREG_MASK ( RT_BIT_32(X86_GREG_xAX) \
184 | RT_BIT_32(X86_GREG_xCX) \
185 | RT_BIT_32(X86_GREG_xDX) \
186 | RT_BIT_32(X86_GREG_x8) \
187 | RT_BIT_32(X86_GREG_x9) \
188 | RT_BIT_32(X86_GREG_x10) \
189 | RT_BIT_32(X86_GREG_x11) )
190# else
191# define IEMNATIVE_CALL_ARG_GREG_COUNT 6
192# define IEMNATIVE_CALL_ARG0_GREG X86_GREG_xDI
193# define IEMNATIVE_CALL_ARG1_GREG X86_GREG_xSI
194# define IEMNATIVE_CALL_ARG2_GREG X86_GREG_xDX
195# define IEMNATIVE_CALL_ARG3_GREG X86_GREG_xCX
196# define IEMNATIVE_CALL_ARG4_GREG X86_GREG_x8
197# define IEMNATIVE_CALL_ARG5_GREG X86_GREG_x9
198# define IEMNATIVE_CALL_VOLATILE_GREG_MASK ( RT_BIT_32(X86_GREG_xAX) \
199 | RT_BIT_32(X86_GREG_xCX) \
200 | RT_BIT_32(X86_GREG_xDX) \
201 | RT_BIT_32(X86_GREG_xDI) \
202 | RT_BIT_32(X86_GREG_xSI) \
203 | RT_BIT_32(X86_GREG_x8) \
204 | RT_BIT_32(X86_GREG_x9) \
205 | RT_BIT_32(X86_GREG_x10) \
206 | RT_BIT_32(X86_GREG_x11) )
207# endif
208
209#elif defined(RT_ARCH_ARM64)
210# define IEMNATIVE_CALL_RET_GREG ARMV8_A64_REG_X0
211# define IEMNATIVE_CALL_ARG_GREG_COUNT 8
212# define IEMNATIVE_CALL_ARG0_GREG ARMV8_A64_REG_X0
213# define IEMNATIVE_CALL_ARG1_GREG ARMV8_A64_REG_X1
214# define IEMNATIVE_CALL_ARG2_GREG ARMV8_A64_REG_X2
215# define IEMNATIVE_CALL_ARG3_GREG ARMV8_A64_REG_X3
216# define IEMNATIVE_CALL_ARG4_GREG ARMV8_A64_REG_X4
217# define IEMNATIVE_CALL_ARG5_GREG ARMV8_A64_REG_X5
218# define IEMNATIVE_CALL_ARG6_GREG ARMV8_A64_REG_X6
219# define IEMNATIVE_CALL_ARG7_GREG ARMV8_A64_REG_X7
220# define IEMNATIVE_CALL_VOLATILE_GREG_MASK ( RT_BIT_32(ARMV8_A64_REG_X0) \
221 | RT_BIT_32(ARMV8_A64_REG_X1) \
222 | RT_BIT_32(ARMV8_A64_REG_X2) \
223 | RT_BIT_32(ARMV8_A64_REG_X3) \
224 | RT_BIT_32(ARMV8_A64_REG_X4) \
225 | RT_BIT_32(ARMV8_A64_REG_X5) \
226 | RT_BIT_32(ARMV8_A64_REG_X6) \
227 | RT_BIT_32(ARMV8_A64_REG_X7) \
228 | RT_BIT_32(ARMV8_A64_REG_X8) \
229 | RT_BIT_32(ARMV8_A64_REG_X9) \
230 | RT_BIT_32(ARMV8_A64_REG_X10) \
231 | RT_BIT_32(ARMV8_A64_REG_X11) \
232 | RT_BIT_32(ARMV8_A64_REG_X12) \
233 | RT_BIT_32(ARMV8_A64_REG_X13) \
234 | RT_BIT_32(ARMV8_A64_REG_X14) \
235 | RT_BIT_32(ARMV8_A64_REG_X15) \
236 | RT_BIT_32(ARMV8_A64_REG_X16) \
237 | RT_BIT_32(ARMV8_A64_REG_X17) )
238
239#endif
240
241/** @} */
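/*
 * Purely illustrative: setting up a two-argument helper call with the macros
 * above, using emitters defined further down in this header (uSomeValue and
 * the call emission itself are placeholders, not things defined here).  After
 * the call, everything in IEMNATIVE_CALL_VOLATILE_GREG_MASK must be treated
 * as trashed and IEMNATIVE_CALL_RET_GREG holds the return value.
 *
 * @code
 *      off = iemNativeEmitLoadGprFromGpr(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, IEMNATIVE_REG_FIXED_PVMCPU);
 *      off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG1_GREG, uSomeValue);
 *      // ... emit the actual call instruction here ...
 * @endcode
 */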
242
243
244/** @def IEMNATIVE_HST_GREG_COUNT
245 * Number of host general purpose registers we track. */
246/** @def IEMNATIVE_HST_GREG_MASK
247 * Mask corresponding to IEMNATIVE_HST_GREG_COUNT; apply it to inverted
248 * register masks and the like to reduce them to a valid set of registers. */
249#ifdef RT_ARCH_AMD64
250# define IEMNATIVE_HST_GREG_COUNT 16
251# define IEMNATIVE_HST_GREG_MASK UINT32_C(0xffff)
252
253#elif defined(RT_ARCH_ARM64)
254# define IEMNATIVE_HST_GREG_COUNT 32
255# define IEMNATIVE_HST_GREG_MASK UINT32_MAX
256#else
257# error "Port me!"
258#endif
259
260
261/** Native code generator label types. */
262typedef enum
263{
264 kIemNativeLabelType_Invalid = 0,
265 /* Labels w/o data, only one instance per TB: */
266 kIemNativeLabelType_Return,
267 kIemNativeLabelType_ReturnBreak,
268 kIemNativeLabelType_NonZeroRetOrPassUp,
269 kIemNativeLabelType_RaiseGp0,
270 /* Labels with data, potentially multiple instances per TB: */
271 kIemNativeLabelType_If,
272 kIemNativeLabelType_Else,
273 kIemNativeLabelType_Endif,
274 kIemNativeLabelType_CheckIrq,
275 kIemNativeLabelType_End
276} IEMNATIVELABELTYPE;
277
278/** Native code generator label definition. */
279typedef struct IEMNATIVELABEL
280{
281 /** Code offset if defined, UINT32_MAX if it needs to be generated after/in
282 * the epilog. */
283 uint32_t off;
284 /** The type of label (IEMNATIVELABELTYPE). */
285 uint16_t enmType;
286 /** Additional label data, type specific. */
287 uint16_t uData;
288} IEMNATIVELABEL;
289/** Pointer to a label. */
290typedef IEMNATIVELABEL *PIEMNATIVELABEL;
291
292
293/** Native code generator fixup types. */
294typedef enum
295{
296 kIemNativeFixupType_Invalid = 0,
297#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
298 /** AMD64 fixup: PC relative 32-bit with addend in bData. */
299 kIemNativeFixupType_Rel32,
300#elif defined(RT_ARCH_ARM64)
301 /** ARM64 fixup: PC relative offset at bits 23:5 (CBZ, CBNZ, B, B.CC). */
302 kIemNativeFixupType_RelImm19At5,
303 /** ARM64 fixup: PC relative offset at bits 18:5 (TBZ, TBNZ). */
304 kIemNativeFixupType_RelImm14At5,
305#endif
306 kIemNativeFixupType_End
307} IEMNATIVEFIXUPTYPE;
308
309/** Native code generator fixup. */
310typedef struct IEMNATIVEFIXUP
311{
312 /** Code offset of the fixup location. */
313 uint32_t off;
314 /** The IEMNATIVELABEL this is a fixup for. */
315 uint16_t idxLabel;
316 /** The fixup type (IEMNATIVEFIXUPTYPE). */
317 uint8_t enmType;
318 /** Addend or other data. */
319 int8_t offAddend;
320} IEMNATIVEFIXUP;
321/** Pointer to a native code generator fixup. */
322typedef IEMNATIVEFIXUP *PIEMNATIVEFIXUP;
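/*
 * Illustrative sketch only; the code that applies fixups is not part of this
 * header.  Assuming byte-sized IEMNATIVEINSTR units as on AMD64, a
 * kIemNativeFixupType_Rel32 entry would be resolved roughly like this once
 * the label has been defined, with the displacement measured from the end of
 * the 32-bit field and offAddend applied on top:
 *
 * @code
 *      IEMNATIVEFIXUP const *pFixup   = &pReNative->paFixups[i];
 *      uint32_t const        offLabel = pReNative->paLabels[pFixup->idxLabel].off;
 *      int32_t const         offDisp  = (int32_t)offLabel - (int32_t)(pFixup->off + 4) + pFixup->offAddend;
 *      memcpy(&pReNative->pInstrBuf[pFixup->off], &offDisp, sizeof(offDisp));
 * @endcode
 */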
323
324
325/**
326 * Guest registers that can be shadowed in GPRs.
327 */
328typedef enum IEMNATIVEGSTREG : uint8_t
329{
330 kIemNativeGstReg_GprFirst = 0,
331 kIemNativeGstReg_GprLast = 15,
332 kIemNativeGstReg_Pc,
333 kIemNativeGstReg_EFlags, /**< This one is problematic since the higher bits are used internally. */
334 /* gap: 18..23 */
335 kIemNativeGstReg_SegSelFirst = 24,
336 kIemNativeGstReg_SegSelLast = 29,
337 kIemNativeGstReg_SegBaseFirst = 30,
338 kIemNativeGstReg_SegBaseLast = 35,
339 kIemNativeGstReg_SegLimitFirst = 36,
340 kIemNativeGstReg_SegLimitLast = 41,
341 kIemNativeGstReg_End
342} IEMNATIVEGSTREG;
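/*
 * Note that the GPR, segment selector, segment base and segment limit bands
 * above map 1:1 by index, so (purely as an illustration, not an API defined
 * by this header) converting a guest register index looks like this:
 *
 * @code
 *      IEMNATIVEGSTREG const enmGstRax = (IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst    + X86_GREG_xAX);
 *      IEMNATIVEGSTREG const enmGstCs  = (IEMNATIVEGSTREG)(kIemNativeGstReg_SegSelFirst + X86_SREG_CS);
 * @endcode
 */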
343
344/**
345 * Intended use statement for iemNativeRegAllocTmpForGuestReg().
346 */
347typedef enum IEMNATIVEGSTREGUSE
348{
349 /** The usage is read-only, the register holding the guest register
350 * shadow copy will not be modified by the caller. */
351 kIemNativeGstRegUse_ReadOnly = 0,
352 /** The caller will update the guest register (think: PC += cbInstr).
353 * The guest shadow copy will follow the returned register. */
354 kIemNativeGstRegUse_ForUpdate,
355 /** The caller will use the guest register value as input in a calculation
356 * and the host register will be modified.
357 * This means that the returned host register will not be marked as a shadow
358 * copy of the guest register. */
359 kIemNativeGstRegUse_Calculation
360} IEMNATIVEGSTREGUSE;
361
362/**
363 * Guest registers (classes) that can be referenced.
364 */
365typedef enum IEMNATIVEGSTREGREF : uint8_t
366{
367 kIemNativeGstRegRef_Invalid = 0,
368 kIemNativeGstRegRef_Gpr,
369 kIemNativeGstRegRef_GprHighByte, /**< AH, CH, DH, BH*/
370 kIemNativeGstRegRef_EFlags,
371 kIemNativeGstRegRef_MxCsr,
372 kIemNativeGstRegRef_FpuReg,
373 kIemNativeGstRegRef_MReg,
374 kIemNativeGstRegRef_XReg,
375 kIemNativeGstRegRef_YReg,
376 kIemNativeGstRegRef_End
377} IEMNATIVEGSTREGREF;
378
379
380/** Variable kinds. */
381typedef enum IEMNATIVEVARKIND : uint8_t
382{
383 /** Customary invalid zero value. */
384 kIemNativeVarKind_Invalid = 0,
385 /** This is either in a register or on the stack. */
386 kIemNativeVarKind_Stack,
387 /** Immediate value - loaded into register when needed, or can live on the
388 * stack if referenced (in theory). */
389 kIemNativeVarKind_Immediate,
390 /** Variable reference - loaded into register when needed, never stack. */
391 kIemNativeVarKind_VarRef,
392 /** Guest register reference - loaded into register when needed, never stack. */
393 kIemNativeVarKind_GstRegRef,
394 /** End of valid values. */
395 kIemNativeVarKind_End
396} IEMNATIVEVARKIND;
397
398
399/** Variable or argument. */
400typedef struct IEMNATIVEVAR
401{
402 /** The kind of variable. */
403 IEMNATIVEVARKIND enmKind;
404 /** The variable size in bytes. */
405 uint8_t cbVar;
406 /** The first stack slot (uint64_t), except for immediate and references
407 * where it usually is UINT8_MAX. */
408 uint8_t idxStackSlot;
409 /** The host register allocated for the variable, UINT8_MAX if not. */
410 uint8_t idxReg;
411 /** The argument number if argument, UINT8_MAX if regular variable. */
412 uint8_t uArgNo;
413 /** If referenced, the index of the variable referencing this one, otherwise
414 * UINT8_MAX. A referenced variable must only be placed on the stack and
415 * must be either kIemNativeVarKind_Stack or kIemNativeVarKind_Immediate. */
416 uint8_t idxReferrerVar;
417 /** Guest register being shadowed here, kIemNativeGstReg_End(/UINT8_MAX) if not. */
418 IEMNATIVEGSTREG enmGstReg;
419 uint8_t bAlign;
420
421 union
422 {
423 /** kIemNativeVarKind_Immediate: The immediate value. */
424 uint64_t uValue;
425 /** kIemNativeVarKind_VarRef: The index of the variable being referenced. */
426 uint8_t idxRefVar;
427 /** kIemNativeVarKind_GstRegRef: The guest register being referenced. */
428 struct
429 {
430 /** The class of register. */
431 IEMNATIVEGSTREGREF enmClass;
432 /** Index within the class. */
433 uint8_t idx;
434 } GstRegRef;
435 } u;
436} IEMNATIVEVAR;
437
438/** What is being kept in a host register. */
439typedef enum IEMNATIVEWHAT : uint8_t
440{
441 /** The traditional invalid zero value. */
442 kIemNativeWhat_Invalid = 0,
443 /** Mapping a variable (IEMNATIVEHSTREG::idxVar). */
444 kIemNativeWhat_Var,
445 /** Temporary register, this is typically freed when a MC completes. */
446 kIemNativeWhat_Tmp,
447 /** Call argument w/o a variable mapping. This is free (via
448 * IEMNATIVE_CALL_VOLATILE_GREG_MASK) after the call is emitted. */
449 kIemNativeWhat_Arg,
450 /** Return status code.
451 * @todo not sure if we need this... */
452 kIemNativeWhat_rc,
453 /** The fixed pVCpu (PVMCPUCC) register.
454 * @todo consider offsetting this on amd64 to use negative offsets to access
455 * more members using 8-byte disp. */
456 kIemNativeWhat_pVCpuFixed,
457 /** The fixed pCtx (PCPUMCTX) register.
458 * @todo consider offsetting this on amd64 to use negative offsets to access
459 * more members using 8-byte disp. */
460 kIemNativeWhat_pCtxFixed,
461 /** Fixed temporary register. */
462 kIemNativeWhat_FixedTmp,
463 /** Register reserved by the CPU or OS architecture. */
464 kIemNativeWhat_FixedReserved,
465 /** End of valid values. */
466 kIemNativeWhat_End
467} IEMNATIVEWHAT;
468
469/**
470 * Host general register entry.
471 *
472 * The actual allocation status is kept in IEMRECOMPILERSTATE::bmHstRegs.
473 *
474 * @todo Track immediate values in host registers similarly to how we track the
475 * guest register shadow copies. For it to be really helpful, though,
476 * we probably need to know which will be reused and put them into
477 * non-volatile registers, otherwise it's going to be more or less
478 * restricted to an instruction or two.
479 */
480typedef struct IEMNATIVEHSTREG
481{
482 /** Set of guest registers this one shadows.
483 *
484 * Using a bitmap here so we can designate the same host register as a copy
485 * for more than one guest register. This is expected to be useful in
486 * situations where one value is copied to several registers in a sequence.
487 * If the mapping is 1:1, then we'd have to pick which side of a 'MOV SRC,DST'
488 * sequence we'd want to let this register follow to be a copy of and there
489 * will always be places where we'd be picking the wrong one.
490 */
491 uint64_t fGstRegShadows;
492 /** What is being kept in this register. */
493 IEMNATIVEWHAT enmWhat;
494 /** Variable index if holding a variable, otherwise UINT8_MAX. */
495 uint8_t idxVar;
496 /** Alignment padding. */
497 uint8_t abAlign[6];
498} IEMNATIVEHSTREG;
499
500
501/**
502 * Core state for the native recompiler, that is, things that need careful
503 * handling when dealing with branches.
504 */
505typedef struct IEMNATIVECORESTATE
506{
507 /** Allocation bitmap for aHstRegs. */
508 uint32_t bmHstRegs;
509
510 /** Bitmap marking which host register contains guest register shadow copies.
511 * This is used during register allocation to try preserve copies. */
512 uint32_t bmHstRegsWithGstShadow;
513 /** Bitmap marking valid entries in aidxGstRegShadows. */
514 uint64_t bmGstRegShadows;
515
516 union
517 {
518 /** Maps argument numbers to variable indexes, UINT8_MAX if not valid. */
519 uint8_t aidxArgVars[8];
520 /** For more efficient resetting. */
521 uint64_t u64ArgVars;
522 };
523
524 /** Allocation bitmap for aVars. */
525 uint32_t bmVars;
526
527 /** Maps a guest register to a host GPR (index by IEMNATIVEGSTREG).
528 * Entries are only valid if the corresponding bit in bmGstRegShadows is set.
529 * (A shadow copy of a guest register can only be held in one host register,
530 * there are no duplicate copies or ambiguities like that). */
531 uint8_t aidxGstRegShadows[kIemNativeGstReg_End];
532
533 /** Host register allocation tracking. */
534 IEMNATIVEHSTREG aHstRegs[IEMNATIVE_HST_GREG_COUNT];
535
536 /** Variables and arguments. */
537 IEMNATIVEVAR aVars[9];
538} IEMNATIVECORESTATE;
539/** Pointer to core state. */
540typedef IEMNATIVECORESTATE *PIEMNATIVECORESTATE;
541/** Pointer to const core state. */
542typedef IEMNATIVECORESTATE const *PCIEMNATIVECORESTATE;
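/*
 * Example of the shadow tracking invariants (illustrative): if host register 3
 * currently shadows both guest RAX and guest RCX, then
 *      - aHstRegs[3].fGstRegShadows has the bits for kIemNativeGstReg_GprFirst + 0 and + 1 set,
 *      - bmHstRegsWithGstShadow has bit 3 set,
 *      - bmGstRegShadows has the same two guest register bits set, and
 *      - aidxGstRegShadows[0] == aidxGstRegShadows[1] == 3.
 */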
543
544
545/**
546 * Conditional stack entry.
547 */
548typedef struct IEMNATIVECOND
549{
550 /** Set if we're in the "else" part, clear if we're in the "if" before it. */
551 bool fInElse;
552 /** The label for the IEM_MC_ELSE. */
553 uint32_t idxLabelElse;
554 /** The label for the IEM_MC_ENDIF. */
555 uint32_t idxLabelEndIf;
556 /** The initial state snapshot as the if-block starts executing. */
557 IEMNATIVECORESTATE InitialState;
558 /** The state snapshot at the end of the if-block. */
559 IEMNATIVECORESTATE IfFinalState;
560} IEMNATIVECOND;
561/** Pointer to a condition stack entry. */
562typedef IEMNATIVECOND *PIEMNATIVECOND;
563
564
565/**
566 * Native recompiler state.
567 */
568typedef struct IEMRECOMPILERSTATE
569{
570 /** Size of the buffer that pbNativeRecompileBufR3 points to in
571 * IEMNATIVEINSTR units. */
572 uint32_t cInstrBufAlloc;
573#ifdef VBOX_STRICT
574 /** Strict: How far the last iemNativeInstrBufEnsure() checked. */
575 uint32_t offInstrBufChecked;
576#else
577 uint32_t uPadding1; /* We don't keep track of the size here... */
578#endif
579 /** Fixed temporary code buffer for native recompilation. */
580 PIEMNATIVEINSTR pInstrBuf;
581
583 /** Bitmap of the label types used. */
583 uint64_t bmLabelTypes;
584 /** Actual number of labels in paLabels. */
585 uint32_t cLabels;
586 /** Max number of entries allowed in paLabels before reallocating it. */
587 uint32_t cLabelsAlloc;
588 /** Labels defined while recompiling (referenced by fixups). */
589 PIEMNATIVELABEL paLabels;
590
591 /** Actual number of fixups in paFixups. */
592 uint32_t cFixups;
593 /** Max number of entries allowed in paFixups before reallocating it. */
594 uint32_t cFixupsAlloc;
595 /** Buffer used by the recompiler for recording fixups when generating code. */
596 PIEMNATIVEFIXUP paFixups;
597
598#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
599 /** Number of debug info entries allocated for pDbgInfo. */
600 uint32_t cDbgInfoAlloc;
601 uint32_t uPadding;
602 /** Debug info. */
603 PIEMTBDBG pDbgInfo;
604#endif
605
606 /** The translation block being recompiled. */
607 PCIEMTB pTbOrg;
608
609 /** The current condition stack depth (aCondStack). */
610 uint8_t cCondDepth;
611 uint8_t bPadding2;
612 /** Condition sequence number (for generating unique labels). */
613 uint16_t uCondSeqNo;
614 /** Check-IRQ sequence number (for generating unique labels). */
615 uint16_t uCheckIrqSeqNo;
616 uint16_t uPadding3;
617
618 /** Core state requiring care with branches. */
619 IEMNATIVECORESTATE Core;
620
621 /** The condition nesting stack. */
622 IEMNATIVECOND aCondStack[2];
623} IEMRECOMPILERSTATE;
624/** Pointer to a native recompiler state. */
625typedef IEMRECOMPILERSTATE *PIEMRECOMPILERSTATE;
626
627
628/**
629 * Native recompiler worker for a threaded function.
630 *
631 * @returns New code buffer offset, UINT32_MAX in case of failure.
632 * @param pReNative The native recompiler state.
633 * @param off The current code buffer offset.
634 * @param pCallEntry The threaded call entry.
635 *
636 * @note This is not allowed to throw anything atm.
637 */
638typedef DECLCALLBACKTYPE(uint32_t, FNIEMNATIVERECOMPFUNC,(PIEMRECOMPILERSTATE pReNative, uint32_t off,
639 PCIEMTHRDEDCALLENTRY pCallEntry));
640/** Pointer to a native recompiler worker for a threaded function. */
641typedef FNIEMNATIVERECOMPFUNC *PFNIEMNATIVERECOMPFUNC;
642
643/** Defines a native recompiler worker for a threaded function. */
644#define IEM_DECL_IEMNATIVERECOMPFUNC_DEF(a_Name) \
645 DECLCALLBACK(uint32_t) a_Name(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
646/** Prototypes a native recompiler function for a threaded function. */
647#define IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(a_Name) FNIEMNATIVERECOMPFUNC a_Name
648
649DECLHIDDEN(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
650 uint32_t offWhere = UINT32_MAX, uint16_t uData = 0) RT_NOEXCEPT;
651DECLHIDDEN(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere) RT_NOEXCEPT;
652DECLHIDDEN(bool) iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel,
653 IEMNATIVEFIXUPTYPE enmType, int8_t offAddend = 0) RT_NOEXCEPT;
654DECLHIDDEN(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off,
655 uint32_t cInstrReq) RT_NOEXCEPT;
656
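/*
 * Illustrative sketch of how the label and fixup APIs above fit together when
 * emitting a forward branch.  The zero addend and offFixup are assumptions
 * made for the example only; the exact displacement conventions depend on the
 * branch instruction being emitted.
 *
 * @code
 *      uint32_t const idxLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_CheckIrq,
 *                                                     UINT32_MAX, uCheckIrqSeqNo);
 *      // ... emit the branch instruction; offFixup is the offset of its displacement field ...
 *      iemNativeAddFixup(pReNative, offFixup, idxLabel, kIemNativeFixupType_Rel32, 0);
 *      // ... and later, when code generation reaches the branch target:
 *      iemNativeLabelDefine(pReNative, idxLabel, off);
 * @endcode
 */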
657DECLHIDDEN(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
658 bool fPreferVolatile = true) RT_NOEXCEPT;
659DECLHIDDEN(uint8_t) iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
660 bool fPreferVolatile = true) RT_NOEXCEPT;
661DECLHIDDEN(uint8_t) iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
662 IEMNATIVEGSTREG enmGstReg,
663 IEMNATIVEGSTREGUSE enmIntendedUse) RT_NOEXCEPT;
664DECLHIDDEN(uint8_t) iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
665 IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
666
667DECLHIDDEN(uint8_t) iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar) RT_NOEXCEPT;
668DECLHIDDEN(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs) RT_NOEXCEPT;
669DECLHIDDEN(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
670DECLHIDDEN(void) iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
671DECLHIDDEN(void) iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
672DECLHIDDEN(void) iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
673DECLHIDDEN(void) iemNativeRegFreeAndFlushMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegMask) RT_NOEXCEPT;
674DECLHIDDEN(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off) RT_NOEXCEPT;
675
676DECLHIDDEN(uint32_t) iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off,
677 uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg) RT_NOEXCEPT;
678DECLHIDDEN(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off,
679 uint8_t idxInstr) RT_NOEXCEPT;
680
681
682/**
683 * Ensures that there is sufficient space in the instruction output buffer.
684 *
685 * This will reallocate the buffer if needed and allowed.
686 *
687 * @note Always use IEMNATIVE_ASSERT_INSTR_BUF_ENSURE when done to check the
688 * allocation size.
689 *
690 * @returns Pointer to the instruction output buffer on success, NULL on
691 * failure.
692 * @param pReNative The native recompile state.
693 * @param off Current instruction offset. Works safely for UINT32_MAX
694 * as well.
695 * @param cInstrReq Number of instructions about to be added. It's okay to
696 * overestimate this a bit.
697 */
698DECL_FORCE_INLINE(PIEMNATIVEINSTR) iemNativeInstrBufEnsure(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq)
699{
700 uint64_t const offChecked = off + (uint64_t)cInstrReq;
701 if (RT_LIKELY(offChecked <= pReNative->cInstrBufAlloc))
702 {
703#ifdef VBOX_STRICT
704 pReNative->offInstrBufChecked = offChecked;
705#endif
706 return pReNative->pInstrBuf;
707 }
708 return iemNativeInstrBufEnsureSlow(pReNative, off, cInstrReq);
709}
710
711/**
712 * Checks that we didn't exceed the space requested in the last
713 * iemNativeInstrBufEnsure() call. */
714#define IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(a_pReNative, a_off) \
715 AssertMsg((a_off) <= (a_pReNative)->offInstrBufChecked, \
716 ("off=%#x offInstrBufChecked=%#x\n", (a_off), (a_pReNative)->offInstrBufChecked))
717
718
719/**
720 * Emit a simple marker instruction to more easily tell where something starts
721 * in the disassembly.
722 */
723DECLINLINE(uint32_t) iemNativeEmitMarker(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t uInfo)
724{
725#ifdef RT_ARCH_AMD64
726 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
727 AssertReturn(pbCodeBuf, UINT32_MAX);
728 if (uInfo == 0)
729 {
730 /* nop */
731 pbCodeBuf[off++] = 0x90;
732 }
733 else
734 {
735 /* nop [disp32] */
736 pbCodeBuf[off++] = 0x0f;
737 pbCodeBuf[off++] = 0x1f;
738 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, 0, 5);
739 pbCodeBuf[off++] = RT_BYTE1(uInfo);
740 pbCodeBuf[off++] = RT_BYTE2(uInfo);
741 pbCodeBuf[off++] = RT_BYTE3(uInfo);
742 pbCodeBuf[off++] = RT_BYTE4(uInfo);
743 }
744#elif RT_ARCH_ARM64
745 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
746 AssertReturn(pu32CodeBuf, UINT32_MAX);
747 /* nop */
748 pu32CodeBuf[off++] = 0xd503201f;
749
750 RT_NOREF(uInfo);
751#else
752# error "port me"
753#endif
754 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
755 return off;
756}
757
758
759/*********************************************************************************************************************************
760* Loads, Stores and Related Stuff. *
761*********************************************************************************************************************************/
762
763/**
764 * Emits setting a GPR to zero.
765 */
766DECLINLINE(uint32_t) iemNativeEmitGprZero(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr)
767{
768#ifdef RT_ARCH_AMD64
769 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
770 AssertReturn(pbCodeBuf, UINT32_MAX);
771 /* xor gpr32, gpr32 */
772 if (iGpr >= 8)
773 pbCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
774 pbCodeBuf[off++] = 0x33;
775 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGpr & 7, iGpr & 7);
776
777#elif RT_ARCH_ARM64
778 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
779 AssertReturn(pu32CodeBuf, UINT32_MAX);
780 /* mov gpr, #0x0 */
781 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | iGpr;
782
783#else
784# error "port me"
785#endif
786 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
787 return off;
788}
789
790
791/**
792 * Emits loading a constant into a 64-bit GPR
793 */
794DECLINLINE(uint32_t) iemNativeEmitLoadGprImm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint64_t uImm64)
795{
796 if (!uImm64)
797 return iemNativeEmitGprZero(pReNative, off, iGpr);
798
799#ifdef RT_ARCH_AMD64
800 if (uImm64 <= UINT32_MAX)
801 {
802 /* mov gpr, imm32 */
803 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
804 AssertReturn(pbCodeBuf, UINT32_MAX);
805 if (iGpr >= 8)
806 pbCodeBuf[off++] = X86_OP_REX_B;
807 pbCodeBuf[off++] = 0xb8 + (iGpr & 7);
808 pbCodeBuf[off++] = RT_BYTE1(uImm64);
809 pbCodeBuf[off++] = RT_BYTE2(uImm64);
810 pbCodeBuf[off++] = RT_BYTE3(uImm64);
811 pbCodeBuf[off++] = RT_BYTE4(uImm64);
812 }
813 else
814 {
815 /* mov gpr, imm64 */
816 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 10);
817 AssertReturn(pbCodeBuf, UINT32_MAX);
818 if (iGpr < 8)
819 pbCodeBuf[off++] = X86_OP_REX_W;
820 else
821 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_B;
822 pbCodeBuf[off++] = 0xb8 + (iGpr & 7);
823 pbCodeBuf[off++] = RT_BYTE1(uImm64);
824 pbCodeBuf[off++] = RT_BYTE2(uImm64);
825 pbCodeBuf[off++] = RT_BYTE3(uImm64);
826 pbCodeBuf[off++] = RT_BYTE4(uImm64);
827 pbCodeBuf[off++] = RT_BYTE5(uImm64);
828 pbCodeBuf[off++] = RT_BYTE6(uImm64);
829 pbCodeBuf[off++] = RT_BYTE7(uImm64);
830 pbCodeBuf[off++] = RT_BYTE8(uImm64);
831 }
832
833#elif RT_ARCH_ARM64
834 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
835 AssertReturn(pu32CodeBuf, UINT32_MAX);
836
837 /*
838 * We need to start this sequence with a 'mov gpr, imm16, lsl #x' and
839 * supply the remaining bits using 'movk gpr, imm16, lsl #x'.
840 *
841 * The mov instruction is encoded 0xd2800000 + shift + imm16 + gpr,
842 * while the movk is 0xf2800000 + shift + imm16 + gpr, meaning the diff
843 * is 0x20000000 (bit 29). So, we keep this bit in a variable and set it
844 * after the first non-zero immediate component so we switch to movk for
845 * the remainder.
846 */
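    /* Worked example (illustrative): uImm64 = 0x0000cafe00000000 results in a
       single 'movz xN, #0xcafe, lsl #32', while 0x0123456789abcdef results in
       a movz followed by three movk instructions, one per 16-bit chunk. */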
847 uint32_t fMovK = 0;
848 /* mov gpr, imm16 */
849 uint32_t uImmPart = ((uint32_t)((uImm64 >> 0) & UINT32_C(0xffff)) << 5);
850 if (uImmPart)
851 {
852 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | (UINT32_C(0) << 21) | uImmPart | iGpr;
853 fMovK |= RT_BIT_32(29);
854 }
855 /* mov[k] gpr, imm16, lsl #16 */
856 uImmPart = ((uint32_t)((uImm64 >> 16) & UINT32_C(0xffff)) << 5);
857 if (uImmPart)
858 {
859 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | fMovK | (UINT32_C(1) << 21) | uImmPart | iGpr;
860 fMovK |= RT_BIT_32(29);
861 }
862 /* mov[k] gpr, imm16, lsl #32 */
863 uImmPart = ((uint32_t)((uImm64 >> 32) & UINT32_C(0xffff)) << 5);
864 if (uImmPart)
865 {
866 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | fMovK | (UINT32_C(2) << 21) | uImmPart | iGpr;
867 fMovK |= RT_BIT_32(29);
868 }
869 /* mov[k] gpr, imm16, lsl #48 */
870 uImmPart = ((uint32_t)((uImm64 >> 48) & UINT32_C(0xffff)) << 5);
871 if (uImmPart)
872 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | fMovK | (UINT32_C(3) << 21) | uImmPart | iGpr;
873
874 /** @todo there is an inverted mask variant we might want to explore if it
875 * reduces the number of instructions... */
876 /** @todo load into 'w' register instead of 'x' when imm64 <= UINT32_MAX?
877 * clang 12.x does that, only to use the 'x' version for the
878 * addressing in the following ldr). */
879
880#else
881# error "port me"
882#endif
883 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
884 return off;
885}
886
887
888/**
889 * Emits loading a constant into an 8-bit GPR
890 * @note The AMD64 version does *NOT* clear any bits in the 8..63 range,
891 * only the ARM64 version does that.
892 */
893DECLINLINE(uint32_t) iemNativeEmitLoadGpr8Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint8_t uImm8)
894{
895#ifdef RT_ARCH_AMD64
896 /* mov gpr, imm8 */
897 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
898 AssertReturn(pbCodeBuf, UINT32_MAX);
899 if (iGpr >= 8)
900 pbCodeBuf[off++] = X86_OP_REX_B;
901 else if (iGpr >= 4)
902 pbCodeBuf[off++] = X86_OP_REX;
903 pbCodeBuf[off++] = 0xb0 + (iGpr & 7);
904 pbCodeBuf[off++] = RT_BYTE1(uImm8);
905
906#elif RT_ARCH_ARM64
907 /* movz gpr, imm16, lsl #0 */
908 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
909 AssertReturn(pu32CodeBuf, UINT32_MAX);
910 pu32CodeBuf[off++] = UINT32_C(0xd2800000) | (UINT32_C(0) << 21) | ((uint32_t)uImm8 << 5) | iGpr;
911
912#else
913# error "port me"
914#endif
915 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
916 return off;
917}
918
919
920#ifdef RT_ARCH_AMD64
921/**
922 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
923 */
924DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByVCpuDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, uint32_t offVCpu)
925{
926 if (offVCpu < 128)
927 {
928 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, IEMNATIVE_REG_FIXED_PVMCPU);
929 pbCodeBuf[off++] = (uint8_t)(int8_t)offVCpu;
930 }
931 else
932 {
933 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, IEMNATIVE_REG_FIXED_PVMCPU);
934 pbCodeBuf[off++] = RT_BYTE1((uint32_t)offVCpu);
935 pbCodeBuf[off++] = RT_BYTE2((uint32_t)offVCpu);
936 pbCodeBuf[off++] = RT_BYTE3((uint32_t)offVCpu);
937 pbCodeBuf[off++] = RT_BYTE4((uint32_t)offVCpu);
938 }
939 return off;
940}
941#elif RT_ARCH_ARM64
942/**
943 * Common bit of iemNativeEmitLoadGprFromVCpuU64 and friends.
944 */
945DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByVCpuLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
946 uint32_t offVCpu, ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
947{
948 /*
949 * There are a couple of ldr variants that take an immediate offset, so
950 * try to use those if we can, otherwise we have to use the temporary register
951 * to help with the addressing.
952 */
953 if (offVCpu < _4K * cbData && !(offVCpu & (cbData - 1)))
954 {
955 /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
956 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
957 AssertReturn(pu32CodeBuf, UINT32_MAX);
958 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU, offVCpu / cbData);
959 }
960 else if (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx) < (unsigned)(_4K * cbData) && !(offVCpu & (cbData - 1)))
961 {
962 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
963 AssertReturn(pu32CodeBuf, UINT32_MAX);
964 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PCPUMCTX,
965 (offVCpu - RT_UOFFSETOF(VMCPU, cpum.GstCtx)) / cbData);
966 }
967 else
968 {
969 /* The offset is too large, so we must load it into a register and use
970 ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
971 /** @todo reduce offVCpu by >> 3 or >> 2 if it saves instructions? */
972 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, offVCpu);
973
974 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
975 AssertReturn(pu32CodeBuf, UINT32_MAX);
976 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, IEMNATIVE_REG_FIXED_PVMCPU, IEMNATIVE_REG_FIXED_TMP0);
977 }
978 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
979 return off;
980}
981#endif
982
983
984/**
985 * Emits a 64-bit GPR load of a VCpu value.
986 */
987DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
988{
989#ifdef RT_ARCH_AMD64
990 /* mov reg64, mem64 */
991 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
992 AssertReturn(pbCodeBuf, UINT32_MAX);
993 if (iGpr < 8)
994 pbCodeBuf[off++] = X86_OP_REX_W;
995 else
996 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
997 pbCodeBuf[off++] = 0x8b;
998 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
999 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1000
1001#elif RT_ARCH_ARM64
1002 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));
1003
1004#else
1005# error "port me"
1006#endif
1007 return off;
1008}
1009
1010
1011/**
1012 * Emits a 32-bit GPR load of a VCpu value.
1013 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
1014 */
1015DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
1016{
1017#ifdef RT_ARCH_AMD64
1018 /* mov reg32, mem32 */
1019 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1020 AssertReturn(pbCodeBuf, UINT32_MAX);
1021 if (iGpr >= 8)
1022 pbCodeBuf[off++] = X86_OP_REX_R;
1023 pbCodeBuf[off++] = 0x8b;
1024 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
1025 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1026
1027#elif RT_ARCH_ARM64
1028 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
1029
1030#else
1031# error "port me"
1032#endif
1033 return off;
1034}
1035
1036
1037/**
1038 * Emits a 16-bit GPR load of a VCpu value.
1039 * @note Bits 16 thru 63 in the GPR will be zero after the operation.
1040 */
1041DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
1042{
1043#ifdef RT_ARCH_AMD64
1044 /* movzx reg32, mem16 */
1045 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
1046 AssertReturn(pbCodeBuf, UINT32_MAX);
1047 if (iGpr >= 8)
1048 pbCodeBuf[off++] = X86_OP_REX_R;
1049 pbCodeBuf[off++] = 0x0f;
1050 pbCodeBuf[off++] = 0xb7;
1051 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
1052 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1053
1054#elif RT_ARCH_ARM64
1055 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Half, sizeof(uint16_t));
1056
1057#else
1058# error "port me"
1059#endif
1060 return off;
1061}
1062
1063
1064/**
1065 * Emits an 8-bit GPR load of a VCpu value.
1066 * @note Bits 8 thru 63 in the GPR will be zero after the operation.
1067 */
1068DECLINLINE(uint32_t) iemNativeEmitLoadGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
1069{
1070#ifdef RT_ARCH_AMD64
1071 /* movzx reg32, mem8 */
1072 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
1073 AssertReturn(pbCodeBuf, UINT32_MAX);
1074 if (iGpr >= 8)
1075 pbCodeBuf[off++] = X86_OP_REX_R;
1076 pbCodeBuf[off++] = 0x0f;
1077 pbCodeBuf[off++] = 0xb6;
1078 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
1079 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1080
1081#elif RT_ARCH_ARM64
1082 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_Ld_Byte, sizeof(uint8_t));
1083
1084#else
1085# error "port me"
1086#endif
1087 return off;
1088}
1089
1090
1091/**
1092 * Emits a store of a GPR value to a 64-bit VCpu field.
1093 */
1094DECLINLINE(uint32_t) iemNativeEmitStoreGprToVCpuU64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
1095{
1096#ifdef RT_ARCH_AMD64
1097 /* mov mem64, reg64 */
1098 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1099 AssertReturn(pbCodeBuf, UINT32_MAX);
1100 if (iGpr < 8)
1101 pbCodeBuf[off++] = X86_OP_REX_W;
1102 else
1103 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
1104 pbCodeBuf[off++] = 0x89;
1105 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
1106 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1107
1108#elif RT_ARCH_ARM64
1109 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Dword, sizeof(uint64_t));
1110
1111#else
1112# error "port me"
1113#endif
1114 return off;
1115}
1116
1117
1118/**
1119 * Emits a store of a GPR value to a 32-bit VCpu field.
1120 */
1121DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
1122{
1123#ifdef RT_ARCH_AMD64
1124 /* mov mem32, reg32 */
1125 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1126 AssertReturn(pbCodeBuf, UINT32_MAX);
1127 if (iGpr >= 8)
1128 pbCodeBuf[off++] = X86_OP_REX_R;
1129 pbCodeBuf[off++] = 0x89;
1130 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
1131 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1132
1133#elif RT_ARCH_ARM64
1134 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Word, sizeof(uint32_t));
1135
1136#else
1137# error "port me"
1138#endif
1139 return off;
1140}
1141
1142
1143/**
1144 * Emits a store of a GPR value to a 16-bit VCpu field.
1145 */
1146DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU16(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
1147{
1148#ifdef RT_ARCH_AMD64
1149 /* mov mem16, reg16 */
1150 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
1151 AssertReturn(pbCodeBuf, UINT32_MAX);
1152 pbCodeBuf[off++] = X86_OP_PRF_SIZE_OP;
1153 if (iGpr >= 8)
1154 pbCodeBuf[off++] = X86_OP_REX_R;
1155 pbCodeBuf[off++] = 0x89;
1156 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
1157 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1158
1159#elif RT_ARCH_ARM64
1160 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Half, sizeof(uint16_t));
1161
1162#else
1163# error "port me"
1164#endif
1165 return off;
1166}
1167
1168
1169/**
1170 * Emits a store of a GPR value to an 8-bit VCpu field.
1171 */
1172DECLINLINE(uint32_t) iemNativeEmitStoreGprFromVCpuU8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGpr, uint32_t offVCpu)
1173{
1174#ifdef RT_ARCH_AMD64
1175 /* mov mem8, reg8 */
1176 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1177 AssertReturn(pbCodeBuf, UINT32_MAX);
1178 if (iGpr >= 8)
1179 pbCodeBuf[off++] = X86_OP_REX_R;
1180 pbCodeBuf[off++] = 0x88;
1181 off = iemNativeEmitGprByVCpuDisp(pbCodeBuf, off, iGpr, offVCpu);
1182 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1183
1184#elif RT_ARCH_ARM64
1185 off = iemNativeEmitGprByVCpuLdSt(pReNative, off, iGpr, offVCpu, kArmv8A64InstrLdStType_St_Byte, sizeof(uint8_t));
1186
1187#else
1188# error "port me"
1189#endif
1190 return off;
1191}
1192
1193
1194/**
1195 * Emits a gprdst = gprsrc load.
1196 */
1197DECLINLINE(uint32_t) iemNativeEmitLoadGprFromGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
1198{
1199#ifdef RT_ARCH_AMD64
1200 /* mov gprdst, gprsrc */
1201 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
1202 AssertReturn(pbCodeBuf, UINT32_MAX);
1203 if ((iGprDst | iGprSrc) >= 8)
1204 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W | X86_OP_REX_B
1205 : iGprSrc >= 8 ? X86_OP_REX_W | X86_OP_REX_R | X86_OP_REX_B
1206 : X86_OP_REX_W | X86_OP_REX_R;
1207 else
1208 pbCodeBuf[off++] = X86_OP_REX_W;
1209 pbCodeBuf[off++] = 0x8b;
1210 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
1211
1212#elif RT_ARCH_ARM64
1213 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1214 AssertReturn(pu32CodeBuf, UINT32_MAX);
1215 /* mov dst, src; alias for: orr dst, xzr, src */
1216 pu32CodeBuf[off++] = UINT32_C(0xaa000000) | ((uint32_t)iGprSrc << 16) | ((uint32_t)ARMV8_A64_REG_XZR << 5) | iGprDst;
1217
1218#else
1219# error "port me"
1220#endif
1221 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1222 return off;
1223}
1224
1225#ifdef RT_ARCH_AMD64
1226/**
1227 * Common bit of iemNativeEmitLoadGprByBp and friends.
1228 */
1229DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByBpDisp(uint8_t *pbCodeBuf, uint32_t off, uint8_t iGprReg, int32_t offDisp,
1230 PIEMRECOMPILERSTATE pReNativeAssert)
1231{
1232 if (offDisp < 128 && offDisp >= -128)
1233 {
1234 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, X86_GREG_xBP);
1235 pbCodeBuf[off++] = (uint8_t)(int8_t)offDisp;
1236 }
1237 else
1238 {
1239 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, X86_GREG_xBP);
1240 pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
1241 pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
1242 pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
1243 pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
1244 }
1245 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNativeAssert, off); RT_NOREF(pReNativeAssert);
1246 return off;
1247}
1248#endif
1249
1250
1251#ifdef RT_ARCH_AMD64
1252/**
1253 * Emits a 64-bit GPR load instruction with a BP relative source address.
1254 */
1255DECLINLINE(uint32_t) iemNativeEmitLoadGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
1256{
1257 /* mov gprdst, qword [rbp + offDisp] */
1258 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1259 AssertReturn(pbCodeBuf, UINT32_MAX);
1260 if (iGprDst < 8)
1261 pbCodeBuf[off++] = X86_OP_REX_W;
1262 else
1263 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
1264 pbCodeBuf[off++] = 0x8b;
1265 return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);
1266}
1267#endif
1268
1269
1270#ifdef RT_ARCH_AMD64
1271/**
1272 * Emits a 32-bit GPR load instruction with a BP relative source address.
1273 */
1274DECLINLINE(uint32_t) iemNativeEmitLoadGprByBpU32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
1275{
1276 /* mov gprdst, dword [rbp + offDisp] */
1277 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1278 AssertReturn(pbCodeBuf, UINT32_MAX);
1279 if (iGprDst >= 8)
1280 pbCodeBuf[off++] = X86_OP_REX_R;
1281 pbCodeBuf[off++] = 0x8b;
1282 return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);
1283}
1284#endif
1285
1286
1287#ifdef RT_ARCH_AMD64
1288/**
1289 * Emits a load effective address into a GPR with a BP relative source address.
1290 */
1291DECLINLINE(uint32_t) iemNativeEmitLeaGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t offDisp)
1292{
1293 /* lea gprdst, [rbp + offDisp] */
1294 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1295 AssertReturn(pbCodeBuf, UINT32_MAX);
1296 if (iGprDst < 8)
1297 pbCodeBuf[off++] = X86_OP_REX_W;
1298 else
1299 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
1300 pbCodeBuf[off++] = 0x8d;
1301 return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprDst, offDisp, pReNative);
1302}
1303#endif
1304
1305
1306/**
1307 * Emits a 64-bit GPR store with a BP relative destination address.
1308 *
1309 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
1310 */
1311DECLINLINE(uint32_t) iemNativeEmitStoreGprByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint8_t iGprSrc)
1312{
1313#ifdef RT_ARCH_AMD64
1314 /* mov qword [rbp + offDisp], gprdst */
1315 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1316 AssertReturn(pbCodeBuf, UINT32_MAX);
1317 if (iGprSrc < 8)
1318 pbCodeBuf[off++] = X86_OP_REX_W;
1319 else
1320 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_R;
1321 pbCodeBuf[off++] = 0x89;
1322 return iemNativeEmitGprByBpDisp(pbCodeBuf, off, iGprSrc, offDisp, pReNative);
1323
1324#elif defined(RT_ARCH_ARM64)
1325 if (offDisp >= 0 && offDisp < 4096 * 8 && !((uint32_t)offDisp & 7))
1326 {
1327 /* str w/ unsigned imm12 (scaled) */
1328 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1329 AssertReturn(pu32CodeBuf, UINT32_MAX);
1330 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(kArmv8A64InstrLdStType_St_Dword, iGprSrc,
1331 ARMV8_A64_REG_BP, (uint32_t)offDisp / 8);
1332 }
1333 else if (offDisp >= -256 && offDisp <= 256)
1334 {
1335 /* stur w/ signed imm9 (unscaled) */
1336 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1337 AssertReturn(pu32CodeBuf, UINT32_MAX);
1338 pu32CodeBuf[off++] = Armv8A64MkInstrSturLdur(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_REG_BP, offDisp);
1339 }
1340 else
1341 {
1342 /* Use temporary indexing register. */
1343 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, (uint32_t)offDisp);
1344 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1345 AssertReturn(pu32CodeBuf, UINT32_MAX);
1346 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(kArmv8A64InstrLdStType_St_Dword, iGprSrc, ARMV8_A64_REG_BP,
1347 IEMNATIVE_REG_FIXED_TMP0, kArmv8A64InstrLdStExtend_Sxtw);
1348 }
1349 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1350 return off;
1351
1352#else
1353# error "Port me!"
1354#endif
1355}
1356
1357
1358/**
1359 * Emits a 64-bit immediate store with a BP relative destination address.
1360 *
1361 * @note May trash IEMNATIVE_REG_FIXED_TMP0.
1362 */
1363DECLINLINE(uint32_t) iemNativeEmitStoreImm64ByBp(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offDisp, uint64_t uImm64)
1364{
1365#ifdef RT_ARCH_AMD64
1366 if ((int64_t)uImm64 == (int32_t)uImm64)
1367 {
1368 /* mov qword [rbp + offDisp], imm32 - sign extended */
1369 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 11);
1370 AssertReturn(pbCodeBuf, UINT32_MAX);
1371
1372 pbCodeBuf[off++] = X86_OP_REX_W;
1373 pbCodeBuf[off++] = 0xc7;
1374 if (offDisp < 128 && offDisp >= -128)
1375 {
1376 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, 0, X86_GREG_xBP);
1377 pbCodeBuf[off++] = (uint8_t)offDisp;
1378 }
1379 else
1380 {
1381 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, 0, X86_GREG_xBP);
1382 pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
1383 pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
1384 pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
1385 pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
1386 }
1387 pbCodeBuf[off++] = RT_BYTE1(uImm64);
1388 pbCodeBuf[off++] = RT_BYTE2(uImm64);
1389 pbCodeBuf[off++] = RT_BYTE3(uImm64);
1390 pbCodeBuf[off++] = RT_BYTE4(uImm64);
1391 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1392 return off;
1393 }
1394#endif
1395
1396 /* Load tmp0, imm64; Store tmp to bp+disp. */
1397 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, uImm64);
1398 return iemNativeEmitStoreGprByBp(pReNative, off, offDisp, IEMNATIVE_REG_FIXED_TMP0);
1399}
1400
1401
1402#ifdef RT_ARCH_AMD64
1403/**
1404 * Common bit of iemNativeEmitLoadGprByGpr and friends.
1405 */
1406DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByGprDisp(uint8_t *pbCodeBuf, uint32_t off,
1407 uint8_t iGprReg, uint8_t iGprBase, int32_t offDisp)
1408{
1409 if (offDisp == 0 && (iGprBase & 7) != X86_GREG_xBP) /* Can use encoding w/o displacement field. */
1410 {
1411 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM0, iGprReg & 7, iGprBase & 7);
1412 if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
1413 pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
1414 }
1415 else if (offDisp == (int8_t)offDisp)
1416 {
1417 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM1, iGprReg & 7, iGprBase & 7);
1418 if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
1419 pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
1420 pbCodeBuf[off++] = (uint8_t)offDisp;
1421 }
1422 else
1423 {
1424 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_MEM4, iGprReg & 7, iGprBase & 7);
1425 if ((iGprBase & 7) == X86_GREG_xSP) /* for RSP/R12 relative addressing we have to use a SIB byte. */
1426 pbCodeBuf[off++] = X86_SIB_MAKE(X86_GREG_xSP, X86_GREG_xSP, 0); /* -> [RSP/R12] */
1427 pbCodeBuf[off++] = RT_BYTE1((uint32_t)offDisp);
1428 pbCodeBuf[off++] = RT_BYTE2((uint32_t)offDisp);
1429 pbCodeBuf[off++] = RT_BYTE3((uint32_t)offDisp);
1430 pbCodeBuf[off++] = RT_BYTE4((uint32_t)offDisp);
1431 }
1432 return off;
1433}
1434#elif RT_ARCH_ARM64
1435/**
1436 * Common bit of iemNativeEmitLoadGprByGpr and friends.
1437 */
1438DECL_FORCE_INLINE(uint32_t) iemNativeEmitGprByGprLdSt(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprReg,
1439 uint8_t iGprBase, int32_t offDisp,
1440 ARMV8A64INSTRLDSTTYPE enmOperation, unsigned cbData)
1441{
1442 /*
1443 * There are a couple of ldr variants that take an immediate offset, so
1444 * try to use those if we can, otherwise we have to use a temporary register
1445 * to help with the addressing.
1446 */
1447 if ((uint32_t)offDisp < _4K * cbData && !((uint32_t)offDisp & (cbData - 1)))
1448 {
1449 /* Use the unsigned variant of ldr Wt, [<Xn|SP>, #off]. */
1450 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1451 AssertReturn(pu32CodeBuf, UINT32_MAX);
1452 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRUOff(enmOperation, iGprReg, iGprBase, (uint32_t)offDisp / cbData);
1453 }
1454 else
1455 {
1456 /* The offset is too large, so we must load it into a register and use
1457 ldr Wt, [<Xn|SP>, (<Wm>|<Xm>)]. */
1458 /** @todo reduce offDisp by >> 3 or >> 2 if it saves instructions? */
1459 uint8_t const idxTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)offDisp);
1460 AssertReturn(idxTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1461
1462 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1463 AssertReturn(pu32CodeBuf, UINT32_MAX);
1464 pu32CodeBuf[off++] = Armv8A64MkInstrStLdRegIdx(enmOperation, iGprReg, iGprBase, idxTmpReg);
1465
1466 iemNativeRegFreeTmpImm(pReNative, idxTmpReg);
1467 }
1468 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1469 return off;
1470}
1471#endif
1472
1473
1474/**
1475 * Emits a 64-bit GPR load via a GPR base address with a displacement.
1476 */
1477DECLINLINE(uint32_t) iemNativeEmitLoadGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
1478 uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
1479{
1480#ifdef RT_ARCH_AMD64
1481 /* mov reg64, mem64 */
1482 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
1483 AssertReturn(pbCodeBuf, UINT32_MAX);
1484 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
1485 pbCodeBuf[off++] = 0x8b;
1486 off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprBase, offDisp);
1487 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1488
1489#elif RT_ARCH_ARM64
1490 off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, iGprBase, offDisp, kArmv8A64InstrLdStType_Ld_Dword, sizeof(uint64_t));
1491
1492#else
1493# error "port me"
1494#endif
1495 return off;
1496}
1497
1498
1499/**
1500 * Emits a 32-bit GPR load via a GPR base address with a displacement.
1501 * @note Bits 63 thru 32 in @a iGprDst will be cleared.
1502 */
1503DECLINLINE(uint32_t) iemNativeEmitLoadGpr32ByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
1504 uint8_t iGprDst, uint8_t iGprBase, int32_t offDisp)
1505{
1506#ifdef RT_ARCH_AMD64
1507 /* mov reg32, mem32 */
1508 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 8);
1509 AssertReturn(pbCodeBuf, UINT32_MAX);
1510 if (iGprDst >= 8 || iGprBase >= 8)
1511 pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprBase < 8 ? 0 : X86_OP_REX_B);
1512 pbCodeBuf[off++] = 0x8b;
1513 off = iemNativeEmitGprByGprDisp(pbCodeBuf, off, iGprDst, iGprBase, offDisp);
1514 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1515
1516#elif RT_ARCH_ARM64
1517 off = iemNativeEmitGprByGprLdSt(pReNative, off, iGprDst, iGprBase, offDisp, kArmv8A64InstrLdStType_Ld_Word, sizeof(uint32_t));
1518
1519#else
1520# error "port me"
1521#endif
1522 return off;
1523}
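
/* Usage sketch for the two load emitters above (iGprDst and iGprBase are assumed
   to be host register indices the caller already owns):
        off = iemNativeEmitLoadGprByGpr(pReNative, off, iGprDst, iGprBase, 0x18);
        AssertReturn(off != UINT32_MAX, UINT32_MAX);
   The 32-bit variant is used the same way and additionally clears bits 63:32 of
   the destination register. */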
1524
1525
1526/*********************************************************************************************************************************
1527* Subtraction and Additions *
1528*********************************************************************************************************************************/
1529
1530
1531#ifdef RT_ARCH_AMD64
1532/**
1533 * Emits a 64-bit GPR subtract with a signed immediate subtrahend.
1534 */
1535DECLINLINE(uint32_t) iemNativeEmitSubGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iSubtrahend)
1536{
1537 /* sub gprdst, imm8/imm32 */
1538 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1539 AssertReturn(pbCodeBuf, UINT32_MAX);
1540 if (iGprDst < 8)
1541 pbCodeBuf[off++] = X86_OP_REX_W;
1542 else
1543 pbCodeBuf[off++] = X86_OP_REX_W | X86_OP_REX_B;
1544 if (iSubtrahend < 128 && iSubtrahend >= -128)
1545 {
1546 pbCodeBuf[off++] = 0x83;
1547 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
1548 pbCodeBuf[off++] = (uint8_t)iSubtrahend;
1549 }
1550 else
1551 {
1552 pbCodeBuf[off++] = 0x81;
1553 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
1554 pbCodeBuf[off++] = RT_BYTE1(iSubtrahend);
1555 pbCodeBuf[off++] = RT_BYTE2(iSubtrahend);
1556 pbCodeBuf[off++] = RT_BYTE3(iSubtrahend);
1557 pbCodeBuf[off++] = RT_BYTE4(iSubtrahend);
1558 }
1559 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1560 return off;
1561}
1562#endif
1563
1564
1565/**
1566 * Emits adding a 64-bit GPR to another, storing the result in the first.
1567 * @note The AMD64 version sets flags.
1568 */
1569DECLINLINE(uint32_t ) iemNativeEmitAddTwoGprs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprAddend)
1570{
1571#if defined(RT_ARCH_AMD64)
1572 /* add Gv,Ev */
1573 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
1574 AssertReturn(pbCodeBuf, UINT32_MAX);
1575 pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
1576 | (iGprAddend < 8 ? 0 : X86_OP_REX_B);
1577 pbCodeBuf[off++] = 0x03;
1578 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprAddend & 7);
1579
1580#elif defined(RT_ARCH_ARM64)
1581 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1582 AssertReturn(pu32CodeBuf, UINT32_MAX);
1583 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iGprAddend);
1584
1585#else
1586# error "Port me"
1587#endif
1588 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1589 return off;
1590}
1591
1592
1593/**
1594 * Emits a 64-bit GPR addition with an 8-bit signed immediate.
1595 */
1596DECLINLINE(uint32_t ) iemNativeEmitAddGprImm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8)
1597{
1598#if defined(RT_ARCH_AMD64)
1599 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
1600 AssertReturn(pbCodeBuf, UINT32_MAX);
1601 /* add or inc */
1602 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
1603 if (iImm8 != 1)
1604 {
1605 pbCodeBuf[off++] = 0x83;
1606 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
1607 pbCodeBuf[off++] = (uint8_t)iImm8;
1608 }
1609 else
1610 {
1611 pbCodeBuf[off++] = 0xff;
1612 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
1613 }
1614
1615#elif defined(RT_ARCH_ARM64)
1616 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1617 AssertReturn(pu32CodeBuf, UINT32_MAX);
1618 if (iImm8 >= 0)
1619 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint8_t)iImm8);
1620 else
1621 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint8_t)-iImm8);
1622
1623#else
1624# error "Port me"
1625#endif
1626 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1627 return off;
1628}
1629
1630
1631/**
1632 * Emits a 32-bit GPR addition with an 8-bit signed immediate.
1633 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
1634 */
1635DECLINLINE(uint32_t ) iemNativeEmitAddGpr32Imm8(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int8_t iImm8)
1636{
1637#if defined(RT_ARCH_AMD64)
1638 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
1639 AssertReturn(pbCodeBuf, UINT32_MAX);
1640 /* add or inc */
1641 if (iGprDst >= 8)
1642 pbCodeBuf[off++] = X86_OP_REX_B;
1643 if (iImm8 != 1)
1644 {
1645 pbCodeBuf[off++] = 0x83;
1646 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
1647 pbCodeBuf[off++] = (uint8_t)iImm8;
1648 }
1649 else
1650 {
1651 pbCodeBuf[off++] = 0xff;
1652 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
1653 }
1654
1655#elif defined(RT_ARCH_ARM64)
1656 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1657 AssertReturn(pu32CodeBuf, UINT32_MAX);
1658 if (iImm8 >= 0)
1659 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint8_t)iImm8, false /*f64Bit*/);
1660 else
1661 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint8_t)-iImm8, false /*f64Bit*/);
1662
1663#else
1664# error "Port me"
1665#endif
1666 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1667 return off;
1668}
1669
1670
1671/**
1672 * Emits a 64-bit GPR addition with a 64-bit signed addend.
1673 */
1674DECLINLINE(uint32_t ) iemNativeEmitAddGprImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int64_t iAddend)
1675{
1676#if defined(RT_ARCH_AMD64)
1677 if (iAddend <= INT8_MAX && iAddend >= INT8_MIN)
1678 return iemNativeEmitAddGprImm8(pReNative, off, iGprDst, (int8_t)iAddend);
1679
1680 if (iAddend <= INT32_MAX && iAddend >= INT32_MIN)
1681 {
1682 /* add gpr, imm32 */
1683 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1684 AssertReturn(pbCodeBuf, UINT32_MAX);
1685 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
1686 pbCodeBuf[off++] = 0x81;
1687 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
1688 pbCodeBuf[off++] = RT_BYTE1((uint32_t)iAddend);
1689 pbCodeBuf[off++] = RT_BYTE2((uint32_t)iAddend);
1690 pbCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
1691 pbCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
1692 }
1693 else
1694 {
1695 /* Best to use a temporary register to deal with this in the simplest way: */
1696 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend);
1697 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1698
1699 /* add dst, tmpreg */
1700 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
1701 AssertReturn(pbCodeBuf, UINT32_MAX);
1702 pbCodeBuf[off++] = (iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_R)
1703 | (iTmpReg < 8 ? 0 : X86_OP_REX_B);
1704 pbCodeBuf[off++] = 0x03;
1705 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iTmpReg & 7);
1706
1707 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
1708 }
1709
1710#elif defined(RT_ARCH_ARM64)
1711 if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
1712 {
1713 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1714 AssertReturn(pu32CodeBuf, UINT32_MAX);
1715 if (iAddend >= 0)
1716 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend);
1717 else
1718 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend);
1719 }
1720 else
1721 {
1722 /* Use temporary register for the immediate. */
1723 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint64_t)iAddend);
1724 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1725
1726 /* add gprdst, gprdst, tmpreg */
1727 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1728 AssertReturn(pu32CodeBuf, UINT32_MAX);
1729 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg);
1730
1731 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
1732 }
1733
1734#else
1735# error "Port me"
1736#endif
1737 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1738 return off;
1739}
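
/* How the addend magnitude selects the encoding in iemNativeEmitAddGprImm above
   (illustrative values only): 0x40 takes the imm8 path, 0x12345 takes the imm32
   path on AMD64 but the temporary register path on ARM64 (it does not fit in the
   12-bit add/sub immediate), and 0x100000000 always goes through
   iemNativeRegAllocTmpImm followed by a register-register add. */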
1740
1741
1742/**
1743 * Emits a 32-bit GPR addition with a 32-bit signed immediate.
1744 * @note Bits 32 thru 63 in the GPR will be zero after the operation.
1745 */
1746DECLINLINE(uint32_t ) iemNativeEmitAddGpr32Imm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, int32_t iAddend)
1747{
1748#if defined(RT_ARCH_AMD64)
1749 if (iAddend <= INT8_MAX && iAddend >= INT8_MIN)
1750 return iemNativeEmitAddGpr32Imm8(pReNative, off, iGprDst, (int8_t)iAddend);
1751
1752 /* add gpr, imm32 */
1753 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1754 AssertReturn(pbCodeBuf, UINT32_MAX);
1755 if (iGprDst >= 8)
1756 pbCodeBuf[off++] = X86_OP_REX_B;
1757 pbCodeBuf[off++] = 0x81;
1758 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprDst & 7);
1759 pbCodeBuf[off++] = RT_BYTE1((uint32_t)iAddend);
1760 pbCodeBuf[off++] = RT_BYTE2((uint32_t)iAddend);
1761 pbCodeBuf[off++] = RT_BYTE3((uint32_t)iAddend);
1762 pbCodeBuf[off++] = RT_BYTE4((uint32_t)iAddend);
1763
1764#elif defined(RT_ARCH_ARM64)
1765 if ((uint64_t)RT_ABS(iAddend) < RT_BIT_32(12))
1766 {
1767 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1768 AssertReturn(pu32CodeBuf, UINT32_MAX);
1769 if (iAddend >= 0)
1770 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(false /*fSub*/, iGprDst, iGprDst, (uint32_t)iAddend, false /*f64Bit*/);
1771 else
1772 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, iGprDst, iGprDst, (uint32_t)-iAddend, false /*f64Bit*/);
1773 }
1774 else
1775 {
1776 /* Use temporary register for the immediate. */
1777 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, (uint32_t)iAddend);
1778 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1779
1780 /* add gprdst, gprdst, tmpreg */
1781 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1782 AssertReturn(pu32CodeBuf, UINT32_MAX);
1783 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(false /*fSub*/, iGprDst, iGprDst, iTmpReg, false /*f64Bit*/);
1784
1785 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
1786 }
1787
1788#else
1789# error "Port me"
1790#endif
1791 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1792 return off;
1793}
1794
1795
1796
1797/*********************************************************************************************************************************
1798* Bit Operations *
1799*********************************************************************************************************************************/
1800
1801/**
1802 * Emits code for clearing bits 16 thru 63 in the GPR.
1803 */
1804DECLINLINE(uint32_t ) iemNativeEmitClear16UpGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst)
1805{
1806#if defined(RT_ARCH_AMD64)
1807 /* movzx reg32, reg16 */
1808 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
1809 AssertReturn(pbCodeBuf, UINT32_MAX);
1810 if (iGprDst >= 8)
1811 pbCodeBuf[off++] = X86_OP_REX_B | X86_OP_REX_R;
1812 pbCodeBuf[off++] = 0x0f;
1813 pbCodeBuf[off++] = 0xb7;
1814 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprDst & 7);
1815
1816#elif defined(RT_ARCH_ARM64)
1817 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1818 AssertReturn(pu32CodeBuf, UINT32_MAX);
1819# if 1
1820 pu32CodeBuf[off++] = Armv8A64MkInstrUxth(iGprDst, iGprDst);
1821# else
1822 ///* This produces 0xffff; 0x4f: N=1 imms=001111 (immr=0) => size=64 length=15 */
1823 //pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, 0x4f);
1824# endif
1825#else
1826# error "Port me"
1827#endif
1828 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1829 return off;
1830}
1831
1832
1833/**
1834 * Emits code for AND'ing two 64-bit GPRs.
1835 *
1836 * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
1837 * and ARM64 hosts.
1838 */
1839DECLINLINE(uint32_t ) iemNativeEmitAndGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc,
1840 bool fSetFlags = false)
1841{
1842#if defined(RT_ARCH_AMD64)
1843 /* and Gv, Ev */
1844 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
1845 AssertReturn(pbCodeBuf, UINT32_MAX);
1846 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
1847 pbCodeBuf[off++] = 0x23;
1848 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
1849 RT_NOREF(fSetFlags);
1850
1851#elif defined(RT_ARCH_ARM64)
1852 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1853 AssertReturn(pu32CodeBuf, UINT32_MAX);
1854 if (!fSetFlags)
1855 pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc);
1856 else
1857 pu32CodeBuf[off++] = Armv8A64MkInstrAnds(iGprDst, iGprDst, iGprSrc);
1858
1859#else
1860# error "Port me"
1861#endif
1862 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1863 return off;
1864}
1865
1866
1867/**
1868 * Emits code for AND'ing two 32-bit GPRs.
1869 */
1870DECLINLINE(uint32_t ) iemNativeEmitAndGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
1871{
1872#if defined(RT_ARCH_AMD64)
1873 /* and Gv, Ev */
1874 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
1875 AssertReturn(pbCodeBuf, UINT32_MAX);
1876 if (iGprDst >= 8 || iGprSrc >= 8)
1877 pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
1878 pbCodeBuf[off++] = 0x23;
1879 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
1880
1881#elif defined(RT_ARCH_ARM64)
1882 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1883 AssertReturn(pu32CodeBuf, UINT32_MAX);
1884 pu32CodeBuf[off++] = Armv8A64MkInstrAnd(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
1885
1886#else
1887# error "Port me"
1888#endif
1889 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1890 return off;
1891}
1892
1893
1894/**
1895 * Emits code for AND'ing a 64-bit GPR with a constant.
1896 *
1897 * @note When fSetFlags=true, JZ/JNZ jumps can be used afterwards on both AMD64
1898 * and ARM64 hosts.
1899 */
1900DECLINLINE(uint32_t ) iemNativeEmitAndGprByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint64_t uImm,
1901 bool fSetFlags = false)
1902{
1903#if defined(RT_ARCH_AMD64)
1904 if ((int64_t)uImm == (int8_t)uImm)
1905 {
1906 /* and Ev, imm8 */
1907 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
1908 AssertReturn(pbCodeBuf, UINT32_MAX);
1909 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
1910 pbCodeBuf[off++] = 0x83;
1911 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
1912 pbCodeBuf[off++] = (uint8_t)uImm;
1913 }
1914 else if ((int64_t)uImm == (int32_t)uImm)
1915 {
1916 /* and Ev, imm32 */
1917 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1918 AssertReturn(pbCodeBuf, UINT32_MAX);
1919 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_B);
1920 pbCodeBuf[off++] = 0x81;
1921 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
1922 pbCodeBuf[off++] = RT_BYTE1(uImm);
1923 pbCodeBuf[off++] = RT_BYTE2(uImm);
1924 pbCodeBuf[off++] = RT_BYTE3(uImm);
1925 pbCodeBuf[off++] = RT_BYTE4(uImm);
1926 }
1927 else
1928 {
1929 /* Use temporary register for the 64-bit immediate. */
1930 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
1931 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1932 off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg);
1933 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
1934 }
1935 RT_NOREF(fSetFlags);
1936
1937#elif defined(RT_ARCH_ARM64)
1938 uint32_t uImmR = 0;
1939 uint32_t uImmNandS = 0;
1940 if (Armv8A64ConvertMaskToImmRImmS(uImm, &uImmNandS, &uImmR))
1941 {
1942 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
1943 AssertReturn(pu32CodeBuf, UINT32_MAX);
1944 if (!fSetFlags)
1945 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR);
1946 else
1947 pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR);
1948 }
1949 else
1950 {
1951 /* Use temporary register for the 64-bit immediate. */
1952 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
1953 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
1954 off = iemNativeEmitAndGprByGpr(pReNative, off, iGprDst, iTmpReg, fSetFlags);
1955 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
1956 }
1957
1958#else
1959# error "Port me"
1960#endif
1961 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
1962 return off;
1963}
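
/* Note on the ARM64 immediate path above (a sketch of which masks qualify): only
   bitmask immediates - a contiguous run of set bits, optionally rotated and/or
   replicated across the register, e.g. 0xffff, 0x00ff00ff00ff00ff or
   0xfffffffffffffff0 - can be encoded directly; Armv8A64ConvertMaskToImmRImmS
   reports those.  Irregular masks such as 0x8001000000000005 cannot be encoded
   and take the temporary register path instead. */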
1964
1965
1966/**
1967 * Emits code for AND'ing a 32-bit GPR with a constant.
1968 */
1969DECLINLINE(uint32_t ) iemNativeEmitAndGpr32ByImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint32_t uImm,
1970 bool fSetFlags = false)
1971{
1972#if defined(RT_ARCH_AMD64)
1973 /* and Ev, imm */
1974 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
1975 AssertReturn(pbCodeBuf, UINT32_MAX);
1976 if (iGprDst >= 8)
1977 pbCodeBuf[off++] = X86_OP_REX_B;
1978 if ((int32_t)uImm == (int8_t)uImm)
1979 {
1980 pbCodeBuf[off++] = 0x83;
1981 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
1982 pbCodeBuf[off++] = (uint8_t)uImm;
1983 }
1984 else
1985 {
1986 pbCodeBuf[off++] = 0x81;
1987 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
1988 pbCodeBuf[off++] = RT_BYTE1(uImm);
1989 pbCodeBuf[off++] = RT_BYTE2(uImm);
1990 pbCodeBuf[off++] = RT_BYTE3(uImm);
1991 pbCodeBuf[off++] = RT_BYTE4(uImm);
1992 }
1993 RT_NOREF(fSetFlags);
1994
1995#elif defined(RT_ARCH_ARM64)
1996 uint32_t uImmR = 0;
1997 uint32_t uImmNandS = 0;
1998 if (Armv8A64ConvertMaskToImmRImmS(uImm, &uImmNandS, &uImmR))
1999 {
2000 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2001 AssertReturn(pu32CodeBuf, UINT32_MAX);
2002 if (!fSetFlags)
2003 pu32CodeBuf[off++] = Armv8A64MkInstrAndImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
2004 else
2005 pu32CodeBuf[off++] = Armv8A64MkInstrAndsImm(iGprDst, iGprDst, uImmNandS, uImmR, false /*f64Bit*/);
2006 }
2007 else
2008 {
2009 /* Use temporary register for the 32-bit immediate. */
2010 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2011 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2012 if (!fSetFlags)
2013 off = iemNativeEmitAndGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg);
2014 else
2015 off = iemNativeEmitAndsGpr32ByGpr32(pReNative, off, iGprDst, iTmpReg);
2016 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2017 }
2018
2019#else
2020# error "Port me"
2021#endif
2022 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2023 return off;
2024}
2025
2026
2027/**
2028 * Emits code for XOR'ing two 64-bit GPRs.
2029 */
2030DECLINLINE(uint32_t ) iemNativeEmitXorGprByGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
2031{
2032#if defined(RT_ARCH_AMD64)
2033 /* xor Gv, Ev */
2034 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2035 AssertReturn(pbCodeBuf, UINT32_MAX);
2036 pbCodeBuf[off++] = X86_OP_REX_W | (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
2037 pbCodeBuf[off++] = 0x33;
2038 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
2039
2040#elif defined(RT_ARCH_ARM64)
2041 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2042 AssertReturn(pu32CodeBuf, UINT32_MAX);
2043 pu32CodeBuf[off++] = Armv8A64MkInstrEor(iGprDst, iGprDst, iGprSrc);
2044
2045#else
2046# error "Port me"
2047#endif
2048 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2049 return off;
2050}
2051
2052
2053/**
2054 * Emits code for XOR'ing two 32-bit GPRs.
2055 */
2056DECLINLINE(uint32_t ) iemNativeEmitXorGpr32ByGpr32(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t iGprSrc)
2057{
2058#if defined(RT_ARCH_AMD64)
2059 /* xor Gv, Ev */
2060 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2061 AssertReturn(pbCodeBuf, UINT32_MAX);
2062 if (iGprDst >= 8 || iGprSrc >= 8)
2063 pbCodeBuf[off++] = (iGprDst < 8 ? 0 : X86_OP_REX_R) | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
2064 pbCodeBuf[off++] = 0x33;
2065 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprDst & 7, iGprSrc & 7);
2066
2067#elif defined(RT_ARCH_ARM64)
2068 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2069 AssertReturn(pu32CodeBuf, UINT32_MAX);
2070 pu32CodeBuf[off++] = Armv8A64MkInstrEor(iGprDst, iGprDst, iGprSrc, false /*f64Bit*/);
2071
2072#else
2073# error "Port me"
2074#endif
2075 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2076 return off;
2077}
2078
2079
2080/*********************************************************************************************************************************
2081* Shifting *
2082*********************************************************************************************************************************/
2083
2084/**
2085 * Emits code for shifting a GPR a fixed number of bits to the left.
2086 */
2087DECLINLINE(uint32_t ) iemNativeEmitShiftGprLeft(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2088{
2089 Assert(cShift > 0 && cShift < 64);
2090
2091#if defined(RT_ARCH_AMD64)
2092 /* shl dst, cShift */
2093 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2094 AssertReturn(pbCodeBuf, UINT32_MAX);
2095 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
2096 if (cShift != 1)
2097 {
2098 pbCodeBuf[off++] = 0xc1;
2099 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
2100 pbCodeBuf[off++] = cShift;
2101 }
2102 else
2103 {
2104 pbCodeBuf[off++] = 0xd1;
2105 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
2106 }
2107
2108#elif defined(RT_ARCH_ARM64)
2109 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2110 AssertReturn(pu32CodeBuf, UINT32_MAX);
2111 pu32CodeBuf[off++] = Armv8A64MkInstrLslImm(iGprDst, iGprDst, cShift);
2112
2113#else
2114# error "Port me"
2115#endif
2116 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2117 return off;
2118}
2119
2120
2121/**
2122 * Emits code for shifting a 32-bit GPR a fixed number of bits to the left.
2123 */
2124DECLINLINE(uint32_t ) iemNativeEmitShiftGpr32Left(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2125{
2126 Assert(cShift > 0 && cShift < 32);
2127
2128#if defined(RT_ARCH_AMD64)
2129 /* shl dst, cShift */
2130 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2131 AssertReturn(pbCodeBuf, UINT32_MAX);
2132 if (iGprDst >= 8)
2133 pbCodeBuf[off++] = X86_OP_REX_B;
2134 if (cShift != 1)
2135 {
2136 pbCodeBuf[off++] = 0xc1;
2137 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
2138 pbCodeBuf[off++] = cShift;
2139 }
2140 else
2141 {
2142 pbCodeBuf[off++] = 0xd1;
2143 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprDst & 7);
2144 }
2145
2146#elif defined(RT_ARCH_ARM64)
2147 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2148 AssertReturn(pu32CodeBuf, UINT32_MAX);
2149 pu32CodeBuf[off++] = Armv8A64MkInstrLslImm(iGprDst, iGprDst, cShift, false /*64Bit*/);
2150
2151#else
2152# error "Port me"
2153#endif
2154 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2155 return off;
2156}
2157
2158
2159/**
2160 * Emits code for (unsigned) shifting a GPR a fixed number of bits to the right.
2161 */
2162DECLINLINE(uint32_t ) iemNativeEmitShiftGprRight(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2163{
2164 Assert(cShift > 0 && cShift < 64);
2165
2166#if defined(RT_ARCH_AMD64)
2167 /* shr dst, cShift */
2168 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2169 AssertReturn(pbCodeBuf, UINT32_MAX);
2170 pbCodeBuf[off++] = iGprDst < 8 ? X86_OP_REX_W : X86_OP_REX_W | X86_OP_REX_B;
2171 if (cShift != 1)
2172 {
2173 pbCodeBuf[off++] = 0xc1;
2174 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
2175 pbCodeBuf[off++] = cShift;
2176 }
2177 else
2178 {
2179 pbCodeBuf[off++] = 0xd1;
2180 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
2181 }
2182
2183#elif defined(RT_ARCH_ARM64)
2184 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2185 AssertReturn(pu32CodeBuf, UINT32_MAX);
2186 pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprDst, cShift);
2187
2188#else
2189# error "Port me"
2190#endif
2191 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2192 return off;
2193}
2194
2195
2196/**
2197 * Emits code for (unsigned) shifting a 32-bit GPR a fixed number of bits to the
2198 * right.
2199 */
2200DECLINLINE(uint32_t ) iemNativeEmitShiftGpr32Right(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprDst, uint8_t cShift)
2201{
2202 Assert(cShift > 0 && cShift < 32);
2203
2204#if defined(RT_ARCH_AMD64)
2205 /* shr dst, cShift */
2206 uint8_t *pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2207 AssertReturn(pbCodeBuf, UINT32_MAX);
2208 if (iGprDst >= 8)
2209 pbCodeBuf[off++] = X86_OP_REX_B;
2210 if (cShift != 1)
2211 {
2212 pbCodeBuf[off++] = 0xc1;
2213 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
2214 pbCodeBuf[off++] = cShift;
2215 }
2216 else
2217 {
2218 pbCodeBuf[off++] = 0xd1;
2219 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 5, iGprDst & 7);
2220 }
2221
2222#elif defined(RT_ARCH_ARM64)
2223 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2224 AssertReturn(pu32CodeBuf, UINT32_MAX);
2225 pu32CodeBuf[off++] = Armv8A64MkInstrLsrImm(iGprDst, iGprDst, cShift, false /*64Bit*/);
2226
2227#else
2228# error "Port me"
2229#endif
2230 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2231 return off;
2232}
2233
2234
2235
2236/*********************************************************************************************************************************
2237* Compare and Testing *
2238*********************************************************************************************************************************/
2239
2240
2241#ifdef RT_ARCH_ARM64
2242/**
2243 * Emits an ARM64 compare instruction.
2244 */
2245DECLINLINE(uint32_t) iemNativeEmitCmpArm64(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight,
2246 bool f64Bit = true, uint32_t cShift = 0,
2247 ARMV8A64INSTRSHIFT enmShift = kArmv8A64InstrShift_Lsr)
2248{
2249 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2250 AssertReturn(pu32CodeBuf, UINT32_MAX);
2251 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubReg(true /*fSub*/, ARMV8_A64_REG_XZR /*iRegResult*/, iGprLeft, iGprRight,
2252 f64Bit, true /*fSetFlags*/, cShift, enmShift);
2253 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2254 return off;
2255}
2256#endif
2257
2258
2259/**
2260 * Emits a compare of two 64-bit GPRs, setting status flags/whatever for use
2261 * with conditional instructions.
2262 */
2263DECLINLINE(uint32_t) iemNativeEmitCmpGprWithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint8_t iGprRight)
2264{
2265#ifdef RT_ARCH_AMD64
2266 /* cmp Gv, Ev */
2267 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2268 AssertReturn(pbCodeBuf, UINT32_MAX);
2269 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
2270 pbCodeBuf[off++] = 0x3b;
2271 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprLeft & 7, iGprRight & 7);
2272 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2273
2274#elif defined(RT_ARCH_ARM64)
2275 off = iemNativeEmitCmpArm64(pReNative, off, iGprLeft, iGprRight, true /*f64Bit*/);
2276
2277#else
2278# error "Port me!"
2279#endif
2280 return off;
2281}
2282
2283
2284/**
2285 * Emits a compare of two 32-bit GPRs, setting status flags/whatever for use
2286 * with conditional instructions.
2287 */
2288DECLINLINE(uint32_t) iemNativeEmitCmpGpr32WithGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2289 uint8_t iGprLeft, uint8_t iGprRight)
2290{
2291#ifdef RT_ARCH_AMD64
2292 /* cmp Gv, Ev */
2293 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
2294 AssertReturn(pbCodeBuf, UINT32_MAX);
2295 if (iGprLeft >= 8 || iGprRight >= 8)
2296 pbCodeBuf[off++] = (iGprLeft >= 8 ? X86_OP_REX_R : 0) | (iGprRight >= 8 ? X86_OP_REX_B : 0);
2297 pbCodeBuf[off++] = 0x3b;
2298 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprLeft & 7, iGprRight & 7);
2299 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2300
2301#elif defined(RT_ARCH_ARM64)
2302 off = iemNativeEmitCmpArm64(pReNative, off, iGprLeft, iGprRight, false /*f64Bit*/);
2303
2304#else
2305# error "Port me!"
2306#endif
2307 return off;
2308}
2309
2310
2311/**
2312 * Emits a compare of a 64-bit GPR with a constant value, setting status
2313 * flags/whatever for use with conditional instructions.
2314 */
2315DECLINLINE(uint32_t) iemNativeEmitCmpGprWithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint64_t uImm)
2316{
2317#ifdef RT_ARCH_AMD64
2318 if ((int64_t)uImm == (int8_t)uImm) /* imm8 is sign-extended */
2319 {
2320 /* cmp Ev, Ib */
2321 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 4);
2322 AssertReturn(pbCodeBuf, UINT32_MAX);
2323 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_B : 0);
2324 pbCodeBuf[off++] = 0x83;
2325 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
2326 pbCodeBuf[off++] = (uint8_t)uImm;
2327 }
2328 else if ((int64_t)uImm == (int32_t)uImm)
2329 {
2330 /* cmp Ev, imm */
2331 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
2332 AssertReturn(pbCodeBuf, UINT32_MAX);
2333 pbCodeBuf[off++] = X86_OP_REX_W | (iGprLeft >= 8 ? X86_OP_REX_B : 0);
2334 pbCodeBuf[off++] = 0x81;
2335 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
2336 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2337 pbCodeBuf[off++] = RT_BYTE1(uImm);
2338 pbCodeBuf[off++] = RT_BYTE2(uImm);
2339 pbCodeBuf[off++] = RT_BYTE3(uImm);
2340 pbCodeBuf[off++] = RT_BYTE4(uImm);
2341 }
2342 else
2343 {
2344 /* Use temporary register for the immediate. */
2345 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2346 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2347
2348 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iTmpReg);
2349
2350 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2351 }
2352
2353#elif defined(RT_ARCH_ARM64)
2354 /** @todo guess there are cleverer things we can do here... */
2355 if (uImm < _4K)
2356 {
2357 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2358 AssertReturn(pu32CodeBuf, UINT32_MAX);
2359 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2360 true /*64Bit*/, true /*fSetFlags*/);
2361 }
2362 else if (uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
2363 {
2364 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2365 AssertReturn(pu32CodeBuf, UINT32_MAX);
2366 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2367 true /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);
2368 }
2369 else
2370 {
2371 /* Use temporary register for the immediate. */
2372 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2373 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2374
2375 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iTmpReg);
2376
2377 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2378 }
2379
2380#else
2381# error "Port me!"
2382#endif
2383
2384 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2385 return off;
2386}
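
/* Example of the ARM64 immediate selection above (illustrative values): a uImm of
   0xfff uses the plain 12-bit form, 0x2000 (low 12 bits clear, value fits in bits
   12..23) uses the LSL #12 form, and 0x1001 fits neither and is loaded into a
   temporary register first. */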
2387
2388
2389/**
2390 * Emits a compare of a 32-bit GPR with a constant value, setting status
2391 * flags/whatever for use with conditional instructions.
2392 */
2393DECLINLINE(uint32_t) iemNativeEmitCmpGpr32WithImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprLeft, uint32_t uImm)
2394{
2395#ifdef RT_ARCH_AMD64
2396 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
2397 AssertReturn(pbCodeBuf, UINT32_MAX);
2398 if (iGprLeft >= 8)
2399 pbCodeBuf[off++] = X86_OP_REX_B;
2400 if ((int32_t)uImm == (int8_t)uImm) /* imm8 is sign-extended */
2401 {
2402 /* cmp Ev, Ib */
2403 pbCodeBuf[off++] = 0x83;
2404 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
2405 pbCodeBuf[off++] = (uint8_t)uImm;
2406 }
2407 else
2408 {
2409 /* cmp Ev, imm */
2410 pbCodeBuf[off++] = 0x81;
2411 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 7, iGprLeft & 7);
2412 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2413 pbCodeBuf[off++] = RT_BYTE1(uImm);
2414 pbCodeBuf[off++] = RT_BYTE2(uImm);
2415 pbCodeBuf[off++] = RT_BYTE3(uImm);
2416 pbCodeBuf[off++] = RT_BYTE4(uImm);
2417 }
2418
2419#elif defined(RT_ARCH_ARM64)
2420 /** @todo guess there are cleverer things we can do here... */
2421 if (uImm < _4K)
2422 {
2423 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2424 AssertReturn(pu32CodeBuf, UINT32_MAX);
2425 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2426 false /*64Bit*/, true /*fSetFlags*/);
2427 }
2428 else if (uImm < RT_BIT_32(12+12) && (uImm & (_4K - 1)) == 0)
2429 {
2430 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2431 AssertReturn(pu32CodeBuf, UINT32_MAX);
2432 pu32CodeBuf[off++] = Armv8A64MkInstrAddSubUImm12(true /*fSub*/, ARMV8_A64_REG_XZR, iGprLeft, (uint32_t)uImm,
2433 false /*64Bit*/, true /*fSetFlags*/, true /*fShift12*/);
2434 }
2435 else
2436 {
2437 /* Use temporary register for the immediate. */
2438 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, uImm);
2439 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2440
2441 off = iemNativeEmitCmpGpr32WithGpr(pReNative, off, iGprLeft, iTmpReg);
2442
2443 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2444 }
2445
2446#else
2447# error "Port me!"
2448#endif
2449
2450 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2451 return off;
2452}
2453
2454
2455
2456/*********************************************************************************************************************************
2457* Branching *
2458*********************************************************************************************************************************/
2459
2460/**
2461 * Emits a JMP rel32 / B imm19 to the given label.
2462 */
2463DECLINLINE(uint32_t) iemNativeEmitJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2464{
2465 Assert(idxLabel < pReNative->cLabels);
2466
2467#ifdef RT_ARCH_AMD64
2468 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
2469 AssertReturn(pbCodeBuf, UINT32_MAX);
2470 if (pReNative->paLabels[idxLabel].off != UINT32_MAX)
2471 {
2472 uint32_t offRel = pReNative->paLabels[idxLabel].off - (off + 2);
2473 if ((int32_t)offRel < 128 && (int32_t)offRel >= -128)
2474 {
2475 pbCodeBuf[off++] = 0xeb; /* jmp rel8 */
2476 pbCodeBuf[off++] = (uint8_t)offRel;
2477 }
2478 else
2479 {
2480 offRel -= 3;
2481 pbCodeBuf[off++] = 0xe9; /* jmp rel32 */
2482 pbCodeBuf[off++] = RT_BYTE1(offRel);
2483 pbCodeBuf[off++] = RT_BYTE2(offRel);
2484 pbCodeBuf[off++] = RT_BYTE3(offRel);
2485 pbCodeBuf[off++] = RT_BYTE4(offRel);
2486 }
2487 }
2488 else
2489 {
2490 pbCodeBuf[off++] = 0xe9; /* jmp rel32 */
2491 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
2492 pbCodeBuf[off++] = 0xfe;
2493 pbCodeBuf[off++] = 0xff;
2494 pbCodeBuf[off++] = 0xff;
2495 pbCodeBuf[off++] = 0xff;
2496 }
2497 pbCodeBuf[off++] = 0xcc; /* int3 poison */
2498
2499#elif defined(RT_ARCH_ARM64)
2500 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2501 AssertReturn(pu32CodeBuf, UINT32_MAX);
2502 if (pReNative->paLabels[idxLabel].off != UINT32_MAX)
2503 pu32CodeBuf[off++] = Armv8A64MkInstrB(pReNative->paLabels[idxLabel].off - off);
2504 else
2505 {
2506 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
2507 pu32CodeBuf[off++] = Armv8A64MkInstrB(-1);
2508 }
2509
2510#else
2511# error "Port me!"
2512#endif
2513 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2514 return off;
2515}
2516
2517
2518/**
2519 * Emits a JMP rel32 / B imm19 to a new undefined label.
2520 */
2521DECLINLINE(uint32_t) iemNativeEmitJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2522 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2523{
2524 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
2525 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
2526 return iemNativeEmitJmpToLabel(pReNative, off, idxLabel);
2527}
2528
2529/** Condition type. */
2530#ifdef RT_ARCH_AMD64
2531typedef enum IEMNATIVEINSTRCOND : uint8_t
2532{
2533 kIemNativeInstrCond_o = 0,
2534 kIemNativeInstrCond_no,
2535 kIemNativeInstrCond_c,
2536 kIemNativeInstrCond_nc,
2537 kIemNativeInstrCond_e,
2538 kIemNativeInstrCond_ne,
2539 kIemNativeInstrCond_be,
2540 kIemNativeInstrCond_nbe,
2541 kIemNativeInstrCond_s,
2542 kIemNativeInstrCond_ns,
2543 kIemNativeInstrCond_p,
2544 kIemNativeInstrCond_np,
2545 kIemNativeInstrCond_l,
2546 kIemNativeInstrCond_nl,
2547 kIemNativeInstrCond_le,
2548 kIemNativeInstrCond_nle
2549} IEMNATIVEINSTRCOND;
2550#elif defined(RT_ARCH_ARM64)
2551typedef ARMV8INSTRCOND IEMNATIVEINSTRCOND;
2552#else
2553# error "Port me!"
2554#endif
2555
2556
2557/**
2558 * Emits a Jcc rel32 / B.cc imm19 to the given label (ASSUMED requiring fixup).
2559 */
2560DECLINLINE(uint32_t) iemNativeEmitJccToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2561 uint32_t idxLabel, IEMNATIVEINSTRCOND enmCond)
2562{
2563 Assert(idxLabel < pReNative->cLabels);
2564
2565#ifdef RT_ARCH_AMD64
2566 /* jcc rel32 */
2567 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
2568 AssertReturn(pbCodeBuf, UINT32_MAX);
2569 pbCodeBuf[off++] = 0x0f;
2570 pbCodeBuf[off++] = (uint8_t)enmCond | 0x80;
2571 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4), UINT32_MAX);
2572 pbCodeBuf[off++] = 0x00;
2573 pbCodeBuf[off++] = 0x00;
2574 pbCodeBuf[off++] = 0x00;
2575 pbCodeBuf[off++] = 0x00;
2576
2577#elif defined(RT_ARCH_ARM64)
2578 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2579 AssertReturn(pu32CodeBuf, UINT32_MAX);
2580 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
2581 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, -1);
2582
2583#else
2584# error "Port me!"
2585#endif
2586 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2587 return off;
2588}
2589
2590
2591/**
2592 * Emits a Jcc rel32 / B.cc imm19 to a new label.
2593 */
2594DECLINLINE(uint32_t) iemNativeEmitJccToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2595 IEMNATIVELABELTYPE enmLabelType, uint16_t uData, IEMNATIVEINSTRCOND enmCond)
2596{
2597 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
2598 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
2599 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, enmCond);
2600}
2601
2602
2603/**
2604 * Emits a JZ/JE rel32 / B.EQ imm19 to the given label.
2605 */
2606DECLINLINE(uint32_t) iemNativeEmitJzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2607{
2608#ifdef RT_ARCH_AMD64
2609 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_e);
2610#elif defined(RT_ARCH_ARM64)
2611 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Eq);
2612#else
2613# error "Port me!"
2614#endif
2615}
2616
2617/**
2618 * Emits a JZ/JE rel32 / B.EQ imm19 to a new label.
2619 */
2620DECLINLINE(uint32_t) iemNativeEmitJzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2621 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2622{
2623#ifdef RT_ARCH_AMD64
2624 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_e);
2625#elif defined(RT_ARCH_ARM64)
2626 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Eq);
2627#else
2628# error "Port me!"
2629#endif
2630}
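
/* Typical pairing of the compare and branch emitters (a sketch; iGprValue is a
   placeholder for a host register and idxLabel for a label created earlier with
   iemNativeLabelCreate):
        off = iemNativeEmitCmpGpr32WithImm(pReNative, off, iGprValue, 0);
        AssertReturn(off != UINT32_MAX, UINT32_MAX);
        off = iemNativeEmitJzToLabel(pReNative, off, idxLabel);
        AssertReturn(off != UINT32_MAX, UINT32_MAX);
   This produces cmp + jz on AMD64 and subs wzr + b.eq on ARM64. */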
2631
2632
2633/**
2634 * Emits a JNZ/JNE rel32 / B.NE imm19 to the given label.
2635 */
2636DECLINLINE(uint32_t) iemNativeEmitJnzToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2637{
2638#ifdef RT_ARCH_AMD64
2639 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_ne);
2640#elif defined(RT_ARCH_ARM64)
2641 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Ne);
2642#else
2643# error "Port me!"
2644#endif
2645}
2646
2647/**
2648 * Emits a JNZ/JNE rel32 / B.NE imm19 to a new label.
2649 */
2650DECLINLINE(uint32_t) iemNativeEmitJnzToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2651 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2652{
2653#ifdef RT_ARCH_AMD64
2654 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_ne);
2655#elif defined(RT_ARCH_ARM64)
2656 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Ne);
2657#else
2658# error "Port me!"
2659#endif
2660}
2661
2662
2663/**
2664 * Emits a JBE/JNA rel32 / B.LS imm19 to the given label.
2665 */
2666DECLINLINE(uint32_t) iemNativeEmitJbeToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2667{
2668#ifdef RT_ARCH_AMD64
2669 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_be);
2670#elif defined(RT_ARCH_ARM64)
2671 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Ls);
2672#else
2673# error "Port me!"
2674#endif
2675}
2676
2677/**
2678 * Emits a JBE/JNA rel32 / B.LS imm19 to a new label.
2679 */
2680DECLINLINE(uint32_t) iemNativeEmitJbeToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2681 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2682{
2683#ifdef RT_ARCH_AMD64
2684 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_be);
2685#elif defined(RT_ARCH_ARM64)
2686 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Ls);
2687#else
2688# error "Port me!"
2689#endif
2690}
2691
2692
2693/**
2694 * Emits a JA/JNBE rel32 / B.HI imm19 to the given label.
2695 */
2696DECLINLINE(uint32_t) iemNativeEmitJaToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t idxLabel)
2697{
2698#ifdef RT_ARCH_AMD64
2699 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kIemNativeInstrCond_nbe);
2700#elif defined(RT_ARCH_ARM64)
2701 return iemNativeEmitJccToLabel(pReNative, off, idxLabel, kArmv8InstrCond_Hi);
2702#else
2703# error "Port me!"
2704#endif
2705}
2706
2707/**
2708 * Emits a JA/JNBE rel32 / B.HI imm19 to a new label.
2709 */
2710DECLINLINE(uint32_t) iemNativeEmitJaToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2711 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
2712{
2713#ifdef RT_ARCH_AMD64
2714 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kIemNativeInstrCond_nbe);
2715#elif defined(RT_ARCH_ARM64)
2716 return iemNativeEmitJccToNewLabel(pReNative, off, enmLabelType, uData, kArmv8InstrCond_Hi);
2717#else
2718# error "Port me!"
2719#endif
2720}
2721
2722
2723/**
2724 * Emits a Jcc rel32 / B.cc imm19 with a fixed displacement.
2725 * How @a offTarget is applied is target specific.
2726 */
2727DECLINLINE(uint32_t) iemNativeEmitJccToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2728 int32_t offTarget, IEMNATIVEINSTRCOND enmCond)
2729{
2730#ifdef RT_ARCH_AMD64
2731 /* jcc rel32 */
2732 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 6);
2733 AssertReturn(pbCodeBuf, UINT32_MAX);
2734 if (offTarget < 128 && offTarget >= -128)
2735 {
2736 pbCodeBuf[off++] = (uint8_t)enmCond | 0x70;
2737 pbCodeBuf[off++] = RT_BYTE1((uint32_t)offTarget);
2738 }
2739 else
2740 {
2741 pbCodeBuf[off++] = 0x0f;
2742 pbCodeBuf[off++] = (uint8_t)enmCond | 0x80;
2743 pbCodeBuf[off++] = RT_BYTE1((uint32_t)offTarget);
2744 pbCodeBuf[off++] = RT_BYTE2((uint32_t)offTarget);
2745 pbCodeBuf[off++] = RT_BYTE3((uint32_t)offTarget);
2746 pbCodeBuf[off++] = RT_BYTE4((uint32_t)offTarget);
2747 }
2748
2749#elif defined(RT_ARCH_ARM64)
2750 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2751 AssertReturn(pu32CodeBuf, UINT32_MAX);
2752 pu32CodeBuf[off++] = Armv8A64MkInstrBCond(enmCond, offTarget);
2753
2754#else
2755# error "Port me!"
2756#endif
2757 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2758 return off;
2759}
2760
2761
2762/**
2763 * Emits a JZ/JE rel32 / B.EQ imm19 with a fixed displacement.
2764 * How @a offTarget is applied is target specific.
2765 */
2766DECLINLINE(uint32_t) iemNativeEmitJzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2767{
2768#ifdef RT_ARCH_AMD64
2769 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_e);
2770#elif defined(RT_ARCH_ARM64)
2771 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Eq);
2772#else
2773# error "Port me!"
2774#endif
2775}
2776
2777
2778/**
2779 * Emits a JNZ/JNE rel32 / B.NE imm19 with a fixed displacement.
2780 * How @a offTarget is applied is target specific.
2781 */
2782DECLINLINE(uint32_t) iemNativeEmitJnzToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2783{
2784#ifdef RT_ARCH_AMD64
2785 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_ne);
2786#elif defined(RT_ARCH_ARM64)
2787 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Ne);
2788#else
2789# error "Port me!"
2790#endif
2791}
2792
2793
2794/**
2795 * Emits a JBE/JNA rel32 / B.LS imm19 with a fixed displacement.
2796 * How @a offTarget is applied is target specific.
2797 */
2798DECLINLINE(uint32_t) iemNativeEmitJbeToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2799{
2800#ifdef RT_ARCH_AMD64
2801 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_be);
2802#elif defined(RT_ARCH_ARM64)
2803 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Ls);
2804#else
2805# error "Port me!"
2806#endif
2807}
2808
2809
2810/**
2811 * Emits a JA/JNBE rel32 / B.HI imm19 with a fixed displacement.
2812 * How @a offTarget is applied is target specific.
2813 */
2814DECLINLINE(uint32_t) iemNativeEmitJaToFixed(PIEMRECOMPILERSTATE pReNative, uint32_t off, int32_t offTarget)
2815{
2816#ifdef RT_ARCH_AMD64
2817 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kIemNativeInstrCond_nbe);
2818#elif defined(RT_ARCH_ARM64)
2819 return iemNativeEmitJccToFixed(pReNative, off, offTarget, kArmv8InstrCond_Hi);
2820#else
2821# error "Port me!"
2822#endif
2823}
2824
2825
2826/**
2827 * Fixes up a conditional jump to a fixed label.
2828 * @see iemNativeEmitJnzToFixed, iemNativeEmitJzToFixed, ...
2829 */
2830DECLINLINE(void) iemNativeFixupFixedJump(PIEMRECOMPILERSTATE pReNative, uint32_t offFixup, uint32_t offTarget)
2831{
2832# if defined(RT_ARCH_AMD64)
2833 uint8_t * const pbCodeBuf = pReNative->pInstrBuf;
2834 if (pbCodeBuf[offFixup] != 0x0f)
2835 {
2836 Assert((uint8_t)(pbCodeBuf[offFixup] - 0x70) <= 0x10);
2837 pbCodeBuf[offFixup + 1] = (uint8_t)(offTarget - (offFixup + 2));
2838 Assert(pbCodeBuf[offFixup + 1] == offTarget - (offFixup + 2));
2839 }
2840 else
2841 {
2842 Assert((uint8_t)(pbCodeBuf[offFixup + 1] - 0x80) <= 0x10);
2843 uint32_t const offRel32 = offTarget - (offFixup + 6);
2844 pbCodeBuf[offFixup + 2] = RT_BYTE1(offRel32);
2845 pbCodeBuf[offFixup + 3] = RT_BYTE2(offRel32);
2846 pbCodeBuf[offFixup + 4] = RT_BYTE3(offRel32);
2847 pbCodeBuf[offFixup + 5] = RT_BYTE4(offRel32);
2848 }
2849
2850# elif defined(RT_ARCH_ARM64)
2851 uint32_t * const pu32CodeBuf = pReNative->pInstrBuf;
2852 Assert(RT_ABS((int32_t)(offTarget - offFixup)) < RT_BIT_32(18)); /* off by one for negative jumps, but not relevant here */
2853 pu32CodeBuf[offFixup] = (pu32CodeBuf[offFixup] & ~((RT_BIT_32(19) - 1U) << 5))
2854 | (((offTarget - offFixup) & (RT_BIT_32(19) - 1U)) << 5);
2855
2856# endif
2857}
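
/* Worked example for the AMD64 part of the fixup above (illustrative offsets):
   with a Jcc rel8 recorded at offFixup = 0x40 and a target at offTarget = 0x50,
   the displacement byte becomes 0x50 - (0x40 + 2) = 0x0e, i.e. relative to the
   end of the two byte instruction.  The 0x0f-prefixed rel32 form is patched the
   same way using the six byte instruction length. */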
2858
2859
2860/**
2861 * Internal helper, don't call directly.
2862 */
2863DECLINLINE(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfCc(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2864 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel,
2865 bool fJmpIfSet)
2866{
2867 Assert(iBitNo < 64);
2868#ifdef RT_ARCH_AMD64
2869 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
2870 AssertReturn(pbCodeBuf, UINT32_MAX);
2871 if (iBitNo < 8)
2872 {
2873 /* test Eb, imm8 */
2874 if (iGprSrc >= 4)
2875 pbCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX;
2876 pbCodeBuf[off++] = 0xf6;
2877 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
2878 pbCodeBuf[off++] = (uint8_t)1 << iBitNo;
2879 off = iemNativeEmitJccToLabel(pReNative, off, idxLabel, fJmpIfSet ? kIemNativeInstrCond_ne : kIemNativeInstrCond_e);
2880 }
2881 else
2882 {
2883 /* bt Ev, imm8 */
2884 if (iBitNo >= 32)
2885 pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_B);
2886 else if (iGprSrc >= 8)
2887 pbCodeBuf[off++] = X86_OP_REX_B;
2888 pbCodeBuf[off++] = 0x0f;
2889 pbCodeBuf[off++] = 0xba;
2890 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 4, iGprSrc & 7);
2891 pbCodeBuf[off++] = iBitNo;
2892 off = iemNativeEmitJccToLabel(pReNative, off, idxLabel, fJmpIfSet ? kIemNativeInstrCond_c : kIemNativeInstrCond_nc);
2893 }
2894
2895#elif defined(RT_ARCH_ARM64)
2896 /* Use the TBNZ instruction here. */
2897 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
2898 AssertReturn(pu32CodeBuf, UINT32_MAX);
2899 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm14At5), UINT32_MAX);
2900 pu32CodeBuf[off++] = Armv8A64MkInstrTbzTbnz(fJmpIfSet, 0, iGprSrc, iBitNo);
2901
2902#else
2903# error "Port me!"
2904#endif
2905 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
2906 return off;
2907}
2908
2909
2910/**
2911 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _set_ in
2912 * @a iGprSrc.
2913 *
2914 * @note On ARM64 the range is only +/-8191 instructions.
2915 */
2916DECLINLINE(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2917 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
2918{
2919 return iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, iGprSrc, iBitNo, idxLabel, true /*fJmpIfSet*/);
2920}
2921
2922
2923/**
2924 * Emits a jump to @a idxLabel on the condition that bit @a iBitNo _is_ _not_
2925 * _set_ in @a iGprSrc.
2926 *
2927 * @note On ARM64 the range is only +/-8191 instructions.
2928 */
2929DECLINLINE(uint32_t) iemNativeEmitTestBitInGprAndJmpToLabelIfNotSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
2930 uint8_t iGprSrc, uint8_t iBitNo, uint32_t idxLabel)
2931{
2932 return iemNativeEmitTestBitInGprAndJmpToLabelIfCc(pReNative, off, iGprSrc, iBitNo, idxLabel, false /*fJmpIfSet*/);
2933}
2934
2935
2936/**
2937 * Emits a test for any of the bits from @a fBits in @a iGprSrc, setting CPU
2938 * flags accordingly.
2939 */
2940DECLINLINE(uint32_t) iemNativeEmitTestAnyBitsInGpr(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc, uint64_t fBits)
2941{
2942 Assert(fBits != 0);
2943#ifdef RT_ARCH_AMD64
2944
2945 if (fBits >= UINT32_MAX)
2946 {
2947 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
2948 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2949
2950 /* test Ev,Gv */
2951 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 5);
2952 AssertReturn(pbCodeBuf, UINT32_MAX);
2953 pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R) | (iTmpReg < 8 ? 0 : X86_OP_REX_B);
2954 pbCodeBuf[off++] = 0x85;
2955 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprSrc & 7, iTmpReg & 7);
2956
2957 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
2958 }
2959 else if (fBits <= UINT32_MAX)
2960 {
2961 /* test Eb, imm8 or test Ev, imm32 */
2962 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 7);
2963 AssertReturn(pbCodeBuf, UINT32_MAX);
2964 if (fBits <= UINT8_MAX)
2965 {
2966 if (iGprSrc >= 4)
2967 pbCodeBuf[off++] = iGprSrc >= 8 ? X86_OP_REX_B : X86_OP_REX;
2968 pbCodeBuf[off++] = 0xf6;
2969 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
2970 pbCodeBuf[off++] = (uint8_t)fBits;
2971 }
2972 else
2973 {
2974 if (iGprSrc >= 8)
2975 pbCodeBuf[off++] = X86_OP_REX_B;
2976 pbCodeBuf[off++] = 0xf7;
2977 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 0, iGprSrc & 7);
2978 pbCodeBuf[off++] = RT_BYTE1(fBits);
2979 pbCodeBuf[off++] = RT_BYTE2(fBits);
2980 pbCodeBuf[off++] = RT_BYTE3(fBits);
2981 pbCodeBuf[off++] = RT_BYTE4(fBits);
2982 }
2983 }
2984 /** @todo implement me. */
2985 else
2986 AssertFailedReturn(UINT32_MAX);
2987
2988#elif defined(RT_ARCH_ARM64)
2989
2990 if (false)
2991 {
2992 /** @todo figure out how to work the immr / N:imms constants. */
2993 }
2994 else
2995 {
2996 uint8_t iTmpReg = iemNativeRegAllocTmpImm(pReNative, &off, fBits);
2997 AssertReturn(iTmpReg < RT_ELEMENTS(pReNative->Core.aHstRegs), UINT32_MAX);
2998
2999 /* ands Zr, iGprSrc, iTmpReg */
3000 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
3001 AssertReturn(pu32CodeBuf, UINT32_MAX);
3002 pu32CodeBuf[off++] = Armv8A64MkInstrAnds(ARMV8_A64_REG_XZR, iGprSrc, iTmpReg);
3003
3004 iemNativeRegFreeTmpImm(pReNative, iTmpReg);
3005 }
3006
3007#else
3008# error "Port me!"
3009#endif
3010 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3011 return off;
3012}
3013
3014
3015/**
3016 * Emits a jump to @a idxLabel on the condition _any_ of the bits in @a fBits
3017 * are set in @a iGprSrc.
3018 */
3019DECLINLINE(uint32_t) iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3020 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
3021{
3022 Assert(fBits); Assert(!RT_IS_POWER_OF_TWO(fBits));
3023
3024 off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, iGprSrc, fBits);
3025 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
3026
3027 return off;
3028}
3029
3030
3031/**
3032 * Emits a jump to @a idxLabel on the condition _none_ of the bits in @a fBits
3033 * are set in @a iGprSrc.
3034 */
3035DECLINLINE(uint32_t) iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfNoneSet(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3036 uint8_t iGprSrc, uint64_t fBits, uint32_t idxLabel)
3037{
3038 Assert(fBits); Assert(!RT_IS_POWER_OF_TWO(fBits));
3039
3040 off = iemNativeEmitTestAnyBitsInGpr(pReNative, off, iGprSrc, fBits);
3041 off = iemNativeEmitJzToLabel(pReNative, off, idxLabel);
3042
3043 return off;
3044}
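
/* Usage sketch for the two bit-test-and-jump helpers above (fBits must have at
   least two bits set, as asserted; iGprEfl and idxLabel are placeholders for a
   host register holding the guest EFLAGS and a previously created label):
        off = iemNativeEmitTestAnyBitsInGprAndJmpToLabelIfAnySet(pReNative, off, iGprEfl,
                                                                 X86_EFL_TF | X86_EFL_RF, idxLabel);
        AssertReturn(off != UINT32_MAX, UINT32_MAX);
   Single-bit tests should use the TestBitInGpr variants further up instead. */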
3045
3046
3047/**
3048 * Emits code that jumps to @a idxLabel if @a iGprSrc is zero.
3049 *
3050 * The operand size is given by @a f64Bit.
3051 */
3052DECLINLINE(uint32_t) iemNativeEmitTestIfGprIsZeroAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3053 uint8_t iGprSrc, bool f64Bit, uint32_t idxLabel)
3054{
3055 Assert(idxLabel < pReNative->cLabels);
3056
3057#ifdef RT_ARCH_AMD64
3058 /* test reg32,reg32 / test reg64,reg64 */
3059 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 3);
3060 AssertReturn(pbCodeBuf, UINT32_MAX);
3061 if (f64Bit)
3062 pbCodeBuf[off++] = X86_OP_REX_W | (iGprSrc < 8 ? 0 : X86_OP_REX_R | X86_OP_REX_B);
3063 else if (iGprSrc >= 8)
3064 pbCodeBuf[off++] = X86_OP_REX_R | X86_OP_REX_B;
3065 pbCodeBuf[off++] = 0x85;
3066 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, iGprSrc & 7, iGprSrc & 7);
3067 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3068
3069 /* jz idxLabel */
3070 off = iemNativeEmitJzToLabel(pReNative, off, idxLabel);
3071
3072#elif defined(RT_ARCH_ARM64)
3073 uint32_t *pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
3074 AssertReturn(pu32CodeBuf, UINT32_MAX);
3075 AssertReturn(iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_RelImm19At5), UINT32_MAX);
3076 pu32CodeBuf[off++] = Armv8A64MkInstrCbzCbnz(false /*fJmpIfNotZero*/, 0, iGprSrc, f64Bit);
3077 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3078
3079#else
3080# error "Port me!"
3081#endif
3082 return off;
3083}
3084
3085
3086/**
3087 * Emits code that jumps to a new label if @a iGprSrc is zero.
3088 *
3089 * The operand size is given by @a f64Bit.
3090 */
3091DECLINLINE(uint32_t) iemNativeEmitTestIfGprIsZeroAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t iGprSrc,
3092 bool f64Bit, IEMNATIVELABELTYPE enmLabelType,
3093 uint16_t uData = 0)
3094{
3095 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3096 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3097 return iemNativeEmitTestIfGprIsZeroAndJmpToLabel(pReNative, off, iGprSrc, f64Bit, idxLabel);
3098}
3099
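/* A usage sketch (illustration only): branch to a new label when a hypothetical
   64-bit host register idxRegCount is zero; the label type passed here is only a
   placeholder and assumed to exist in IEMNATIVELABELTYPE. */
#if 0
    off = iemNativeEmitTestIfGprIsZeroAndJmpToNewLabel(pReNative, off, idxRegCount, true /*f64Bit*/,
                                                       kIemNativeLabelType_Return /* placeholder */);
    AssertReturn(off != UINT32_MAX, UINT32_MAX);
#endif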
3100
3101/**
3102 * Emits code that jumps to the given label if @a iGprLeft and @a iGprRight
3103 * differ.
3104 */
3105DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3106 uint8_t iGprLeft, uint8_t iGprRight, uint32_t idxLabel)
3107{
3108 off = iemNativeEmitCmpGprWithGpr(pReNative, off, iGprLeft, iGprRight);
3109 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
3110 return off;
3111}
3112
3113
3114/**
3115 * Emits code that jumps to a new label if @a iGprLeft and @a iGprRight differ.
3116 */
3117DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualGprAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3118 uint8_t iGprLeft, uint8_t iGprRight,
3119 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3120{
3121 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3122 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3123 return iemNativeEmitTestIfGprNotEqualGprAndJmpToLabel(pReNative, off, iGprLeft, iGprRight, idxLabel);
3124}
3125
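/* A usage sketch (illustration only): compare two hypothetical host registers and
   branch to a freshly created label when they differ; the label type is again a
   placeholder. */
#if 0
    off = iemNativeEmitTestIfGprNotEqualGprAndJmpToNewLabel(pReNative, off, idxRegResult, idxRegExpected,
                                                            kIemNativeLabelType_Return /* placeholder */);
    AssertReturn(off != UINT32_MAX, UINT32_MAX);
#endif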
3126
3127/**
3128 * Emits code that jumps to the given label if @a iGprSrc differs from @a uImm.
3129 */
3130DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3131 uint8_t iGprSrc, uint64_t uImm, uint32_t idxLabel)
3132{
3133 off = iemNativeEmitCmpGprWithImm(pReNative, off, iGprSrc, uImm);
3134 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
3135 return off;
3136}
3137
3138
3139/**
3140 * Emits code that jumps to a new label if @a iGprSrc differs from @a uImm.
3141 */
3142DECLINLINE(uint32_t) iemNativeEmitTestIfGprNotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3143 uint8_t iGprSrc, uint64_t uImm,
3144 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3145{
3146 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3147 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3148 return iemNativeEmitTestIfGprNotEqualImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
3149}
3150
3151
3152/**
3153 * Emits code that jumps to the given label if 32-bit @a iGprSrc differs from
3154 * @a uImm.
3155 */
3156DECLINLINE(uint32_t) iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3157 uint8_t iGprSrc, uint32_t uImm, uint32_t idxLabel)
3158{
3159 off = iemNativeEmitCmpGpr32WithImm(pReNative, off, iGprSrc, uImm);
3160 off = iemNativeEmitJnzToLabel(pReNative, off, idxLabel);
3161 return off;
3162}
3163
3164
3165/**
3166 * Emits code that jumps to a new label if 32-bit @a iGprSrc differs from
3167 * @a uImm.
3168 */
3169DECLINLINE(uint32_t) iemNativeEmitTestIfGpr32NotEqualImmAndJmpToNewLabel(PIEMRECOMPILERSTATE pReNative, uint32_t off,
3170 uint8_t iGprSrc, uint32_t uImm,
3171 IEMNATIVELABELTYPE enmLabelType, uint16_t uData = 0)
3172{
3173 uint32_t const idxLabel = iemNativeLabelCreate(pReNative, enmLabelType, UINT32_MAX /*offWhere*/, uData);
3174 AssertReturn(idxLabel != UINT32_MAX, UINT32_MAX);
3175 return iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(pReNative, off, iGprSrc, uImm, idxLabel);
3176}
3177
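/* A usage sketch (illustration only): branch to an existing label when a 32-bit
   host register does not hold an expected value; idxRegTmp, uExpectedValue and
   idxLabelMismatch are placeholders for this illustration. */
#if 0
    off = iemNativeEmitTestIfGpr32NotEqualImmAndJmpToLabel(pReNative, off, idxRegTmp,
                                                           uExpectedValue, idxLabelMismatch);
    AssertReturn(off != UINT32_MAX, UINT32_MAX);
#endif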
3178
3179
3180/**
3181 * Emits a call to a 64-bit address.
3182 */
3183DECLINLINE(uint32_t) iemNativeEmitCallImm(PIEMRECOMPILERSTATE pReNative, uint32_t off, uintptr_t uPfn)
3184{
3185#ifdef RT_ARCH_AMD64
3186 off = iemNativeEmitLoadGprImm64(pReNative, off, X86_GREG_xAX, uPfn);
3187
3188 /* call rax */
3189 uint8_t * const pbCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 2);
3190 AssertReturn(pbCodeBuf, UINT32_MAX);
3191 pbCodeBuf[off++] = 0xff;
3192 pbCodeBuf[off++] = X86_MODRM_MAKE(X86_MOD_REG, 2, X86_GREG_xAX);
3193
3194#elif defined(RT_ARCH_ARM64)
3195 off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_REG_FIXED_TMP0, uPfn);
3196
3197 uint32_t * const pu32CodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
3198 AssertReturn(pu32CodeBuf, UINT32_MAX);
3199 pu32CodeBuf[off++] = Armv8A64MkInstrBlr(IEMNATIVE_REG_FIXED_TMP0);
3200#else
3201# error "port me"
3202#endif
3203 IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
3204 return off;
3205}
3206
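/* A usage sketch (illustration only): load the first argument register and call a
   C helper via iemNativeEmitCallImm. IEMNATIVE_CALL_ARG0_GREG is assumed to name
   the first argument GPR of the host calling convention, and iemNativeHlpExample
   and pvUser are hypothetical. */
#if 0
    off = iemNativeEmitLoadGprImm64(pReNative, off, IEMNATIVE_CALL_ARG0_GREG, (uintptr_t)pvUser);
    AssertReturn(off != UINT32_MAX, UINT32_MAX);
    off = iemNativeEmitCallImm(pReNative, off, (uintptr_t)iemNativeHlpExample);
    AssertReturn(off != UINT32_MAX, UINT32_MAX);
#endif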
3207
3208/** @} */
3209
3210#endif /* !VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h */
3211