VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMN8veRecompiler.h@102684

Last change on this file was 102684, checked in by vboxsync, 16 months ago

VMM/IEM: Fixed another bug related to ah, ch, dh, bh storing (AMD64 host). bugref:10371

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 42.0 KB
1/* $Id: IEMN8veRecompiler.h 102684 2023-12-21 21:36:01Z vboxsync $ */
2/** @file
3 * IEM - Interpreted Execution Manager - Native Recompiler Internals.
4 */
5
6/*
7 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28#ifndef VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
29#define VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h
30#ifndef RT_WITHOUT_PRAGMA_ONCE
31# pragma once
32#endif
33
34
35/** @defgroup grp_iem_n8ve_re Native Recompiler Internals.
36 * @ingroup grp_iem_int
37 * @{
38 */
39
40/** @def IEMNATIVE_WITH_TB_DEBUG_INFO
41 * Enables generating internal debug info for better TB disassembly dumping. */
42#if defined(DEBUG) || defined(DOXYGEN_RUNNING)
43# define IEMNATIVE_WITH_TB_DEBUG_INFO
44#endif
45
46/** Always count instructions for now. */
47#define IEMNATIVE_WITH_INSTRUCTION_COUNTING
48
49
50/** @name Stack Frame Layout
51 *
52 * @{ */
53/** The size of the area for stack variables and spills and stuff.
54 * @note This limit is duplicated in the python script(s). We add 0x40 for
55 * alignment padding. */
56#define IEMNATIVE_FRAME_VAR_SIZE (0xc0 + 0x40)
57/** Number of 64-bit variable slots (0x100 / 8 = 32). */
58#define IEMNATIVE_FRAME_VAR_SLOTS (IEMNATIVE_FRAME_VAR_SIZE / 8)
59AssertCompile(IEMNATIVE_FRAME_VAR_SLOTS == 32);
60
61#ifdef RT_ARCH_AMD64
62/** A stack alignment adjustment (between the non-volatile register pushes and
63 * the stack variable area, so the latter is better aligned). */
64# define IEMNATIVE_FRAME_ALIGN_SIZE 8
65
66/** Number of stack arguments slots for calls made from the frame. */
67# ifdef RT_OS_WINDOWS
68# define IEMNATIVE_FRAME_STACK_ARG_COUNT 4
69# else
70# define IEMNATIVE_FRAME_STACK_ARG_COUNT 2
71# endif
72/** Number of any shadow arguments (spill area) for calls we make. */
73# ifdef RT_OS_WINDOWS
74# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT 4
75# else
76# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT 0
77# endif
78
79/** Frame pointer (RBP) relative offset of the last push. */
80# ifdef RT_OS_WINDOWS
81# define IEMNATIVE_FP_OFF_LAST_PUSH (7 * -8)
82# else
83# define IEMNATIVE_FP_OFF_LAST_PUSH (5 * -8)
84# endif
85/** Frame pointer (RBP) relative offset of the stack variable area (the lowest
86 * address for it). */
87# define IEMNATIVE_FP_OFF_STACK_VARS (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)
88/** Frame pointer (RBP) relative offset of the first stack argument for calls. */
89# define IEMNATIVE_FP_OFF_STACK_ARG0 (IEMNATIVE_FP_OFF_STACK_VARS - IEMNATIVE_FRAME_STACK_ARG_COUNT * 8)
90/** Frame pointer (RBP) relative offset of the second stack argument for calls. */
91# define IEMNATIVE_FP_OFF_STACK_ARG1 (IEMNATIVE_FP_OFF_STACK_ARG0 + 8)
92# ifdef RT_OS_WINDOWS
93/** Frame pointer (RBP) relative offset of the third stack argument for calls. */
94# define IEMNATIVE_FP_OFF_STACK_ARG2 (IEMNATIVE_FP_OFF_STACK_ARG0 + 16)
95/** Frame pointer (RBP) relative offset of the fourth stack argument for calls. */
96# define IEMNATIVE_FP_OFF_STACK_ARG3 (IEMNATIVE_FP_OFF_STACK_ARG0 + 24)
97# endif
98
99# ifdef RT_OS_WINDOWS
100/** Frame pointer (RBP) relative offset of the first incoming shadow argument. */
101# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG0 (16)
102/** Frame pointer (RBP) relative offset of the second incoming shadow argument. */
103# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG1 (24)
104/** Frame pointer (RBP) relative offset of the third incoming shadow argument. */
105# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG2 (32)
106/** Frame pointer (RBP) relative offset of the fourth incoming shadow argument. */
107# define IEMNATIVE_FP_OFF_IN_SHADOW_ARG3 (40)
108# endif
109
110#elif RT_ARCH_ARM64
111/** No alignment padding needed for arm64. */
112# define IEMNATIVE_FRAME_ALIGN_SIZE 0
113/** No stack argument slots; the 8 argument registers will suffice. */
114# define IEMNATIVE_FRAME_STACK_ARG_COUNT 0
115/** There is no argument spill area. */
116# define IEMNATIVE_FRAME_SHADOW_ARG_COUNT 0
117
118/** Number of saved registers at the top of our stack frame.
119 * This includes the return address and old frame pointer, so x19 thru x30. */
120# define IEMNATIVE_FRAME_SAVE_REG_COUNT (12)
121/** The size of the register save area (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8 bytes). */
122# define IEMNATIVE_FRAME_SAVE_REG_SIZE (IEMNATIVE_FRAME_SAVE_REG_COUNT * 8)
123
124/** Frame pointer (BP) relative offset of the last push. */
125# define IEMNATIVE_FP_OFF_LAST_PUSH (10 * -8)
126
127/** Frame pointer (BP) relative offset of the stack variable area (the lowest
128 * address for it). */
129# define IEMNATIVE_FP_OFF_STACK_VARS (IEMNATIVE_FP_OFF_LAST_PUSH - IEMNATIVE_FRAME_ALIGN_SIZE - IEMNATIVE_FRAME_VAR_SIZE)
130
131#else
132# error "port me"
133#endif
134/** @} */
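
/*
 * A rough sketch of how the offsets above compose on a non-Windows AMD64 host
 * (values derived from the macros; illustrative only):
 *
 *      IEMNATIVE_FP_OFF_LAST_PUSH  = 5 * -8             = -0x028
 *      IEMNATIVE_FP_OFF_STACK_VARS = -0x28 - 8 - 0x100  = -0x130
 *      IEMNATIVE_FP_OFF_STACK_ARG0 = -0x130 - 2 * 8     = -0x140
 *      IEMNATIVE_FP_OFF_STACK_ARG1 = -0x140 + 8         = -0x138
 *
 * I.e. going down from RBP: the saved registers, 8 bytes of alignment padding,
 * the 0x100 byte variable area, and finally the stack argument slots.
 */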
135
136
137/** @name Fixed Register Allocation(s)
138 * @{ */
139/** @def IEMNATIVE_REG_FIXED_PVMCPU
140 * The number of the register holding the pVCpu pointer. */
141/** @def IEMNATIVE_REG_FIXED_PCPUMCTX
142 * The number of the register holding the &pVCpu->cpum.GstCtx pointer.
143 * @note This is not available on AMD64, only on ARM64. */
144/** @def IEMNATIVE_REG_FIXED_TMP0
145 * Dedicated temporary register.
146 * @todo replace this by a register allocator and content tracker. */
147/** @def IEMNATIVE_REG_FIXED_MASK
148 * Mask of GPRs with fixed assignments, either by us or dictated by the CPU/OS
149 * architecture. */
150#if defined(RT_ARCH_AMD64) && !defined(DOXYGEN_RUNNING)
151# define IEMNATIVE_REG_FIXED_PVMCPU X86_GREG_xBX
152# define IEMNATIVE_REG_FIXED_TMP0 X86_GREG_x11
153# define IEMNATIVE_REG_FIXED_MASK ( RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
154 | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) \
155 | RT_BIT_32(X86_GREG_xSP) \
156 | RT_BIT_32(X86_GREG_xBP) )
157
158#elif defined(RT_ARCH_ARM64) || defined(DOXYGEN_RUNNING)
159# define IEMNATIVE_REG_FIXED_PVMCPU ARMV8_A64_REG_X28
160# define IEMNATIVE_REG_FIXED_PCPUMCTX ARMV8_A64_REG_X27
161# define IEMNATIVE_REG_FIXED_TMP0 ARMV8_A64_REG_X15
162# define IEMNATIVE_REG_FIXED_MASK ( RT_BIT_32(ARMV8_A64_REG_SP) \
163 | RT_BIT_32(ARMV8_A64_REG_LR) \
164 | RT_BIT_32(ARMV8_A64_REG_BP) \
165 | RT_BIT_32(IEMNATIVE_REG_FIXED_PVMCPU) \
166 | RT_BIT_32(IEMNATIVE_REG_FIXED_PCPUMCTX) \
167 | RT_BIT_32(ARMV8_A64_REG_X18) \
168 | RT_BIT_32(IEMNATIVE_REG_FIXED_TMP0) )
169
170#else
171# error "port me"
172#endif
173/** @} */
174
175/** @name Call related registers.
176 * @{ */
177/** @def IEMNATIVE_CALL_RET_GREG
178 * The return value register. */
179/** @def IEMNATIVE_CALL_ARG_GREG_COUNT
180 * Number of arguments in registers. */
181/** @def IEMNATIVE_CALL_ARG0_GREG
182 * The general purpose register carrying argument \#0. */
183/** @def IEMNATIVE_CALL_ARG1_GREG
184 * The general purpose register carrying argument \#1. */
185/** @def IEMNATIVE_CALL_ARG2_GREG
186 * The general purpose register carrying argument \#2. */
187/** @def IEMNATIVE_CALL_ARG3_GREG
188 * The general purpose register carrying argument \#3. */
189/** @def IEMNATIVE_CALL_VOLATILE_GREG_MASK
190 * Mask of registers the callee will not save and may trash. */
191#ifdef RT_ARCH_AMD64
192# define IEMNATIVE_CALL_RET_GREG X86_GREG_xAX
193
194# ifdef RT_OS_WINDOWS
195# define IEMNATIVE_CALL_ARG_GREG_COUNT 4
196# define IEMNATIVE_CALL_ARG0_GREG X86_GREG_xCX
197# define IEMNATIVE_CALL_ARG1_GREG X86_GREG_xDX
198# define IEMNATIVE_CALL_ARG2_GREG X86_GREG_x8
199# define IEMNATIVE_CALL_ARG3_GREG X86_GREG_x9
200# define IEMNATIVE_CALL_ARGS_GREG_MASK ( RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) \
201 | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) \
202 | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG) \
203 | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) )
204# define IEMNATIVE_CALL_VOLATILE_GREG_MASK ( RT_BIT_32(X86_GREG_xAX) \
205 | RT_BIT_32(X86_GREG_xCX) \
206 | RT_BIT_32(X86_GREG_xDX) \
207 | RT_BIT_32(X86_GREG_x8) \
208 | RT_BIT_32(X86_GREG_x9) \
209 | RT_BIT_32(X86_GREG_x10) \
210 | RT_BIT_32(X86_GREG_x11) )
211# else
212# define IEMNATIVE_CALL_ARG_GREG_COUNT 6
213# define IEMNATIVE_CALL_ARG0_GREG X86_GREG_xDI
214# define IEMNATIVE_CALL_ARG1_GREG X86_GREG_xSI
215# define IEMNATIVE_CALL_ARG2_GREG X86_GREG_xDX
216# define IEMNATIVE_CALL_ARG3_GREG X86_GREG_xCX
217# define IEMNATIVE_CALL_ARG4_GREG X86_GREG_x8
218# define IEMNATIVE_CALL_ARG5_GREG X86_GREG_x9
219# define IEMNATIVE_CALL_ARGS_GREG_MASK ( RT_BIT_32(IEMNATIVE_CALL_ARG0_GREG) \
220 | RT_BIT_32(IEMNATIVE_CALL_ARG1_GREG) \
221 | RT_BIT_32(IEMNATIVE_CALL_ARG2_GREG) \
222 | RT_BIT_32(IEMNATIVE_CALL_ARG3_GREG) \
223 | RT_BIT_32(IEMNATIVE_CALL_ARG4_GREG) \
224 | RT_BIT_32(IEMNATIVE_CALL_ARG5_GREG) )
225# define IEMNATIVE_CALL_VOLATILE_GREG_MASK ( RT_BIT_32(X86_GREG_xAX) \
226 | RT_BIT_32(X86_GREG_xCX) \
227 | RT_BIT_32(X86_GREG_xDX) \
228 | RT_BIT_32(X86_GREG_xDI) \
229 | RT_BIT_32(X86_GREG_xSI) \
230 | RT_BIT_32(X86_GREG_x8) \
231 | RT_BIT_32(X86_GREG_x9) \
232 | RT_BIT_32(X86_GREG_x10) \
233 | RT_BIT_32(X86_GREG_x11) )
234# endif
235
236#elif defined(RT_ARCH_ARM64)
237# define IEMNATIVE_CALL_RET_GREG ARMV8_A64_REG_X0
238# define IEMNATIVE_CALL_ARG_GREG_COUNT 8
239# define IEMNATIVE_CALL_ARG0_GREG ARMV8_A64_REG_X0
240# define IEMNATIVE_CALL_ARG1_GREG ARMV8_A64_REG_X1
241# define IEMNATIVE_CALL_ARG2_GREG ARMV8_A64_REG_X2
242# define IEMNATIVE_CALL_ARG3_GREG ARMV8_A64_REG_X3
243# define IEMNATIVE_CALL_ARG4_GREG ARMV8_A64_REG_X4
244# define IEMNATIVE_CALL_ARG5_GREG ARMV8_A64_REG_X5
245# define IEMNATIVE_CALL_ARG6_GREG ARMV8_A64_REG_X6
246# define IEMNATIVE_CALL_ARG7_GREG ARMV8_A64_REG_X7
247# define IEMNATIVE_CALL_ARGS_GREG_MASK ( RT_BIT_32(ARMV8_A64_REG_X0) \
248 | RT_BIT_32(ARMV8_A64_REG_X1) \
249 | RT_BIT_32(ARMV8_A64_REG_X2) \
250 | RT_BIT_32(ARMV8_A64_REG_X3) \
251 | RT_BIT_32(ARMV8_A64_REG_X4) \
252 | RT_BIT_32(ARMV8_A64_REG_X5) \
253 | RT_BIT_32(ARMV8_A64_REG_X6) \
254 | RT_BIT_32(ARMV8_A64_REG_X7) )
255# define IEMNATIVE_CALL_VOLATILE_GREG_MASK ( RT_BIT_32(ARMV8_A64_REG_X0) \
256 | RT_BIT_32(ARMV8_A64_REG_X1) \
257 | RT_BIT_32(ARMV8_A64_REG_X2) \
258 | RT_BIT_32(ARMV8_A64_REG_X3) \
259 | RT_BIT_32(ARMV8_A64_REG_X4) \
260 | RT_BIT_32(ARMV8_A64_REG_X5) \
261 | RT_BIT_32(ARMV8_A64_REG_X6) \
262 | RT_BIT_32(ARMV8_A64_REG_X7) \
263 | RT_BIT_32(ARMV8_A64_REG_X8) \
264 | RT_BIT_32(ARMV8_A64_REG_X9) \
265 | RT_BIT_32(ARMV8_A64_REG_X10) \
266 | RT_BIT_32(ARMV8_A64_REG_X11) \
267 | RT_BIT_32(ARMV8_A64_REG_X12) \
268 | RT_BIT_32(ARMV8_A64_REG_X13) \
269 | RT_BIT_32(ARMV8_A64_REG_X14) \
270 | RT_BIT_32(ARMV8_A64_REG_X15) \
271 | RT_BIT_32(ARMV8_A64_REG_X16) \
272 | RT_BIT_32(ARMV8_A64_REG_X17) )
273
274#endif
275
276/** This is the maximum argument count we'll ever need. */
277#if defined(RT_OS_WINDOWS) && defined(VBOXSTRICTRC_STRICT_ENABLED)
278# define IEMNATIVE_CALL_MAX_ARG_COUNT 8
279#else
280# define IEMNATIVE_CALL_MAX_ARG_COUNT 7
281#endif
282/** @} */
283
284
285/** @def IEMNATIVE_HST_GREG_COUNT
286 * Number of host general purpose registers we track. */
287/** @def IEMNATIVE_HST_GREG_MASK
288 * Mask corresponding to IEMNATIVE_HST_GREG_COUNT that can be applied to
289 * inverted register masks and such to get down to a correct set of regs. */
290#ifdef RT_ARCH_AMD64
291# define IEMNATIVE_HST_GREG_COUNT 16
292# define IEMNATIVE_HST_GREG_MASK UINT32_C(0xffff)
293
294#elif defined(RT_ARCH_ARM64)
295# define IEMNATIVE_HST_GREG_COUNT 32
296# define IEMNATIVE_HST_GREG_MASK UINT32_MAX
297#else
298# error "Port me!"
299#endif
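
/*
 * A minimal sketch of the intended use (illustrative; fFree is a made-up local):
 * @code
 *      uint32_t const fFree = ~pReNative->Core.bmHstRegs
 *                           & IEMNATIVE_HST_GREG_MASK
 *                           & ~IEMNATIVE_REG_FIXED_MASK;
 * @endcode
 * Inverting an allocation bitmap also sets the bits above the last tracked host
 * register, so ANDing with IEMNATIVE_HST_GREG_MASK brings the result back down
 * to a valid register set.
 */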
300
301
302/** Native code generator label types. */
303typedef enum
304{
305 kIemNativeLabelType_Invalid = 0,
306 /* Labels w/o data, only one instance per TB: */
307 kIemNativeLabelType_Return,
308 kIemNativeLabelType_ReturnBreak,
309 kIemNativeLabelType_ReturnWithFlags,
310 kIemNativeLabelType_NonZeroRetOrPassUp,
311 kIemNativeLabelType_RaiseGp0,
312 kIemNativeLabelType_ObsoleteTb,
313 kIemNativeLabelType_NeedCsLimChecking,
314 kIemNativeLabelType_CheckBranchMiss,
315 /* Labels with data, potentially multiple instances per TB: */
316 kIemNativeLabelType_FirstWithMultipleInstances,
317 kIemNativeLabelType_If = kIemNativeLabelType_FirstWithMultipleInstances,
318 kIemNativeLabelType_Else,
319 kIemNativeLabelType_Endif,
320 kIemNativeLabelType_CheckIrq,
321 kIemNativeLabelType_TlbMiss,
322 kIemNativeLabelType_TlbDone,
323 kIemNativeLabelType_End
324} IEMNATIVELABELTYPE;
325
326/** Native code generator label definition. */
327typedef struct IEMNATIVELABEL
328{
329 /** Code offset if defined, UINT32_MAX if it needs to be generated after/in
330 * the epilog. */
331 uint32_t off;
332 /** The type of label (IEMNATIVELABELTYPE). */
333 uint16_t enmType;
334 /** Additional label data, type specific. */
335 uint16_t uData;
336} IEMNATIVELABEL;
337/** Pointer to a label. */
338typedef IEMNATIVELABEL *PIEMNATIVELABEL;
339
340
341/** Native code generator fixup types. */
342typedef enum
343{
344 kIemNativeFixupType_Invalid = 0,
345#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
346 /** AMD64 fixup: PC relative 32-bit with addend in offAddend. */
347 kIemNativeFixupType_Rel32,
348#elif defined(RT_ARCH_ARM64)
349 /** ARM64 fixup: PC relative offset at bits 25:0 (B, BL). */
350 kIemNativeFixupType_RelImm26At0,
351 /** ARM64 fixup: PC relative offset at bits 23:5 (CBZ, CBNZ, B.CC). */
352 kIemNativeFixupType_RelImm19At5,
353 /** ARM64 fixup: PC relative offset at bits 18:5 (TBZ, TBNZ). */
354 kIemNativeFixupType_RelImm14At5,
355#endif
356 kIemNativeFixupType_End
357} IEMNATIVEFIXUPTYPE;
358
359/** Native code generator fixup. */
360typedef struct IEMNATIVEFIXUP
361{
362 /** Code offset of the fixup location. */
363 uint32_t off;
364 /** The IEMNATIVELABEL this is a fixup for. */
365 uint16_t idxLabel;
366 /** The fixup type (IEMNATIVEFIXUPTYPE). */
367 uint8_t enmType;
368 /** Addend or other data. */
369 int8_t offAddend;
370} IEMNATIVEFIXUP;
371/** Pointer to a native code generator fixup. */
372typedef IEMNATIVEFIXUP *PIEMNATIVEFIXUP;
373
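/*
 * A rough sketch of how labels and fixups work together, using the AMD64 Rel32
 * fixup and the helpers declared further down in this header (illustrative; the
 * -4 addend is only an example value):
 * @code
 *      uint32_t const idxLabel = iemNativeLabelCreate(pReNative, kIemNativeLabelType_ReturnBreak);
 *      // ... emit a jump instruction at 'off' with a zero placeholder displacement ...
 *      iemNativeAddFixup(pReNative, off, idxLabel, kIemNativeFixupType_Rel32, -4);
 * @endcode
 * Once the label is defined (iemNativeLabelDefine) and the code is finalized,
 * each recorded fixup location is patched with the real displacement.
 */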
374
375/**
376 * Guest registers that can be shadowed in GPRs.
377 */
378typedef enum IEMNATIVEGSTREG : uint8_t
379{
380 kIemNativeGstReg_GprFirst = 0,
381 kIemNativeGstReg_GprLast = kIemNativeGstReg_GprFirst + 15,
382 kIemNativeGstReg_Pc,
383 kIemNativeGstReg_EFlags, /**< 32-bit, includes internal flags. */
384 kIemNativeGstReg_SegSelFirst,
385 kIemNativeGstReg_SegSelLast = kIemNativeGstReg_SegSelFirst + 5,
386 kIemNativeGstReg_SegBaseFirst,
387 kIemNativeGstReg_SegBaseLast = kIemNativeGstReg_SegBaseFirst + 5,
388 kIemNativeGstReg_SegLimitFirst,
389 kIemNativeGstReg_SegLimitLast = kIemNativeGstReg_SegLimitFirst + 5,
390 kIemNativeGstReg_End
391} IEMNATIVEGSTREG;
392
393/** @name Helpers for converting register numbers to IEMNATIVEGSTREG values.
394 * @{ */
395#define IEMNATIVEGSTREG_GPR(a_iGpr) ((IEMNATIVEGSTREG)(kIemNativeGstReg_GprFirst + (a_iGpr) ))
396#define IEMNATIVEGSTREG_SEG_SEL(a_iSegReg) ((IEMNATIVEGSTREG)(kIemNativeGstReg_SegSelFirst + (a_iSegReg) ))
397#define IEMNATIVEGSTREG_SEG_BASE(a_iSegReg) ((IEMNATIVEGSTREG)(kIemNativeGstReg_SegBaseFirst + (a_iSegReg) ))
398#define IEMNATIVEGSTREG_SEG_LIMIT(a_iSegReg) ((IEMNATIVEGSTREG)(kIemNativeGstReg_SegLimitFirst + (a_iSegReg) ))
399/** @} */
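
/*
 * For example (illustrative):
 * @code
 *      IEMNATIVEGSTREG const enmGstRax = IEMNATIVEGSTREG_GPR(X86_GREG_xAX);
 *      IEMNATIVEGSTREG const enmGstCs  = IEMNATIVEGSTREG_SEG_SEL(X86_SREG_CS);
 * @endcode
 */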
400
401/**
402 * Intended use statement for iemNativeRegAllocTmpForGuestReg().
403 */
404typedef enum IEMNATIVEGSTREGUSE
405{
406 /** The usage is read-only, the register holding the guest register
407 * shadow copy will not be modified by the caller. */
408 kIemNativeGstRegUse_ReadOnly = 0,
409 /** The caller will update the guest register (think: PC += cbInstr).
410 * The guest shadow copy will follow the returned register. */
411 kIemNativeGstRegUse_ForUpdate,
412 /** The caller will put an entirely new value in the guest register, so
413 * if a new register is allocated it will be returned uninitialized. */
414 kIemNativeGstRegUse_ForFullWrite,
415 /** The caller will use the guest register value as input in a calculation
416 * and the host register will be modified.
417 * This means that the returned host register will not be marked as a shadow
418 * copy of the guest register. */
419 kIemNativeGstRegUse_Calculation
420} IEMNATIVEGSTREGUSE;
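
/*
 * A minimal sketch of how the intended-use values pair up with
 * iemNativeRegAllocTmpForGuestReg() (declared further down; illustrative only):
 * @code
 *      uint8_t const idxPcReg = iemNativeRegAllocTmpForGuestReg(pReNative, &off, kIemNativeGstReg_Pc,
 *                                                               kIemNativeGstRegUse_ForUpdate);
 *      // ... emit code adding cbInstr to the host register idxPcReg ...
 *      iemNativeRegFreeTmp(pReNative, idxPcReg);
 * @endcode
 */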
421
422/**
423 * Guest registers (classes) that can be referenced.
424 */
425typedef enum IEMNATIVEGSTREGREF : uint8_t
426{
427 kIemNativeGstRegRef_Invalid = 0,
428 kIemNativeGstRegRef_Gpr,
429 kIemNativeGstRegRef_GprHighByte, /**< AH, CH, DH, BH*/
430 kIemNativeGstRegRef_EFlags,
431 kIemNativeGstRegRef_MxCsr,
432 kIemNativeGstRegRef_FpuReg,
433 kIemNativeGstRegRef_MReg,
434 kIemNativeGstRegRef_XReg,
435 //kIemNativeGstRegRef_YReg, - doesn't work.
436 kIemNativeGstRegRef_End
437} IEMNATIVEGSTREGREF;
438
439
440/** Variable kinds. */
441typedef enum IEMNATIVEVARKIND : uint8_t
442{
443 /** Customary invalid zero value. */
444 kIemNativeVarKind_Invalid = 0,
445 /** This is either in a register or on the stack. */
446 kIemNativeVarKind_Stack,
447 /** Immediate value - loaded into register when needed, or can live on the
448 * stack if referenced (in theory). */
449 kIemNativeVarKind_Immediate,
450 /** Variable reference - loaded into register when needed, never stack. */
451 kIemNativeVarKind_VarRef,
452 /** Guest register reference - loaded into register when needed, never stack. */
453 kIemNativeVarKind_GstRegRef,
454 /** End of valid values. */
455 kIemNativeVarKind_End
456} IEMNATIVEVARKIND;
457
458
459/** Variable or argument. */
460typedef struct IEMNATIVEVAR
461{
462 /** The kind of variable. */
463 IEMNATIVEVARKIND enmKind;
464 /** The variable size in bytes. */
465 uint8_t cbVar;
466 /** The first stack slot (uint64_t), except for immediates and references
467 * where it usually is UINT8_MAX. This is allocated lazily, so if a variable
468 * has a stack slot it has been initialized and has a value. Unused variables
469 * have neither a stack slot nor a host register assignment. */
470 uint8_t idxStackSlot;
471 /** The host register allocated for the variable, UINT8_MAX if not. */
472 uint8_t idxReg;
473 /** The argument number if this is an argument, UINT8_MAX if a regular variable. */
474 uint8_t uArgNo;
475 /** If referenced, the index of the variable referencing this one, otherwise
476 * UINT8_MAX. A referenced variable must only be placed on the stack and
477 * must be either kIemNativeVarKind_Stack or kIemNativeVarKind_Immediate. */
478 uint8_t idxReferrerVar;
479 /** Guest register being shadowed here, kIemNativeGstReg_End(/UINT8_MAX) if not.
480 * @todo not sure what this really is for... */
481 IEMNATIVEGSTREG enmGstReg;
482 /** Set if the register is currently used exclusively, false if the
483 * variable is idle and the register can be grabbed. */
484 bool fRegAcquired;
485
486 union
487 {
488 /** kIemNativeVarKind_Immediate: The immediate value. */
489 uint64_t uValue;
490 /** kIemNativeVarKind_VarRef: The index of the variable being referenced. */
491 uint8_t idxRefVar;
492 /** kIemNativeVarKind_GstRegRef: The guest register being referenced. */
493 struct
494 {
495 /** The class of register. */
496 IEMNATIVEGSTREGREF enmClass;
497 /** Index within the class. */
498 uint8_t idx;
499 } GstRegRef;
500 } u;
501} IEMNATIVEVAR;
502
503/** What is being kept in a host register. */
504typedef enum IEMNATIVEWHAT : uint8_t
505{
506 /** The traditional invalid zero value. */
507 kIemNativeWhat_Invalid = 0,
508 /** Mapping a variable (IEMNATIVEHSTREG::idxVar). */
509 kIemNativeWhat_Var,
510 /** Temporary register, this is typically freed when a MC completes. */
511 kIemNativeWhat_Tmp,
512 /** Call argument w/o a variable mapping. This is freed (via
513 * IEMNATIVE_CALL_VOLATILE_GREG_MASK) after the call is emitted. */
514 kIemNativeWhat_Arg,
515 /** Return status code.
516 * @todo not sure if we need this... */
517 kIemNativeWhat_rc,
518 /** The fixed pVCpu (PVMCPUCC) register.
519 * @todo consider offsetting this on amd64 to use negative offsets to access
520 * more members using 8-byte disp. */
521 kIemNativeWhat_pVCpuFixed,
522 /** The fixed pCtx (PCPUMCTX) register.
523 * @todo consider offsetting this on amd64 to use negative offsets to access
524 * more members using 8-byte disp. */
525 kIemNativeWhat_pCtxFixed,
526 /** Fixed temporary register. */
527 kIemNativeWhat_FixedTmp,
528 /** Register reserved by the CPU or OS architecture. */
529 kIemNativeWhat_FixedReserved,
530 /** End of valid values. */
531 kIemNativeWhat_End
532} IEMNATIVEWHAT;
533
534/**
535 * Host general register entry.
536 *
537 * The actual allocation status is kept in IEMRECOMPILERSTATE::bmHstRegs.
538 *
539 * @todo Track immediate values in host registers similarly to how we track the
540 * guest register shadow copies. For it to be real helpful, though,
541 * we probably need to know which will be reused and put them into
542 * non-volatile registers, otherwise it's going to be more or less
543 * restricted to an instruction or two.
544 */
545typedef struct IEMNATIVEHSTREG
546{
547 /** Set of guest registers this one shadows.
548 *
549 * Using a bitmap here so we can designate the same host register as a copy
550 * for more than one guest register. This is expected to be useful in
551 * situations where one value is copied to several registers in a sequence.
552 * If the mapping is 1:1, then we'd have to pick which side of a 'MOV SRC,DST'
553 * sequence we'd want to let this register follow to be a copy of and there
554 * will always be places where we'd be picking the wrong one.
555 */
556 uint64_t fGstRegShadows;
557 /** What is being kept in this register. */
558 IEMNATIVEWHAT enmWhat;
559 /** Variable index if holding a variable, otherwise UINT8_MAX. */
560 uint8_t idxVar;
561 /** Alignment padding. */
562 uint8_t abAlign[6];
563} IEMNATIVEHSTREG;
564
565
566/**
567 * Core state for the native recompiler, that is, things that need careful
568 * handling when dealing with branches.
569 */
570typedef struct IEMNATIVECORESTATE
571{
572 /** Allocation bitmap for aHstRegs. */
573 uint32_t bmHstRegs;
574
575 /** Bitmap marking which host register contains guest register shadow copies.
576 * This is used during register allocation to try preserve copies. */
577 uint32_t bmHstRegsWithGstShadow;
578 /** Bitmap marking valid entries in aidxGstRegShadows. */
579 uint64_t bmGstRegShadows;
580
581 union
582 {
583 /** Indexes of the argument variables, UINT8_MAX if not valid. */
584 uint8_t aidxArgVars[8];
585 /** For more efficient resetting. */
586 uint64_t u64ArgVars;
587 };
588
589 /** Allocation bitmap for the stack. */
590 uint32_t bmStack;
591 /** Allocation bitmap for aVars. */
592 uint32_t bmVars;
593
594 /** Maps a guest register to a host GPR (indexed by IEMNATIVEGSTREG).
595 * Entries are only valid if the corresponding bit in bmGstRegShadows is set.
596 * (A shadow copy of a guest register can only be held in one host register;
597 * there are no duplicate copies or ambiguities like that). */
598 uint8_t aidxGstRegShadows[kIemNativeGstReg_End];
599
600 /** Host register allocation tracking. */
601 IEMNATIVEHSTREG aHstRegs[IEMNATIVE_HST_GREG_COUNT];
602
603 /** Variables and arguments. */
604 IEMNATIVEVAR aVars[9];
605} IEMNATIVECORESTATE;
606/** Pointer to core state. */
607typedef IEMNATIVECORESTATE *PIEMNATIVECORESTATE;
608/** Pointer to const core state. */
609typedef IEMNATIVECORESTATE const *PCIEMNATIVECORESTATE;
610
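/*
 * The shadow tracking fields are kept in sync roughly like this when host
 * register idxHstReg becomes the shadow copy of guest register enmGstReg
 * (an illustrative sketch of the invariants, not the actual helper code):
 * @code
 *      pReNative->Core.aHstRegs[idxHstReg].fGstRegShadows |= RT_BIT_64(enmGstReg);
 *      pReNative->Core.bmGstRegShadows                    |= RT_BIT_64(enmGstReg);
 *      pReNative->Core.bmHstRegsWithGstShadow             |= RT_BIT_32(idxHstReg);
 *      pReNative->Core.aidxGstRegShadows[enmGstReg]        = idxHstReg;
 * @endcode
 */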
611
612/**
613 * Conditional stack entry.
614 */
615typedef struct IEMNATIVECOND
616{
617 /** Set if we're in the "else" part, clear if we're in the "if" before it. */
618 bool fInElse;
619 /** The label for the IEM_MC_ELSE. */
620 uint32_t idxLabelElse;
621 /** The label for the IEM_MC_ENDIF. */
622 uint32_t idxLabelEndIf;
623 /** The initial state snapshot as the if-block starts executing. */
624 IEMNATIVECORESTATE InitialState;
625 /** The state snapshot at the end of the if-block. */
626 IEMNATIVECORESTATE IfFinalState;
627} IEMNATIVECOND;
628/** Pointer to a condition stack entry. */
629typedef IEMNATIVECOND *PIEMNATIVECOND;
630
631
632/**
633 * Native recompiler state.
634 */
635typedef struct IEMRECOMPILERSTATE
636{
637 /** Size of the buffer that pbNativeRecompileBufR3 points to in
638 * IEMNATIVEINSTR units. */
639 uint32_t cInstrBufAlloc;
640#ifdef VBOX_STRICT
641 /** Strict: How far the last iemNativeInstrBufEnsure() checked. */
642 uint32_t offInstrBufChecked;
643#else
644 uint32_t uPadding1; /* We don't keep track of the size here... */
645#endif
646 /** Fixed temporary code buffer for native recompilation. */
647 PIEMNATIVEINSTR pInstrBuf;
648
649 /** Bitmap of the label types used. */
650 uint64_t bmLabelTypes;
651 /** Actual number of labels in paLabels. */
652 uint32_t cLabels;
653 /** Max number of entries allowed in paLabels before reallocating it. */
654 uint32_t cLabelsAlloc;
655 /** Labels defined while recompiling (referenced by fixups). */
656 PIEMNATIVELABEL paLabels;
657 /** Array with indexes of unique labels (uData always 0). */
658 uint32_t aidxUniqueLabels[kIemNativeLabelType_FirstWithMultipleInstances];
659
660 /** Actual number of fixups in paFixups. */
661 uint32_t cFixups;
662 /** Max number of entries allowed in paFixups before reallocating it. */
663 uint32_t cFixupsAlloc;
664 /** Buffer used by the recompiler for recording fixups when generating code. */
665 PIEMNATIVEFIXUP paFixups;
666
667#ifdef IEMNATIVE_WITH_TB_DEBUG_INFO
668 /** Number of debug info entries allocated for pDbgInfo. */
669 uint32_t cDbgInfoAlloc;
670 uint32_t uPadding;
671 /** Debug info. */
672 PIEMTBDBG pDbgInfo;
673#endif
674
675 /** The translation block being recompiled. */
676 PCIEMTB pTbOrg;
677
678 /** Condition sequence number (for generating unique labels). */
679 uint16_t uCondSeqNo;
680 /** Check IRQ sequence number (for generating unique labels). */
681 uint16_t uCheckIrqSeqNo;
682 /** TLB load sequence number (for generating unique labels). */
683 uint16_t uTlbSeqNo;
684 /** The current condition stack depth (aCondStack). */
685 uint8_t cCondDepth;
686
687 /** The argument count + hidden regs from the IEM_MC_BEGIN statement. */
688 uint8_t cArgs;
689 /** The IEM_CIMPL_F_XXX flags from the IEM_MC_BEGIN statement. */
690 uint32_t fCImpl;
691 /** The IEM_MC_F_XXX flags from the IEM_MC_BEGIN statement. */
692 uint32_t fMc;
693 /** The expected IEMCPU::fExec value for the current call/instruction. */
694 uint32_t fExec;
695
696 /** Core state requiring care with branches. */
697 IEMNATIVECORESTATE Core;
698
699 /** The condition nesting stack. */
700 IEMNATIVECOND aCondStack[2];
701
702#ifndef IEM_WITH_THROW_CATCH
703 /** Pointer to the setjmp/longjmp buffer if we're not using C++ exceptions
704 * for recompilation error handling. */
705 jmp_buf JmpBuf;
706#endif
707} IEMRECOMPILERSTATE;
708/** Pointer to a native recompiler state. */
709typedef IEMRECOMPILERSTATE *PIEMRECOMPILERSTATE;
710
711
712/** @def IEMNATIVE_TRY_SETJMP
713 * Wrapper around setjmp / try, hiding all the ugly differences.
714 *
715 * @note Use with extreme care as this is a fragile macro.
716 * @param a_pReNative The native recompile state.
717 * @param a_rcTarget The variable that should receive the status code in case
718 * of a longjmp/throw.
719 */
720/** @def IEMNATIVE_CATCH_LONGJMP_BEGIN
721 * Start wrapper for catch / setjmp-else.
722 *
723 * This will set up a scope.
724 *
725 * @note Use with extreme care as this is a fragile macro.
726 * @param a_pReNative The native recompile state.
727 * @param a_rcTarget The variable that should receive the status code in case
728 * of a longjmp/throw.
729 */
730/** @def IEMNATIVE_CATCH_LONGJMP_END
731 * End wrapper for catch / setjmp-else.
732 *
733 * This will close the scope set up by IEMNATIVE_CATCH_LONGJMP_BEGIN and clean
734 * up the state.
735 *
736 * @note Use with extreme care as this is a fragile macro.
737 * @param a_pReNative The native recompile state.
738 */
739/** @def IEMNATIVE_DO_LONGJMP
740 *
741 * Wrapper around longjmp / throw.
742 *
743 * @param a_pReNative The native recompile state.
744 * @param a_rc The status code jump back with / throw.
745 */
746#ifdef IEM_WITH_THROW_CATCH
747# define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \
748 a_rcTarget = VINF_SUCCESS; \
749 try
750# define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \
751 catch (int rcThrown) \
752 { \
753 a_rcTarget = rcThrown
754# define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \
755 } \
756 ((void)0)
757# define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc) throw int(a_rc)
758#else /* !IEM_WITH_THROW_CATCH */
759# define IEMNATIVE_TRY_SETJMP(a_pReNative, a_rcTarget) \
760 if ((a_rcTarget = setjmp((a_pReNative)->JmpBuf)) == 0)
761# define IEMNATIVE_CATCH_LONGJMP_BEGIN(a_pReNative, a_rcTarget) \
762 else \
763 { \
764 ((void)0)
765# define IEMNATIVE_CATCH_LONGJMP_END(a_pReNative) \
766 }
767# define IEMNATIVE_DO_LONGJMP(a_pReNative, a_rc) longjmp((a_pReNative)->JmpBuf, (a_rc))
768#endif /* !IEM_WITH_THROW_CATCH */
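
/*
 * A rough sketch of the usage pattern (illustrative; the emitter call and the
 * status variable are made up):
 * @code
 *      int rcFailure = VINF_SUCCESS;
 *      IEMNATIVE_TRY_SETJMP(pReNative, rcFailure)
 *      {
 *          off = iemNativeEmitSomething(pReNative, off); // may IEMNATIVE_DO_LONGJMP() on failure
 *      }
 *      IEMNATIVE_CATCH_LONGJMP_BEGIN(pReNative, rcFailure);
 *      {
 *          // rcFailure holds the status code passed to IEMNATIVE_DO_LONGJMP().
 *      }
 *      IEMNATIVE_CATCH_LONGJMP_END(pReNative);
 * @endcode
 */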
769
770
771/**
772 * Native recompiler worker for a threaded function.
773 *
774 * @returns New code buffer offset; throws VBox status code in case of a failure.
775 * @param pReNative The native recompiler state.
776 * @param off The current code buffer offset.
777 * @param pCallEntry The threaded call entry.
778 *
779 * @note This may throw/longjmp VBox status codes (int) to abort compilation, so no RT_NOEXCEPT!
780 */
781typedef uint32_t (VBOXCALL FNIEMNATIVERECOMPFUNC)(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry);
782/** Pointer to a native recompiler worker for a threaded function. */
783typedef FNIEMNATIVERECOMPFUNC *PFNIEMNATIVERECOMPFUNC;
784
785/** Defines a native recompiler worker for a threaded function.
786 * @see FNIEMNATIVERECOMPFUNC */
787#define IEM_DECL_IEMNATIVERECOMPFUNC_DEF(a_Name) \
788 uint32_t VBOXCALL a_Name(PIEMRECOMPILERSTATE pReNative, uint32_t off, PCIEMTHRDEDCALLENTRY pCallEntry)
789
790/** Prototypes a native recompiler function for a threaded function.
791 * @see FNIEMNATIVERECOMPFUNC */
792#define IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(a_Name) FNIEMNATIVERECOMPFUNC a_Name
793
794
795/** Define a native recompiler helper function, safe to call from the TB code. */
796#define IEM_DECL_NATIVE_HLP_DEF(a_RetType, a_Name, a_ArgList) \
797 DECL_HIDDEN_THROW(a_RetType) VBOXCALL a_Name a_ArgList
798/** Prototype a native recompiler helper function, safe to call from the TB code. */
799#define IEM_DECL_NATIVE_HLP_PROTO(a_RetType, a_Name, a_ArgList) \
800 DECL_HIDDEN_THROW(a_RetType) VBOXCALL a_Name a_ArgList
801
802
803DECL_HIDDEN_THROW(uint32_t) iemNativeLabelCreate(PIEMRECOMPILERSTATE pReNative, IEMNATIVELABELTYPE enmType,
804 uint32_t offWhere = UINT32_MAX, uint16_t uData = 0);
805DECL_HIDDEN_THROW(void) iemNativeLabelDefine(PIEMRECOMPILERSTATE pReNative, uint32_t idxLabel, uint32_t offWhere);
806DECL_HIDDEN_THROW(void) iemNativeAddFixup(PIEMRECOMPILERSTATE pReNative, uint32_t offWhere, uint32_t idxLabel,
807 IEMNATIVEFIXUPTYPE enmType, int8_t offAddend = 0);
808DECL_HIDDEN_THROW(PIEMNATIVEINSTR) iemNativeInstrBufEnsureSlow(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq);
809
810DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmp(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, bool fPreferVolatile = true);
811DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpEx(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint32_t fRegMask,
812 bool fPreferVolatile = true);
813DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpImm(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint64_t uImm,
814 bool fPreferVolatile = true);
815DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestReg(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
816 IEMNATIVEGSTREG enmGstReg, IEMNATIVEGSTREGUSE enmIntendedUse);
817DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocTmpForGuestRegIfAlreadyPresent(PIEMRECOMPILERSTATE pReNative, uint32_t *poff,
818 IEMNATIVEGSTREG enmGstReg);
819
820DECL_HIDDEN_THROW(uint8_t) iemNativeRegAllocVar(PIEMRECOMPILERSTATE pReNative, uint32_t *poff, uint8_t idxVar);
821DECL_HIDDEN_THROW(uint32_t) iemNativeRegAllocArgs(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs);
822DECL_HIDDEN_THROW(uint8_t) iemNativeRegAssignRc(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg);
823DECLHIDDEN(void) iemNativeRegFree(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
824DECLHIDDEN(void) iemNativeRegFreeTmp(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
825DECLHIDDEN(void) iemNativeRegFreeTmpImm(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg) RT_NOEXCEPT;
826DECLHIDDEN(void) iemNativeRegFreeVar(PIEMRECOMPILERSTATE pReNative, uint8_t idxHstReg, bool fFlushShadows) RT_NOEXCEPT;
827DECLHIDDEN(void) iemNativeRegFreeAndFlushMask(PIEMRECOMPILERSTATE pReNative, uint32_t fHstRegMask) RT_NOEXCEPT;
828DECL_HIDDEN_THROW(uint32_t) iemNativeRegFlushPendingWrites(PIEMRECOMPILERSTATE pReNative, uint32_t off);
829DECL_HIDDEN_THROW(uint32_t) iemNativeRegMoveAndFreeAndFlushAtCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t cArgs,
830 uint32_t fKeepVars = 0);
831DECLHIDDEN(void) iemNativeRegFlushGuestShadows(PIEMRECOMPILERSTATE pReNative, uint64_t fGstRegs) RT_NOEXCEPT;
832
833DECL_HIDDEN_THROW(uint8_t) iemNativeVarGetStackSlot(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar);
834DECL_HIDDEN_THROW(uint8_t) iemNativeVarRegisterAcquireForGuestReg(PIEMRECOMPILERSTATE pReNative, uint8_t idxVar,
835 IEMNATIVEGSTREG enmGstReg, uint32_t *poff);
836
837
838DECL_HIDDEN_THROW(uint32_t) iemNativeEmitLoadGprWithGstShadowReg(PIEMRECOMPILERSTATE pReNative, uint32_t off,
839 uint8_t idxHstReg, IEMNATIVEGSTREG enmGstReg);
840DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCheckCallRetAndPassUp(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr);
841DECL_HIDDEN_THROW(uint32_t) iemNativeEmitCImplCall(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint8_t idxInstr,
842 uint64_t fGstShwFlush, uintptr_t pfnCImpl, uint8_t cbInstr, uint8_t cAddParams,
843 uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
844DECL_HIDDEN_THROW(uint32_t) iemNativeEmitThreadedCall(PIEMRECOMPILERSTATE pReNative, uint32_t off,
845 PCIEMTHRDEDCALLENTRY pCallEntry);
846
847IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_Nop);
848IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_LogCpuState);
849IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_DeferToCImpl0);
850IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckIrq);
851IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckMode);
852IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckCsLim);
853IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodes);
854IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodes);
855IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesConsiderCsLim);
856IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckCsLimAndPcAndOpcodes);
857IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckPcAndOpcodes);
858IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckPcAndOpcodesConsiderCsLim);
859IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesLoadingTlb);
860IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesLoadingTlb);
861IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesLoadingTlbConsiderCsLim);
862IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesAcrossPageLoadingTlb);
863IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesAcrossPageLoadingTlb);
864IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesAcrossPageLoadingTlbConsiderCsLim);
865IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesOnNextPageLoadingTlb);
866IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesOnNextPageLoadingTlb);
867IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesOnNextPageLoadingTlbConsiderCsLim);
868IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckCsLimAndOpcodesOnNewPageLoadingTlb);
869IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesOnNewPageLoadingTlb);
870IEM_DECL_IEMNATIVERECOMPFUNC_PROTO(iemNativeRecompFunc_BltIn_CheckOpcodesOnNewPageLoadingTlbConsiderCsLim);
871
872extern DECL_HIDDEN_DATA(const char * const) g_apszIemNativeHstRegNames[];
873
874
875/**
876 * Ensures that there is sufficient space in the instruction output buffer.
877 *
878 * This will reallocate the buffer if needed and allowed.
879 *
880 * @note Always use IEMNATIVE_ASSERT_INSTR_BUF_ENSURE when done to check the
881 * allocation size.
882 *
883 * @returns Pointer to the instruction output buffer on success; throws VBox
884 * status code on failure, so no need to check it.
885 * @param pReNative The native recompile state.
886 * @param off Current instruction offset. Works safely for UINT32_MAX
887 * as well.
888 * @param cInstrReq Number of instructions about to be added. It's okay to
889 * overestimate this a bit.
890 */
891DECL_FORCE_INLINE_THROW(PIEMNATIVEINSTR)
892iemNativeInstrBufEnsure(PIEMRECOMPILERSTATE pReNative, uint32_t off, uint32_t cInstrReq)
893{
894 uint64_t const offChecked = off + (uint64_t)cInstrReq; /** @todo may reconsider the need for UINT32_MAX safety... */
895 if (RT_LIKELY(offChecked <= pReNative->cInstrBufAlloc))
896 {
897#ifdef VBOX_STRICT
898 pReNative->offInstrBufChecked = offChecked;
899#endif
900 return pReNative->pInstrBuf;
901 }
902 return iemNativeInstrBufEnsureSlow(pReNative, off, cInstrReq);
903}
904
905/**
906 * Checks that we didn't exceed the space requested in the last
907 * iemNativeInstrBufEnsure() call.
908 */
909#define IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(a_pReNative, a_off) \
910 AssertMsg((a_off) <= (a_pReNative)->offInstrBufChecked, \
911 ("off=%#x offInstrBufChecked=%#x\n", (a_off), (a_pReNative)->offInstrBufChecked))
912
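/*
 * Typical emitter pattern (a sketch; assumes an AMD64 host where IEMNATIVEINSTR
 * is a single code byte):
 * @code
 *      PIEMNATIVEINSTR const pCodeBuf = iemNativeInstrBufEnsure(pReNative, off, 1);
 *      pCodeBuf[off++] = 0x90; // nop
 *      IEMNATIVE_ASSERT_INSTR_BUF_ENSURE(pReNative, off);
 * @endcode
 */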
913/**
914 * Checks that a variable index is valid.
915 */
916#define IEMNATIVE_ASSERT_VAR_IDX(a_pReNative, a_idxVar) \
917 AssertMsg( (unsigned)(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
918 && ((a_pReNative)->Core.bmVars & RT_BIT_32(a_idxVar)), ("%s=%d\n", #a_idxVar, a_idxVar))
919
920/**
921 * Checks that a variable index is valid and that the variable is assigned the
922 * correct argument number.
923 * This also adds a RT_NOREF of a_idxVar.
924 */
925#define IEMNATIVE_ASSERT_ARG_VAR_IDX(a_pReNative, a_idxVar, a_uArgNo) do { \
926 RT_NOREF_PV(a_idxVar); \
927 AssertMsg( (unsigned)(a_idxVar) < RT_ELEMENTS((a_pReNative)->Core.aVars) \
928 && ((a_pReNative)->Core.bmVars & RT_BIT_32(a_idxVar))\
929 && (a_pReNative)->Core.aVars[a_idxVar].uArgNo == (a_uArgNo) \
930 , ("%s=%d; uArgNo=%d, expected %u\n", #a_idxVar, a_idxVar, \
931 (a_pReNative)->Core.aVars[RT_MAX(a_idxVar, RT_ELEMENTS((a_pReNative)->Core.aVars)) - 1].uArgNo, a_uArgNo)); \
932 } while (0)
933
934/**
935 * Calculates the stack address of a variable as a [r]BP displacement value.
936 */
937DECL_FORCE_INLINE(int32_t)
938iemNativeStackCalcBpDisp(uint8_t idxStackSlot)
939{
940 Assert(idxStackSlot < IEMNATIVE_FRAME_VAR_SLOTS);
941 return idxStackSlot * sizeof(uint64_t) + IEMNATIVE_FP_OFF_STACK_VARS;
942}
943
944/** @} */
945
946#endif /* !VMM_INCLUDED_SRC_include_IEMN8veRecompiler_h */
947