VirtualBox

source: vbox/trunk/src/VBox/VMM/include/IEMInternal.h@109128

Last change on this file since 109128 was 108908, checked in by vboxsync, 5 weeks ago

VMM/IEM: Working on the ARM bsd/opensource spec reader & decoder generator. Still work in progress. [fix] jiraref:VBP-1598

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 165.3 KB
/* $Id: IEMInternal.h 108908 2025-04-09 07:44:27Z vboxsync $ */
/** @file
 * IEM - Internal header file.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_include_IEMInternal_h
#define VMM_INCLUDED_SRC_include_IEMInternal_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

#ifndef RT_IN_ASSEMBLER
# include <VBox/vmm/cpum.h>
# include <VBox/vmm/iem.h>
# include <VBox/vmm/pgm.h>
# include <VBox/vmm/stam.h>
# include <VBox/param.h>

# include <iprt/setjmp-without-sigmask.h>
# include <iprt/list.h>
#endif /* !RT_IN_ASSEMBLER */


RT_C_DECLS_BEGIN


/** @defgroup grp_iem_int Internals
 * @ingroup grp_iem
 * @internal
 * @{
 */

/* Make doxygen happy w/o overcomplicating the #if checks. */
#ifdef DOXYGEN_RUNNING
# define IEM_WITH_THROW_CATCH
# define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
#endif

/** For expanding symbols in slickedit and other products tagging and
 * crossreferencing IEM symbols. */
#ifndef IEM_STATIC
# define IEM_STATIC static
#endif

/** @def IEM_WITH_THROW_CATCH
 * Enables using C++ throw/catch as an alternative to setjmp/longjmp in user
 * mode code.
 *
 * With GCC 11.3.1 and code TLB on linux, using throw/catch instead of
 * setjmp/longjmp resulted in bs2-test-1 running 3.00% faster and all but one
 * test result value improving by more than 1%. (Best out of three.)
 *
 * With Visual C++ 2019 and code TLB on windows, using throw/catch instead of
 * setjmp/longjmp resulted in bs2-test-1 running 3.68% faster and all but some
 * of the MMIO and CPUID tests ran noticeably faster.  Variation is greater
 * than on Linux, but it should be quite a bit faster for normal code.
 */
#if defined(__cplusplus) && defined(IN_RING3) && (defined(__GNUC__) || defined(_MSC_VER)) /* ASM-NOINC-START */
# define IEM_WITH_THROW_CATCH
#endif /*ASM-NOINC-END*/

/** @def IEM_WITH_ADAPTIVE_TIMER_POLLING
 * Enables the adaptive timer polling code.
 */
#if defined(DOXYGEN_RUNNING) || 1
# define IEM_WITH_ADAPTIVE_TIMER_POLLING
#endif

/** @def IEM_WITH_INTRA_TB_JUMPS
 * Enables loop-jumps within a TB (currently only to the first call).
 */
#if defined(DOXYGEN_RUNNING) || 1
# define IEM_WITH_INTRA_TB_JUMPS
#endif

/** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING
 * Enables the delayed PC updating optimization (see @bugref{10373}).
 */
#if defined(DOXYGEN_RUNNING) || 1
# define IEMNATIVE_WITH_DELAYED_PC_UPDATING
#endif
/** @def IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
 * Enables the delayed PC updating debugging code.
 * This is an alternative to the ARM64-only IEMNATIVE_REG_FIXED_PC_DBG. */
#if defined(DOXYGEN_RUNNING) || 0
# define IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
#endif

/** Enables access to even callee saved registers. */
/*# define IEMNATIVE_WITH_SIMD_REG_ACCESS_ALL_REGISTERS*/

#if defined(DOXYGEN_RUNNING) || 1
/** @def IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
 * Delay the writeback of dirty registers as long as possible. */
# define IEMNATIVE_WITH_DELAYED_REGISTER_WRITEBACK
#endif

/** @def IEM_WITH_TLB_STATISTICS
 * Enables all TLB statistics. */
#if defined(VBOX_WITH_STATISTICS) || defined(DOXYGEN_RUNNING)
# define IEM_WITH_TLB_STATISTICS
#endif

/** @def IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
 * Enable this to use native emitters for certain SIMD FP operations. */
#if 1 || defined(DOXYGEN_RUNNING)
# define IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
#endif

/** @def VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING
 * Enable this to create a saved state file with the threaded translation
 * blocks fed to the native recompiler on VCPU \#0.  The resulting file can
 * then be fed into the native recompiler for code profiling purposes.
 * This is not a feature that should normally be enabled! */
#if 0 || defined(DOXYGEN_RUNNING)
# define VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING
#endif

/** @def VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
 * Enables a quicker alternative to throw/longjmp for IEM_DO_LONGJMP when
 * executing native translation blocks.
 *
 * This exploits the fact that we save all non-volatile registers in the TB
 * prologue and thus just need to do the same as the TB epilogue to get the
 * effect of a longjmp/throw.  Since MSC marks XMM6 thru XMM15 as
 * non-volatile (and does something even more crazy for ARM), this probably
 * won't work reliably on Windows. */
#ifdef RT_ARCH_ARM64
# ifndef RT_OS_WINDOWS
#  define VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
# endif
#endif
/* ASM-NOINC-START */
#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
# if !defined(IN_RING3) \
  || !defined(VBOX_WITH_IEM_RECOMPILER) \
  || !defined(VBOX_WITH_IEM_NATIVE_RECOMPILER)
#  undef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
# elif defined(RT_OS_WINDOWS)
#  pragma message("VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP is not safe to use on windows")
# endif
#endif


/** @def IEM_DO_LONGJMP
 *
 * Wrapper around longjmp / throw.
 *
 * @param a_pVCpu   The CPU handle.
 * @param a_rc      The status code to jump back with / throw.
 */
#ifdef IEM_WITH_THROW_CATCH
# ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
#  define IEM_DO_LONGJMP(a_pVCpu, a_rc) do { \
        if ((a_pVCpu)->iem.s.pvTbFramePointerR3) \
            iemNativeTbLongJmp((a_pVCpu)->iem.s.pvTbFramePointerR3, (a_rc)); \
        throw int(a_rc); \
    } while (0)
# else
#  define IEM_DO_LONGJMP(a_pVCpu, a_rc) throw int(a_rc)
# endif
#else
# define IEM_DO_LONGJMP(a_pVCpu, a_rc)  longjmp(*(a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf), (a_rc))
#endif

/** For use with IEM functions that may do a longjmp (when enabled).
 *
 * Visual C++ has trouble longjmp'ing from/over functions with the noexcept
 * attribute.  So, we indicate that functions that may be part of a longjmp
 * may throw "exceptions" and that the compiler should definitely not generate
 * any std::terminate calling unwind code.
 *
 * Here is one example of this ending in std::terminate:
 * @code{.txt}
00 00000041`cadfda10 00007ffc`5d5a1f9f     ucrtbase!abort+0x4e
01 00000041`cadfda40 00007ffc`57af229a     ucrtbase!terminate+0x1f
02 00000041`cadfda70 00007ffb`eec91030     VCRUNTIME140!__std_terminate+0xa [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\ehhelpers.cpp @ 192]
03 00000041`cadfdaa0 00007ffb`eec92c6d     VCRUNTIME140_1!_CallSettingFrame+0x20 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\handlers.asm @ 50]
04 00000041`cadfdad0 00007ffb`eec93ae5     VCRUNTIME140_1!__FrameHandler4::FrameUnwindToState+0x241 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 1085]
05 00000041`cadfdc00 00007ffb`eec92258     VCRUNTIME140_1!__FrameHandler4::FrameUnwindToEmptyState+0x2d [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 218]
06 00000041`cadfdc30 00007ffb`eec940e9     VCRUNTIME140_1!__InternalCxxFrameHandler<__FrameHandler4>+0x194 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\frame.cpp @ 304]
07 00000041`cadfdcd0 00007ffc`5f9f249f     VCRUNTIME140_1!__CxxFrameHandler4+0xa9 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\risctrnsctrl.cpp @ 290]
08 00000041`cadfdd40 00007ffc`5f980939     ntdll!RtlpExecuteHandlerForUnwind+0xf
09 00000041`cadfdd70 00007ffc`5f9a0edd     ntdll!RtlUnwindEx+0x339
0a 00000041`cadfe490 00007ffc`57aff976     ntdll!RtlUnwind+0xcd
0b 00000041`cadfea00 00007ffb`e1b5de01     VCRUNTIME140!__longjmp_internal+0xe6 [d:\agent\_work\1\s\src\vctools\crt\vcruntime\src\eh\amd64\longjmp.asm @ 140]
0c (Inline Function) --------`--------     VBoxVMM!iemOpcodeGetNextU8SlowJmp+0x95 [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 1155]
0d 00000041`cadfea50 00007ffb`e1b60f6b     VBoxVMM!iemOpcodeGetNextU8Jmp+0xc1 [L:\vbox-intern\src\VBox\VMM\include\IEMInline.h @ 402]
0e 00000041`cadfea90 00007ffb`e1cc6201     VBoxVMM!IEMExecForExits+0xdb [L:\vbox-intern\src\VBox\VMM\VMMAll\IEMAll.cpp @ 10185]
0f 00000041`cadfec70 00007ffb`e1d0df8d     VBoxVMM!EMHistoryExec+0x4f1 [L:\vbox-intern\src\VBox\VMM\VMMAll\EMAll.cpp @ 452]
10 00000041`cadfed60 00007ffb`e1d0d4c0     VBoxVMM!nemR3WinHandleExitCpuId+0x79d [L:\vbox-intern\src\VBox\VMM\VMMAll\NEMAllNativeTemplate-win.cpp.h @ 1829]
 * @endcode
 *
 * @see https://developercommunity.visualstudio.com/t/fragile-behavior-of-longjmp-called-from-noexcept-f/1532859
 */
#if defined(_MSC_VER) || defined(IEM_WITH_THROW_CATCH)
# define IEM_NOEXCEPT_MAY_LONGJMP           RT_NOEXCEPT_EX(false)
# if RT_CPLUSPLUS_PREREQ(201700)
#  define IEM_NOEXCEPT_MAY_LONGJMP_TYPEDEF  RT_NOEXCEPT_EX(false)
# else
#  define IEM_NOEXCEPT_MAY_LONGJMP_TYPEDEF
# endif
#else
# define IEM_NOEXCEPT_MAY_LONGJMP           RT_NOEXCEPT
# if RT_CPLUSPLUS_PREREQ(201700)
#  define IEM_NOEXCEPT_MAY_LONGJMP_TYPEDEF  RT_NOEXCEPT
# else
#  define IEM_NOEXCEPT_MAY_LONGJMP_TYPEDEF
# endif
#endif
/* ASM-NOINC-END */
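
/* Illustrative sketch (not from the real sources): how a fetch helper might
 * combine IEM_NOEXCEPT_MAY_LONGJMP and IEM_DO_LONGJMP.  The function names
 * are hypothetical.
 * @code{.c}
 *  static uint8_t iemExampleFetchByteJmp(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
 *  {
 *      uint8_t      bValue;
 *      VBOXSTRICTRC rcStrict = iemExampleTryFetchByte(pVCpu, &bValue); // hypothetical slow path
 *      if (rcStrict == VINF_SUCCESS)
 *          return bValue;
 *      IEM_DO_LONGJMP(pVCpu, VBOXSTRICTRC_VAL(rcStrict));              // does not return
 *  }
 * @endcode
 */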


//#define IEM_WITH_CODE_TLB // - work in progress
//#define IEM_WITH_DATA_TLB // - work in progress


/** @def IEM_USE_UNALIGNED_DATA_ACCESS
 * Use unaligned accesses instead of elaborate byte assembly. */
#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86) || defined(DOXYGEN_RUNNING) /*ASM-NOINC*/
# define IEM_USE_UNALIGNED_DATA_ACCESS
#endif /*ASM-NOINC*/

//#define IEM_LOG_MEMORY_WRITES


/** @def IEM_CFG_TARGET_CPU
 * The minimum target CPU for the IEM emulation (IEMTARGETCPU_XXX value).
 *
 * By default we allow this to be configured by the user via the
 * CPUM/GuestCpuName config string, but this comes at a slight cost during
 * decoding.  So, for applications of this code where there is no need to
 * be dynamic wrt target CPU, just modify this define.
 */
#if !defined(IEM_CFG_TARGET_CPU) || defined(DOXYGEN_RUNNING)
# define IEM_CFG_TARGET_CPU     IEMTARGETCPU_DYNAMIC
#endif


/*
 * X86 config.
 */

#define IEM_IMPLEMENTS_TASKSWITCH

/** @def IEM_WITH_3DNOW
 * Includes the 3DNow decoding. */
#if !defined(IEM_WITH_3DNOW) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_3DNOW
#  define IEM_WITH_3DNOW
# endif
#endif

/** @def IEM_WITH_THREE_0F_38
 * Includes the three byte opcode map for instrs starting with 0x0f 0x38. */
#if !defined(IEM_WITH_THREE_0F_38) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_THREE_0F_38
#  define IEM_WITH_THREE_0F_38
# endif
#endif

/** @def IEM_WITH_THREE_0F_3A
 * Includes the three byte opcode map for instrs starting with 0x0f 0x3a. */
#if !defined(IEM_WITH_THREE_0F_3A) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_THREE_0F_3A
#  define IEM_WITH_THREE_0F_3A
# endif
#endif

/** @def IEM_WITH_VEX
 * Includes the VEX decoding. */
#if !defined(IEM_WITH_VEX) || defined(DOXYGEN_RUNNING) /* For doxygen, set in Config.kmk. */
# ifndef IEM_WITHOUT_VEX
#  define IEM_WITH_VEX
# endif
#endif

#ifndef RT_IN_ASSEMBLER /* ASM-NOINC-START - the rest of the file */

# if !defined(IEM_WITHOUT_INSTRUCTION_STATS) && !defined(DOXYGEN_RUNNING)
/** Instruction statistics. */
typedef struct IEMINSTRSTATS
{
#  define IEM_DO_INSTR_STAT(a_Name, a_szDesc) uint32_t a_Name;
#  include "IEMInstructionStatisticsTmpl.h"
#  undef IEM_DO_INSTR_STAT
} IEMINSTRSTATS;
# else
struct IEMINSTRSTATS;
typedef struct IEMINSTRSTATS IEMINSTRSTATS;
# endif
/** Pointer to IEM instruction statistics. */
typedef IEMINSTRSTATS *PIEMINSTRSTATS;


/** @name IEMTARGETCPU_EFL_BEHAVIOR_XXX - IEMCPU::aidxTargetCpuEflFlavour
 * @{ */
#define IEMTARGETCPU_EFL_BEHAVIOR_NATIVE    0 /**< Native x86 EFLAGS result; Intel EFLAGS when on non-x86 hosts. */
#define IEMTARGETCPU_EFL_BEHAVIOR_INTEL     1 /**< Intel EFLAGS result. */
#define IEMTARGETCPU_EFL_BEHAVIOR_AMD       2 /**< AMD EFLAGS result. */
#define IEMTARGETCPU_EFL_BEHAVIOR_RESERVED  3 /**< Reserved/dummy entry slot that's the same as 0. */
#define IEMTARGETCPU_EFL_BEHAVIOR_MASK      3 /**< For masking the index before use. */
/** Selects the right variant from a_aArray.
 * pVCpu is implicit in the caller context. */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[1] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for when no native worker can
 * be used because the host CPU does not support the operation. */
#define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_NON_NATIVE(a_aArray) \
    (a_aArray[pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
/** Variation of IEMTARGETCPU_EFL_BEHAVIOR_SELECT for a two dimensional
 * array paralleling IEMCPU::aidxTargetCpuEflFlavour and a single bit index
 * into the two.
 * @sa IEM_SELECT_HOST_OR_FALLBACK */
#if defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[a_fNative][pVCpu->iem.s.aidxTargetCpuEflFlavour[a_fNative] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#else
# define IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX(a_aaArray, a_fNative) \
    (a_aaArray[0][pVCpu->iem.s.aidxTargetCpuEflFlavour[0] & IEMTARGETCPU_EFL_BEHAVIOR_MASK])
#endif
/** @} */

/**
 * Picks @a a_pfnNative or @a a_pfnFallback according to the host CPU feature
 * indicator given by @a a_fCpumFeatureMember (CPUMFEATURES member).
 *
 * On non-x86 hosts, this will shortcut to the fallback w/o checking the
 * indicator.
 *
 * @sa IEMTARGETCPU_EFL_BEHAVIOR_SELECT_EX
 */
#if (defined(RT_ARCH_X86) || defined(RT_ARCH_AMD64)) && !defined(IEM_WITHOUT_ASSEMBLY)
# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) \
    (g_CpumHostFeatures.s.a_fCpumFeatureMember ? a_pfnNative : a_pfnFallback)
#else
# define IEM_SELECT_HOST_OR_FALLBACK(a_fCpumFeatureMember, a_pfnNative, a_pfnFallback) (a_pfnFallback)
#endif
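
/* Illustrative sketch (not from the real sources): picking a worker at call
 * time.  The worker names and pfnWorker variable are hypothetical; fPopCnt is
 * assumed here to be a CPUMFEATURES member.
 * @code{.c}
 *  pfnWorker = IEM_SELECT_HOST_OR_FALLBACK(fPopCnt, iemAImpl_ExampleNative, iemAImpl_ExampleFallback);
 *  pfnWorker(puDst, uSrc, pfEFlags);   // native worker only when the host has POPCNT
 * @endcode
 */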

/** @name Helpers for passing C++ template arguments to an
 * IEM_MC_NATIVE_EMIT_3/4/5 style macro.
 * @{
 */
#define IEM_TEMPL_ARG_1(a1)             <a1>
#define IEM_TEMPL_ARG_2(a1, a2)         <a1,a2>
#define IEM_TEMPL_ARG_3(a1, a2, a3)     <a1,a2,a3>
/** @} */
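
/* Illustrative note: these helpers exist because a bare comma inside a macro
 * argument would otherwise be split by the preprocessor.  A hypothetical
 * IEM_MC_NATIVE_EMIT-style invocation (macro and worker names invented):
 * @code{.c}
 *  // IEM_TEMPL_ARG_2(uint64_t, true) expands to:  <uint64_t,true>
 *  IEM_MC_NATIVE_EMIT_EXAMPLE(iemNativeEmitExampleWorker, IEM_TEMPL_ARG_2(uint64_t, true), bRm);
 * @endcode
 */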


/**
 * IEM TLB entry.
 *
 * Lookup assembly:
 * @code{.asm}
        ; Calculate tag.
        mov     rax, [VA]
        shl     rax, 16
        shr     rax, 16 + X86_PAGE_SHIFT
        or      rax, [uTlbRevision]

        ; Do indexing.
        movzx   ecx, al
        lea     rcx, [pTlbEntries + rcx]

        ; Check tag.
        cmp     [rcx + IEMTLBENTRY.uTag], rax
        jne     .TlbMiss

        ; Check access.
        mov     rax, ACCESS_FLAGS | MAPPING_R3_NOT_VALID | 0xffffff00
        and     rax, [rcx + IEMTLBENTRY.fFlagsAndPhysRev]
        cmp     rax, [uTlbPhysRev]
        jne     .TlbMiss

        ; Calc address and we're done.
        mov     eax, X86_PAGE_OFFSET_MASK
        and     eax, [VA]
        or      rax, [rcx + IEMTLBENTRY.pMappingR3]
 %ifdef VBOX_WITH_STATISTICS
        inc     qword [cTlbHits]
 %endif
        jmp     .Done

    .TlbMiss:
        mov     r8d, ACCESS_FLAGS
        mov     rdx, [VA]
        mov     rcx, [pVCpu]
        call    iemTlbTypeMiss
    .Done:

 * @endcode
 */
typedef struct IEMTLBENTRY
{
    /** The TLB entry tag.
     * Bits 35 thru 0 are made up of the virtual address shifted right 12 bits,
     * ASSUMING a virtual address width of 48 bits.
     *
     * Bits 63 thru 36 are made up of the TLB revision (zero means invalid).
     *
     * The TLB lookup code uses the current TLB revision, which won't ever be zero,
     * enabling an extremely cheap TLB invalidation most of the time.  When the TLB
     * revision wraps around though, the tags need to be zeroed.
     *
     * @note Try using the SHRD instruction?  After seeing
     *       https://gmplib.org/~tege/x86-timing.pdf, maybe not.
     *
     * @todo This will need to be reorganized for 57-bit wide virtual address and
     *       PCID (currently 12 bits) and ASID (currently 6 bits) support.  We'll
     *       have to move the TLB entry versioning entirely to the
     *       fFlagsAndPhysRev member then, 57 bit wide VAs means we'll only have
     *       19 bits left (64 - 57 + 12 = 19) and they'll almost entirely be
     *       consumed by PCID and ASID (12 + 6 = 18).
     *       Update: Put the PCID + ASID in fFlagsAndPhysRev; that doesn't solve
     *       the 57-bit problem, though.
     */
    uint64_t uTag;
    /** Access flags and physical TLB revision.
     *
     * @x86
     * @parblock
     *  - Bit  0 - page tables   - not executable (X86_PTE_PAE_NX).
     *  - Bit  1 - page tables   - not writable (complemented X86_PTE_RW).
     *  - Bit  2 - page tables   - not user (complemented X86_PTE_US).
     *  - Bit  3 - pgm phys/virt - not directly writable.
     *  - Bit  4 - pgm phys page - not directly readable.
     *  - Bit  5 - page tables   - not accessed (complemented X86_PTE_A).
     *  - Bit  6 - page tables   - not dirty (complemented X86_PTE_D).
     *  - Bit  7 - page tables   - large page.
     *  - Bit  8 - tlb entry     - pbMappingR3 member not valid.
     *  - Bit  9 - phys          - Unassigned memory.
     *  - Bit 10 - phys          - Code page.
     *  - Bits 63:11 - phys      - Physical TLB revision number.
     *
     * We're using complemented bit meanings here because it makes it easy to check
     * whether special action is required.  For instance a user mode write access
     * would do a "TEST fFlags, (X86_PTE_RW | X86_PTE_US | X86_PTE_D)" and a
     * non-zero result would mean special handling needed because either it wasn't
     * writable, or it wasn't user, or the page wasn't dirty.  A user mode read
     * access would do "TEST fFlags, X86_PTE_US"; and a kernel mode read wouldn't
     * need to check any PTE flag.
     * @endparblock
     *
     * @arm
     * @parblock
     *  - Bit  0 - stage 1+2     - not privileged read accessible.
     *  - Bit  1 - stage 1+2     - not privileged write accessible.
     *  - Bit  2 - stage 1+2     - not privileged execute accessible.
     *  - Bit  3 - stage 1+2     - not privileged guarded control stack accessible.
     *  - Bit  4 - stage 1+2     - not unprivileged read accessible.
     *  - Bit  5 - stage 1+2     - not unprivileged write accessible.
     *  - Bit  6 - stage 1+2     - not unprivileged execute accessible.
     *  - Bit  7 - stage 1+2     - not unprivileged guarded control stack accessible.
     *  - Bit  8 - stage 2       - no limited write access (?).
     *  - Bit  9 - stage 2       - TopLevel0 (?)
     *  - Bit 10 - stage 2       - TopLevel1 (?)
     *  - Bit 11 - stage 1+2 leaf - not dirty.
     *  - Bit 12 - stage 1+2 leaf - alternate MECID (AMEC).
     *  - Bit 13 - pgm phys page - not directly readable.
     *  - Bit 14 - pgm phys/virt - not directly writable.
     *  - Bit 15 - tlb entry     - pbMappingR3 member not valid.
     *  - Bit 16 - phys          - Unassigned memory.
     *  - Bit 17 - phys          - Code page.
     *  - Bit 18 - stage 1 leaf  - NS (not-secure).
     *  - Bit 19 - stage 1 leaf  - NSE (root).
     *  - Bits 21:20 - stage 1+2 - Page size.
     *  - Bit  22 - stage 1+2    - Device memory type (see IEMTLBE_F_EFF_DEVICE).
     *  - Bits 38:23 - stage 1 reg - Address space ID (ASID).
     *  - Bits 54:39 - stage 2 reg - Virtual Machine ID (VMID).
     *  - Bits 63:55 - tlb entry - physical TLB revision number.
     *
     * The ASIDs and VMIDs are kept with the physical TLB revision number, so
     * there is no extra overhead there.  How the NSE:NS stuff will be handled
     * is a question for later.
     *
     * The above is a preliminary sketch...
     * @endparblock
     *
     * @todo arm64: Not sure if we can combine the stage 1 and 2 AMEC bits,
     *       but hope so...  Doubt we'll be needing this any time soon.
     */
    uint64_t fFlagsAndPhysRev;
    /** The guest physical page address. */
    uint64_t GCPhys;
    /** Pointer to the ring-3 mapping. */
    R3PTRTYPE(uint8_t *) pbMappingR3;
#if HC_ARCH_BITS == 32
    uint32_t u32Padding1;
#endif
} IEMTLBENTRY;
AssertCompileSize(IEMTLBENTRY, 32);
/** Pointer to an IEM TLB entry. */
typedef IEMTLBENTRY *PIEMTLBENTRY;
/** Pointer to a const IEM TLB entry. */
typedef IEMTLBENTRY const *PCIEMTLBENTRY;
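
/* Rough C equivalent of the assembly lookup sketched above (illustrative
 * only; the real lookup code lives elsewhere, and the miss helper name here
 * is hypothetical).  Only the non-global (even) slot is checked in this
 * sketch.
 * @code{.c}
 *  uint64_t const     uTag  = IEMTLB_CALC_TAG_NO_REV(pVCpu, GCPtr) | pTlb->uTlbRevision;
 *  PIEMTLBENTRY const pTlbe = IEMTLB_TAG_TO_EVEN_ENTRY(pTlb, uTag);
 *  if (   pTlbe->uTag == uTag
 *      &&    (pTlbe->fFlagsAndPhysRev & (IEMTLBE_F_PHYS_REV | fAccessFlags | IEMTLBE_F_NO_MAPPINGR3))
 *         == pTlb->uTlbPhysRev)
 *      return &pTlbe->pbMappingR3[GCPtr & GUEST_PAGE_OFFSET_MASK];   // hit
 *  return iemExampleTlbMiss(pVCpu, GCPtr, fAccessFlags);             // hypothetical miss path
 * @endcode
 */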

/** @name IEMTLBE_F_XXX - TLB entry flags (IEMTLBENTRY::fFlagsAndPhysRev)
 * @{ */
#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
# define IEMTLBE_F_PT_NO_EXEC       RT_BIT_64(0)  /**< Page tables: Not executable. */
# define IEMTLBE_F_PT_NO_WRITE      RT_BIT_64(1)  /**< Page tables: Not writable. */
# define IEMTLBE_F_PT_NO_USER       RT_BIT_64(2)  /**< Page tables: Not user accessible (supervisor only). */
# define IEMTLBE_F_PG_NO_WRITE      RT_BIT_64(3)  /**< Phys page: Not writable (access handler, ROM, whatever). */
# define IEMTLBE_F_PG_NO_READ       RT_BIT_64(4)  /**< Phys page: Not readable (MMIO / access handler, ROM). */
# define IEMTLBE_F_PT_NO_ACCESSED   RT_BIT_64(5)  /**< Page tables: Not accessed (need to be marked accessed). */
# define IEMTLBE_F_PT_NO_DIRTY      RT_BIT_64(6)  /**< Page tables: Not dirty (needs to be made dirty on write). */
# define IEMTLBE_F_PT_LARGE_PAGE    RT_BIT_64(7)  /**< Page tables: Large 2 or 4 MiB page (for flushing). */
# define IEMTLBE_F_NO_MAPPINGR3     RT_BIT_64(8)  /**< TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
# define IEMTLBE_F_PG_UNASSIGNED    RT_BIT_64(9)  /**< Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
# define IEMTLBE_F_PG_CODE_PAGE     RT_BIT_64(10) /**< Phys page: Code page. */
# define IEMTLBE_F_PHYS_REV         UINT64_C(0xfffffffffffff800) /**< Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
#endif
#if defined(VBOX_VMM_TARGET_ARMV8) || defined(DOXYGEN_RUNNING)
/** Stage 1+2: No privileged read access. */
# define IEMTLBE_F_EFF_P_NO_READ_BIT        0
# define IEMTLBE_F_EFF_P_NO_READ            RT_BIT_64(IEMTLBE_F_EFF_P_NO_READ_BIT)
/** Stage 1+2: No privileged write access. */
# define IEMTLBE_F_EFF_P_NO_WRITE_BIT       1
# define IEMTLBE_F_EFF_P_NO_WRITE           RT_BIT_64(IEMTLBE_F_EFF_P_NO_WRITE_BIT)
/** Stage 1+2: No privileged execute access. */
# define IEMTLBE_F_EFF_P_NO_EXEC_BIT        2
# define IEMTLBE_F_EFF_P_NO_EXEC            RT_BIT_64(IEMTLBE_F_EFF_P_NO_EXEC_BIT)
/** Stage 1+2: No privileged guarded control stack access. */
# define IEMTLBE_F_EFF_P_NO_GCS_BIT         3
# define IEMTLBE_F_EFF_P_NO_GCS             RT_BIT_64(IEMTLBE_F_EFF_P_NO_GCS_BIT)
/** Stage 1+2: No unprivileged read access. */
# define IEMTLBE_F_EFF_U_NO_READ_BIT        4
# define IEMTLBE_F_EFF_U_NO_READ            RT_BIT_64(IEMTLBE_F_EFF_U_NO_READ_BIT)
/** Stage 1+2: No unprivileged write access. */
# define IEMTLBE_F_EFF_U_NO_WRITE_BIT       5
# define IEMTLBE_F_EFF_U_NO_WRITE           RT_BIT_64(IEMTLBE_F_EFF_U_NO_WRITE_BIT)
/** Stage 1+2: No unprivileged execute access. */
# define IEMTLBE_F_EFF_U_NO_EXEC_BIT        6
# define IEMTLBE_F_EFF_U_NO_EXEC            RT_BIT_64(IEMTLBE_F_EFF_U_NO_EXEC_BIT)
/** Stage 1+2: No unprivileged guarded control stack access. */
# define IEMTLBE_F_EFF_U_NO_GCS_BIT         7
# define IEMTLBE_F_EFF_U_NO_GCS             RT_BIT_64(IEMTLBE_F_EFF_U_NO_GCS_BIT)
/** Stage 2: No limited write access. */
# define IEMTLBE_F_S2_NO_LIM_WRITE_BIT      8
# define IEMTLBE_F_S2_NO_LIM_WRITE          RT_BIT_64(IEMTLBE_F_S2_NO_LIM_WRITE_BIT)
/** Stage 2: TopLevel0. */
# define IEMTLBE_F_S2_TL0_BIT               9
# define IEMTLBE_F_S2_TL0                   RT_BIT_64(IEMTLBE_F_S2_TL0_BIT)
/** Stage 2: TopLevel1. */
# define IEMTLBE_F_S2_TL1_BIT               10
# define IEMTLBE_F_S2_TL1                   RT_BIT_64(IEMTLBE_F_S2_TL1_BIT)
/** Stage 1+2: Not dirty. */
# define IEMTLBE_F_EFF_NO_DIRTY_BIT         11
# define IEMTLBE_F_EFF_NO_DIRTY             RT_BIT_64(IEMTLBE_F_EFF_NO_DIRTY_BIT)
/** Stage 1+2: Alternate MECID (AMEC). */
# define IEMTLBE_F_EFF_AMEC_BIT             12
# define IEMTLBE_F_EFF_AMEC                 RT_BIT_64(IEMTLBE_F_EFF_AMEC_BIT)
/** Phys page: Not readable (MMIO / access handler, ROM). */
# define IEMTLBE_F_PG_NO_READ_BIT           13
# define IEMTLBE_F_PG_NO_READ               RT_BIT_64(IEMTLBE_F_PG_NO_READ_BIT)
/** Phys page: Not writable (access handler, ROM, whatever). */
# define IEMTLBE_F_PG_NO_WRITE_BIT          14
# define IEMTLBE_F_PG_NO_WRITE              RT_BIT_64(IEMTLBE_F_PG_NO_WRITE_BIT)
/** TLB entry: The IEMTLBENTRY::pbMappingR3 member is invalid. */
# define IEMTLBE_F_NO_MAPPINGR3_BIT         15
# define IEMTLBE_F_NO_MAPPINGR3             RT_BIT_64(IEMTLBE_F_NO_MAPPINGR3_BIT)
/** Phys page: Unassigned memory (not RAM, ROM, MMIO2 or MMIO). */
# define IEMTLBE_F_PG_UNASSIGNED_BIT        16
# define IEMTLBE_F_PG_UNASSIGNED            RT_BIT_64(IEMTLBE_F_PG_UNASSIGNED_BIT)
/** Phys page: Code page. */
# define IEMTLBE_F_PG_CODE_PAGE_BIT         17
# define IEMTLBE_F_PG_CODE_PAGE             RT_BIT_64(IEMTLBE_F_PG_CODE_PAGE_BIT)
/** Stage 1: Non-secure bit. */
# define IEMTLBE_F_S1_NS_BIT                18
# define IEMTLBE_F_S1_NS                    RT_BIT_64(IEMTLBE_F_S1_NS_BIT)
/** Stage 1: Non-secure extension/whatever bit. */
# define IEMTLBE_F_S1_NSE_BIT               19
# define IEMTLBE_F_S1_NSE                   RT_BIT_64(IEMTLBE_F_S1_NSE_BIT)
/** Stage 1+2: Page size.
 * @todo may need separate bits for each stage since they may use different
 *       page sizes.  Or perhaps a single bit suffices?  Also possible we
 *       don't need any of this at all because of a very very rich invalidation
 *       interface on arm. */
# define IEMTLBE_F_EFF_SIZE_MASK            UINT64_C(0x300000)
/** @see IEMTLBE_F_EFF_SIZE_MASK */
# define IEMTLBE_F_EFF_SIZE_SHIFT           20
/** Stage 1+2: Smallest page size. */
# define IEMTLBE_F_EFF_SIZE_L3              UINT64_C(0x000000)
/** Stage 1+2: Level 2 block. */
# define IEMTLBE_F_EFF_SIZE_L2              UINT64_C(0x100000)
/** Stage 1+2: Level 1 block. */
# define IEMTLBE_F_EFF_SIZE_L1              UINT64_C(0x200000)
/** Stage 1+2: Level 0 block. */
# define IEMTLBE_F_EFF_SIZE_L0              UINT64_C(0x300000)
/** Stage 1+2: Device memory type (clear if normal memory type). */
# define IEMTLBE_F_EFF_DEVICE_BIT           22
# define IEMTLBE_F_EFF_DEVICE               RT_BIT_64(IEMTLBE_F_EFF_DEVICE_BIT)
/** Stage 1: Address space ID (from stage 1 root register). */
# define IEMTLBE_F_S1_ASID                  (UINT64_C(0xffff) << IEMTLBE_F_S1_ASID_SHIFT)
/** @see IEMTLBE_F_S1_ASID */
# define IEMTLBE_F_S1_ASID_SHIFT            23
/** Stage 2: Virtual machine ID (from stage 2 root register). */
# define IEMTLBE_F_S2_VMID                  (UINT64_C(0xffff) << IEMTLBE_F_S2_VMID_SHIFT)
/** @see IEMTLBE_F_S2_VMID */
# define IEMTLBE_F_S2_VMID_SHIFT            39
# ifndef DOXYGEN_RUNNING
/** Physical revision mask. @sa IEMTLB_PHYS_REV_INCR */
#  define IEMTLBE_F_PHYS_REV                UINT64_C(0xff80000000000000)
# endif
/** @todo ARM: We need to identify which EL the entry is from (EL1,
 *       EL2 or EL3)! */
#endif
/** @} */
/** The bits set by PGMPhysIemGCPhys2PtrNoLock. */
#define IEMTLBE_GCPHYS2PTR_MASK     (  PGMIEMGCPHYS2PTR_F_NO_WRITE \
                                     | PGMIEMGCPHYS2PTR_F_NO_READ \
                                     | PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 \
                                     | PGMIEMGCPHYS2PTR_F_UNASSIGNED \
                                     | PGMIEMGCPHYS2PTR_F_CODE_PAGE \
                                     | IEMTLBE_F_PHYS_REV )
#if defined(VBOX_VMM_TARGET_X86) || defined(VBOX_VMM_TARGET_ARMV8)
AssertCompile(PGMIEMGCPHYS2PTR_F_NO_WRITE     == IEMTLBE_F_PG_NO_WRITE);
AssertCompile(PGMIEMGCPHYS2PTR_F_NO_READ      == IEMTLBE_F_PG_NO_READ);
AssertCompile(PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3 == IEMTLBE_F_NO_MAPPINGR3);
AssertCompile(PGMIEMGCPHYS2PTR_F_UNASSIGNED   == IEMTLBE_F_PG_UNASSIGNED);
AssertCompile(PGMIEMGCPHYS2PTR_F_CODE_PAGE    == IEMTLBE_F_PG_CODE_PAGE);
# ifdef VBOX_VMM_TARGET_X86
AssertCompile(PGM_WALKINFO_BIG_PAGE           == IEMTLBE_F_PT_LARGE_PAGE);
# endif
#endif

/** Tests if the TLB entry is global (odd). */
#define IEMTLBE_IS_GLOBAL(a_pTlbe)  (((uintptr_t)(a_pTlbe) / sizeof(IEMTLBENTRY)) & 1)


/** The TLB size (power of two).
 * We initially chose 256 because that way we can obtain the result directly
 * from an 8-bit register without an additional AND instruction.
 * See also @bugref{10687}. */
#if defined(RT_ARCH_AMD64)
# define IEMTLB_ENTRY_COUNT                     256
# define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO     8
#else
# define IEMTLB_ENTRY_COUNT                     8192
# define IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO     13
#endif
AssertCompile(RT_BIT_32(IEMTLB_ENTRY_COUNT_AS_POWER_OF_TWO) == IEMTLB_ENTRY_COUNT);

/** TLB slot format spec (assumes uint32_t or unsigned value). */
#if IEMTLB_ENTRY_COUNT <= 0x100 / 2
# define IEMTLB_SLOT_FMT    "%02x"
#elif IEMTLB_ENTRY_COUNT <= 0x1000 / 2
# define IEMTLB_SLOT_FMT    "%03x"
#elif IEMTLB_ENTRY_COUNT <= 0x10000 / 2
# define IEMTLB_SLOT_FMT    "%04x"
#else
# define IEMTLB_SLOT_FMT    "%05x"
#endif

/** Enable the large page bitmap TLB optimization.
 *
 * The idea here is to avoid scanning the full 32 KB (2 MB pages, 2*512 TLB
 * entries) or 64 KB (4 MB pages, 2*1024 TLB entries) worth of TLB entries
 * during invlpg when large pages are used, and instead just scan 128 or 256
 * bytes of the bmLargePage bitmap to determine which TLB entries might be
 * containing large pages and actually require checking.
 *
 * There is a good possibility of false positives since we currently don't
 * clear the bitmap when flushing the TLB, but it should help reduce the
 * workload when the large pages aren't fully loaded into the TLB in their
 * entirety...
 */
#define IEMTLB_WITH_LARGE_PAGE_BITMAP
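
/* Illustrative sketch (not the real code): the invlpg fast path this bitmap
 * enables.  The slot-checking helper is hypothetical, and the real scan also
 * honours the large page tag ranges kept in IEMTLB below.
 * @code{.c}
 *  for (uint32_t idxQword = 0; idxQword < RT_ELEMENTS(pTlb->bmLargePage); idxQword++)
 *      if (pTlb->bmLargePage[idxQword])    // only visit slots that may hold large page entries
 *          iemExampleCheckLargePageSlots(pTlb, idxQword * 64, pTlb->bmLargePage[idxQword], GCPtr);
 * @endcode
 */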

/**
 * An IEM TLB.
 *
 * We've got two of these, one for data and one for instructions.
 */
typedef struct IEMTLB
{
    /** The non-global TLB revision.
     * This is actually only 28 bits wide (see IEMTLBENTRY::uTag) and is incremented
     * by adding RT_BIT_64(36) to it.  When it wraps around and becomes zero, all
     * the tags in the TLB must be zeroed and the revision set to RT_BIT_64(36).
     * (The revision zero indicates an invalid TLB entry.)
     *
     * The initial value is chosen to cause an early wraparound.
     * @arm This includes the ASID & VM ID. */
    uint64_t uTlbRevision;
    /** The TLB physical address revision - shadow of PGM variable.
     *
     * The revision number is the top x bits (target dependent, see
     * IEMTLBENTRY::fFlagsAndPhysRev) and is incremented by adding RT_BIT_64(x).
     * When it wraps around and becomes zero, a rendezvous is called and each CPU
     * wipes the IEMTLBENTRY::pbMappingR3 as well as many of the
     * IEMTLBENTRY::fFlagsAndPhysRev bits.
     *
     * @arm This includes the current ASID & VMID values.
     * @todo arm: implement ASID & VMID.
     *
     * The initial value is chosen to cause an early wraparound.
     *
     * @note This is placed between the two TLB revisions because we
     *       load it in pair with one or the other on arm64.
     */
    uint64_t volatile uTlbPhysRev;
    /** The global TLB revision.
     * Same as uTlbRevision, but only increased for global flushes. */
    uint64_t uTlbRevisionGlobal;

    /** Large page tag range.
     *
     * This is used to avoid scanning a large page's worth of TLB entries for each
     * INVLPG instruction, only doing so when we've loaded any and the address is
     * in this range.  This is kept up to date when loading new TLB entries.
     */
    struct LARGEPAGERANGE
    {
        /** The lowest large page address tag, UINT64_MAX if none. */
        uint64_t uFirstTag;
        /** The highest large page address tag (with offset mask part set), 0 if none. */
        uint64_t uLastTag;
    }
    /** Large page range for non-global pages. */
    NonGlobalLargePageRange,
    /** Large page range for global pages. */
    GlobalLargePageRange;
    /** Number of non-global entries for large pages loaded since last TLB flush. */
    uint32_t cTlbNonGlobalLargePageCurLoads;
    /** Number of global entries for large pages loaded since last TLB flush. */
    uint32_t cTlbGlobalLargePageCurLoads;

    /* Statistics: */

    /** TLB hits in IEMAll.cpp code (IEM_WITH_TLB_STATISTICS only; both).
     * @note For the data TLB this is only used in iemMemMap and for direct (i.e.
     *       not via safe read/write path) calls to iemMemMapJmp. */
    uint64_t cTlbCoreHits;
    /** Safe read/write TLB hits in iemMemMapJmp (IEM_WITH_TLB_STATISTICS
     * only; data tlb only). */
    uint64_t cTlbSafeHits;
    /** TLB hits in IEMAllMemRWTmplInline.cpp.h (data + IEM_WITH_TLB_STATISTICS only). */
    uint64_t cTlbInlineCodeHits;

    /** TLB misses in IEMAll.cpp code (both).
     * @note For the data TLB this is only used in iemMemMap and for direct (i.e.
     *       not via safe read/write path) calls to iemMemMapJmp.  So, for the
     *       data TLB this is more like 'other misses', while for the code TLB
     *       it counts all misses. */
    uint64_t cTlbCoreMisses;
    /** Subset of cTlbCoreMisses that results in PTE.G=1 loads (odd entries). */
    uint64_t cTlbCoreGlobalLoads;
    /** Safe read/write TLB misses in iemMemMapJmp (so data only). */
    uint64_t cTlbSafeMisses;
    /** Subset of cTlbSafeMisses that results in PTE.G=1 loads (odd entries). */
    uint64_t cTlbSafeGlobalLoads;
    /** Safe read path taken (data only). */
    uint64_t cTlbSafeReadPath;
    /** Safe write path taken (data only). */
    uint64_t cTlbSafeWritePath;

    /** @name Details for native code TLB misses.
     * @note These counts are included in the above counters (cTlbSafeReadPath,
     *       cTlbSafeWritePath, cTlbInlineCodeHits).
     * @{ */
    /** TLB misses in native code due to tag mismatch. */
    STAMCOUNTER cTlbNativeMissTag;
    /** TLB misses in native code due to flags or physical revision mismatch. */
    STAMCOUNTER cTlbNativeMissFlagsAndPhysRev;
    /** TLB misses in native code due to misaligned access. */
    STAMCOUNTER cTlbNativeMissAlignment;
    /** TLB misses in native code due to cross page access. */
    uint32_t cTlbNativeMissCrossPage;
    /** TLB misses in native code due to non-canonical address. */
    uint32_t cTlbNativeMissNonCanonical;
    /** @} */

    /** Slow read path (code only). */
    uint32_t cTlbSlowCodeReadPath;

    /** Regular TLB flush count. */
    uint32_t cTlsFlushes;
    /** Global TLB flush count. */
    uint32_t cTlsGlobalFlushes;
    /** Revision rollovers. */
    uint32_t cTlbRevisionRollovers;
    /** Physical revision flushes. */
    uint32_t cTlbPhysRevFlushes;
    /** Physical revision rollovers. */
    uint32_t cTlbPhysRevRollovers;

    /** Number of INVLPG (and similar) operations. */
    uint32_t cTlbInvlPg;
    /** Subset of cTlbInvlPg that involved non-global large pages. */
    uint32_t cTlbInvlPgLargeNonGlobal;
    /** Subset of cTlbInvlPg that involved global large pages. */
    uint32_t cTlbInvlPgLargeGlobal;

    uint32_t au32Padding[13];

    /** The TLB entries.
     * Even entries are for PTE.G=0 and use uTlbRevision.
     * Odd entries are for PTE.G=1 and use uTlbRevisionGlobal. */
    IEMTLBENTRY aEntries[IEMTLB_ENTRY_COUNT * 2];
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
    /** Bitmap tracking TLB entries for large pages.
     * This duplicates IEMTLBE_F_PT_LARGE_PAGE for each TLB entry. */
    uint64_t bmLargePage[IEMTLB_ENTRY_COUNT * 2 / 64];
#endif
} IEMTLB;
AssertCompileSizeAlignment(IEMTLB, 64);
#ifdef IEMTLB_WITH_LARGE_PAGE_BITMAP
AssertCompile(IEMTLB_ENTRY_COUNT >= 32 /* bmLargePage ASSUMPTION */);
#endif
/** The width (in bits) of the address portion of the TLB tag. */
#define IEMTLB_TAG_ADDR_WIDTH   36
/** IEMTLB::uTlbRevision increment. */
#define IEMTLB_REVISION_INCR    RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH)
/** IEMTLB::uTlbRevision mask. */
#define IEMTLB_REVISION_MASK    (~(RT_BIT_64(IEMTLB_TAG_ADDR_WIDTH) - 1))

/** IEMTLB::uTlbPhysRev increment.
 * @sa IEMTLBE_F_PHYS_REV */
#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
# define IEMTLB_PHYS_REV_INCR   RT_BIT_64(11)
#elif defined(VBOX_VMM_TARGET_ARMV8)
# define IEMTLB_PHYS_REV_INCR   RT_BIT_64(55)
#endif
#ifdef IEMTLBE_F_PHYS_REV
AssertCompile(IEMTLBE_F_PHYS_REV == ~(IEMTLB_PHYS_REV_INCR - 1U));
#endif
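
/* Illustrative sketch (not the real code): bumping the non-global revision
 * with rollover handling, as described for IEMTLB::uTlbRevision above.  The
 * ARM ASID/VMID bits are ignored in this simplification.
 * @code{.c}
 *  pTlb->uTlbRevision += IEMTLB_REVISION_INCR;
 *  if (pTlb->uTlbRevision == 0)    // wrapped around: stale tags could now match
 *  {
 *      for (uint32_t i = 0; i < RT_ELEMENTS(pTlb->aEntries); i += 2)
 *          pTlb->aEntries[i].uTag = 0;             // zero all non-global (even) tags
 *      pTlb->uTlbRevision = IEMTLB_REVISION_INCR;
 *      pTlb->cTlbRevisionRollovers++;
 *  }
 * @endcode
 */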

/**
 * Calculates the TLB tag for a virtual address but without TLB revision.
 * @returns Tag value for indexing and comparing with IEMTLB::uTag.
 * @param   a_pVCpu The CPU handle (for ARM targets to deal with
 *                  configurable page size).
 * @param   a_GCPtr The virtual address.  Must be RTGCPTR or same size or
 *                  the clearing of the top 16 bits won't work (if 32-bit
 *                  we'll end up with mostly zeros).
 * @todo ARM: Support 52-bit and 56-bit address space size (FEAT_LVA,
 *       FEAT_LVA3) when we see hardware supporting such. */
#ifdef VBOX_VMM_TARGET_ARMV8
# define IEMTLB_CALC_TAG_NO_REV(a_pVCpu, a_GCPtr)   ( (((a_GCPtr) << 16) >> (IEM_F_ARM_GET_TLB_PAGE_SHIFT(pVCpu->iem.s.fExec) + 16)) )
#else
# define IEMTLB_CALC_TAG_NO_REV(a_pVCpu, a_GCPtr)   ( (((a_GCPtr) << 16) >> (GUEST_PAGE_SHIFT + 16)) )
#endif
/**
 * Converts a TLB tag value into an even TLB index.
 * @returns Index into IEMTLB::aEntries.
 * @param   a_uTag  Value returned by IEMTLB_CALC_TAG.
 */
#if IEMTLB_ENTRY_COUNT == 256
# define IEMTLB_TAG_TO_EVEN_INDEX(a_uTag)   ( (uint8_t)(a_uTag) * 2U )
#else
# define IEMTLB_TAG_TO_EVEN_INDEX(a_uTag)   ( ((a_uTag) & (IEMTLB_ENTRY_COUNT - 1U)) * 2U )
AssertCompile(RT_IS_POWER_OF_TWO(IEMTLB_ENTRY_COUNT));
#endif
/**
 * Converts a TLB tag value into a pointer to the corresponding even TLB entry.
 * @returns Pointer into IEMTLB::aEntries.
 * @param   a_pTlb  The TLB.
 * @param   a_uTag  Value returned by IEMTLB_CALC_TAG or
 *                  IEMTLB_CALC_TAG_NO_REV.
 */
#define IEMTLB_TAG_TO_EVEN_ENTRY(a_pTlb, a_uTag)    ( &(a_pTlb)->aEntries[IEMTLB_TAG_TO_EVEN_INDEX(a_uTag)] )

/** Converts a GC address to an even TLB index. */
#define IEMTLB_ADDR_TO_EVEN_INDEX(a_pVCpu, a_GCPtr) IEMTLB_TAG_TO_EVEN_INDEX(IEMTLB_CALC_TAG_NO_REV(a_pVCpu, a_GCPtr))
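
/* Worked example (x86, 4 KiB guest pages): for a_GCPtr = 0x00007fffb1234567
 * the revision-less tag is ((a_GCPtr << 16) >> 28) = 0x7fffb1234, and with
 * 256 entries the even slot index is (uint8_t)0x34 * 2 = 0x68; the matching
 * global (odd) slot is 0x69. */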


/** @def IEM_WITH_TLB_TRACE
 * Enables the TLB tracing.
 * Adjust buffer size in IEMR3Init. */
#if defined(DOXYGEN_RUNNING) || 0
# define IEM_WITH_TLB_TRACE
#endif

#ifdef IEM_WITH_TLB_TRACE

/** TLB trace entry types. */
typedef enum : uint8_t
{
    kIemTlbTraceType_Invalid,
    kIemTlbTraceType_InvlPg,
    kIemTlbTraceType_EvictSlot,
    kIemTlbTraceType_LargeEvictSlot,
    kIemTlbTraceType_LargeScan,
    kIemTlbTraceType_Flush,
    kIemTlbTraceType_FlushGlobal,       /**< x86 specific */
    kIemTlbTraceType_Load,
    kIemTlbTraceType_LoadGlobal,        /**< x86 specific */
    kIemTlbTraceType_Load_Cr0,          /**< x86 specific */
    kIemTlbTraceType_Load_Cr3,          /**< x86 specific */
    kIemTlbTraceType_Load_Cr4,          /**< x86 specific */
    kIemTlbTraceType_Load_Efer,         /**< x86 specific */
    kIemTlbTraceType_Irq,
    kIemTlbTraceType_Xcpt,
    kIemTlbTraceType_IRet,              /**< x86 specific */
    kIemTlbTraceType_Tb_Compile,
    kIemTlbTraceType_Tb_Exec_Threaded,
    kIemTlbTraceType_Tb_Exec_Native,
    kIemTlbTraceType_User0,
    kIemTlbTraceType_User1,
    kIemTlbTraceType_User2,
    kIemTlbTraceType_User3,
} IEMTLBTRACETYPE;

/** TLB trace entry. */
typedef struct IEMTLBTRACEENTRY
{
    /** The flattened RIP for the event. */
    uint64_t        rip;
    /** The event type. */
    IEMTLBTRACETYPE enmType;
    /** Byte parameter - typically used as 'bool fDataTlb'. */
    uint8_t         bParam;
    /** 16-bit parameter value. */
    uint16_t        u16Param;
    /** 32-bit parameter value. */
    uint32_t        u32Param;
    /** 64-bit parameter value. */
    uint64_t        u64Param;
    /** 64-bit parameter value. */
    uint64_t        u64Param2;
} IEMTLBTRACEENTRY;
AssertCompileSize(IEMTLBTRACEENTRY, 32);
/** Pointer to a TLB trace entry. */
typedef IEMTLBTRACEENTRY *PIEMTLBTRACEENTRY;
/** Pointer to a const TLB trace entry. */
typedef IEMTLBTRACEENTRY const *PCIEMTLBTRACEENTRY;
#endif /* IEM_WITH_TLB_TRACE */

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_InvlPg, a_GCPtr)
# define IEMTLBTRACE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_EvictSlot, a_GCPtrTag, a_GCPhys, a_fDataTlb, a_idxSlot)
# define IEMTLBTRACE_LARGE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_LargeEvictSlot, a_GCPtrTag, a_GCPhys, a_fDataTlb, a_idxSlot)
# define IEMTLBTRACE_LARGE_SCAN(a_pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_LargeScan, 0, 0, a_fDataTlb, (uint8_t)a_fGlobal | ((uint8_t)a_fNonGlobal << 1))
# define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Flush, a_uRev, 0, a_fDataTlb)
# define IEMTLBTRACE_FLUSH_GLOBAL(a_pVCpu, a_uRev, a_uGRev, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_FlushGlobal, a_uRev, a_uGRev, a_fDataTlb)
# define IEMTLBTRACE_LOAD(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load, a_GCPtr, a_GCPhys, a_fDataTlb, a_fTlb)
# define IEMTLBTRACE_LOAD_GLOBAL(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_LoadGlobal, a_GCPtr, a_GCPhys, a_fDataTlb, a_fTlb)
#else
# define IEMTLBTRACE_INVLPG(a_pVCpu, a_GCPtr)                                                   do { } while (0)
# define IEMTLBTRACE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb)           do { } while (0)
# define IEMTLBTRACE_LARGE_EVICT_SLOT(a_pVCpu, a_GCPtrTag, a_GCPhys, a_idxSlot, a_fDataTlb)     do { } while (0)
# define IEMTLBTRACE_LARGE_SCAN(a_pVCpu, a_fGlobal, a_fNonGlobal, a_fDataTlb)                   do { } while (0)
# define IEMTLBTRACE_FLUSH(a_pVCpu, a_uRev, a_fDataTlb)                                         do { } while (0)
# define IEMTLBTRACE_FLUSH_GLOBAL(a_pVCpu, a_uRev, a_uGRev, a_fDataTlb)                         do { } while (0)
# define IEMTLBTRACE_LOAD(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb)                       do { } while (0)
# define IEMTLBTRACE_LOAD_GLOBAL(a_pVCpu, a_GCPtr, a_GCPhys, a_fTlb, a_fDataTlb)                do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_LOAD_CR0(a_pVCpu, a_uNew, a_uOld)  iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr0, a_uNew, a_uOld)
# define IEMTLBTRACE_LOAD_CR3(a_pVCpu, a_uNew, a_uOld)  iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr3, a_uNew, a_uOld)
# define IEMTLBTRACE_LOAD_CR4(a_pVCpu, a_uNew, a_uOld)  iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Cr4, a_uNew, a_uOld)
# define IEMTLBTRACE_LOAD_EFER(a_pVCpu, a_uNew, a_uOld) iemTlbTrace(a_pVCpu, kIemTlbTraceType_Load_Efer, a_uNew, a_uOld)
#else
# define IEMTLBTRACE_LOAD_CR0(a_pVCpu, a_uNew, a_uOld)  do { } while (0)
# define IEMTLBTRACE_LOAD_CR3(a_pVCpu, a_uNew, a_uOld)  do { } while (0)
# define IEMTLBTRACE_LOAD_CR4(a_pVCpu, a_uNew, a_uOld)  do { } while (0)
# define IEMTLBTRACE_LOAD_EFER(a_pVCpu, a_uNew, a_uOld) do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_IRQ(a_pVCpu, a_uVector, a_fFlags, a_fEFlags) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Irq, a_fEFlags, 0, a_uVector, a_fFlags)
# define IEMTLBTRACE_XCPT(a_pVCpu, a_uVector, a_uErr, a_uCr2, a_fFlags) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Xcpt, a_uErr, a_uCr2, a_uVector, a_fFlags)
# define IEMTLBTRACE_IRET(a_pVCpu, a_uRetCs, a_uRetRip, a_fEFlags) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_IRet, a_uRetRip, a_fEFlags, 0, a_uRetCs)
#else
# define IEMTLBTRACE_IRQ(a_pVCpu, a_uVector, a_fFlags, a_fEFlags)           do { } while (0)
# define IEMTLBTRACE_XCPT(a_pVCpu, a_uVector, a_uErr, a_uCr2, a_fFlags)     do { } while (0)
# define IEMTLBTRACE_IRET(a_pVCpu, a_uRetCs, a_uRetRip, a_fEFlags)          do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_TB_COMPILE(a_pVCpu, a_GCPhysPc) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Tb_Compile, a_GCPhysPc)
# define IEMTLBTRACE_TB_EXEC_THRD(a_pVCpu, a_pTb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Tb_Exec_Threaded, (a_pTb)->GCPhysPc, (uintptr_t)a_pTb, 0, (a_pTb)->cUsed)
# define IEMTLBTRACE_TB_EXEC_N8VE(a_pVCpu, a_pTb) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_Tb_Exec_Native, (a_pTb)->GCPhysPc, (uintptr_t)a_pTb, 0, (a_pTb)->cUsed)
#else
# define IEMTLBTRACE_TB_COMPILE(a_pVCpu, a_GCPhysPc)                        do { } while (0)
# define IEMTLBTRACE_TB_EXEC_THRD(a_pVCpu, a_pTb)                           do { } while (0)
# define IEMTLBTRACE_TB_EXEC_N8VE(a_pVCpu, a_pTb)                           do { } while (0)
#endif

#if defined(IEM_WITH_TLB_TRACE) && defined(IN_RING3) && 1
# define IEMTLBTRACE_USER0(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User0, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
# define IEMTLBTRACE_USER1(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User1, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
# define IEMTLBTRACE_USER2(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User2, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
# define IEMTLBTRACE_USER3(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) \
    iemTlbTrace(a_pVCpu, kIemTlbTraceType_User3, a_u64Param1, a_u64Param2, a_bParam, a_u32Param)
#else
# define IEMTLBTRACE_USER0(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
# define IEMTLBTRACE_USER1(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
# define IEMTLBTRACE_USER2(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
# define IEMTLBTRACE_USER3(a_pVCpu, a_u64Param1, a_u64Param2, a_u32Param, a_bParam) do { } while (0)
#endif


/** @name IEM_MC_F_XXX - MC block flags/clues.
 * @note x86 specific
 * @todo Merge with IEM_CIMPL_F_XXX
 * @{ */
#define IEM_MC_F_ONLY_8086          RT_BIT_32(0)
#define IEM_MC_F_MIN_186            RT_BIT_32(1)
#define IEM_MC_F_MIN_286            RT_BIT_32(2)
#define IEM_MC_F_NOT_286_OR_OLDER   IEM_MC_F_MIN_386
#define IEM_MC_F_MIN_386            RT_BIT_32(3)
#define IEM_MC_F_MIN_486            RT_BIT_32(4)
#define IEM_MC_F_MIN_PENTIUM        RT_BIT_32(5)
#define IEM_MC_F_MIN_PENTIUM_II     IEM_MC_F_MIN_PENTIUM
#define IEM_MC_F_MIN_CORE           IEM_MC_F_MIN_PENTIUM
#define IEM_MC_F_64BIT              RT_BIT_32(6)
#define IEM_MC_F_NOT_64BIT          RT_BIT_32(7)
/** This is set by IEMAllN8vePython.py to indicate a variation with the
 * flags-clearing-and-checking. */
#define IEM_MC_F_WITH_FLAGS         RT_BIT_32(8)
/** This is set by IEMAllN8vePython.py to indicate a variation without the
 * flags-clearing-and-checking, when there is also a variation with that.
 * @note Do not set this manually, it's only for python and for testing in
 *       the native recompiler! */
#define IEM_MC_F_WITHOUT_FLAGS      RT_BIT_32(9)
/** @} */

/** @name IEM_CIMPL_F_XXX - State change clues for CIMPL calls.
 *
 * These clues are mainly for the recompiler, so that it can emit correct code.
 *
 * They are processed by the python script, which also automatically
 * calculates flags for MC blocks based on the statements, extending the use of
 * these flags to describe MC block behavior to the recompiler core.  The python
 * script passes the flags to the IEM_MC2_END_EMIT_CALLS macro, but mainly for
 * error checking purposes.  The script emits the necessary fEndTb = true and
 * similar statements as this reduces compile time a tiny bit.
 *
 * @{ */
/** Flag set if direct branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_DIRECT           RT_BIT_32(0)
/** Flag set if indirect branch, clear if direct or relative.
 * This is also used for all system control transfers (SYSCALL, SYSRET, INT, ++)
 * as well as for return instructions (RET, IRET, RETF). */
#define IEM_CIMPL_F_BRANCH_INDIRECT         RT_BIT_32(1)
/** Flag set if relative branch, clear if absolute or indirect. */
#define IEM_CIMPL_F_BRANCH_RELATIVE         RT_BIT_32(2)
/** Flag set if conditional branch, clear if unconditional. */
#define IEM_CIMPL_F_BRANCH_CONDITIONAL      RT_BIT_32(3)
/** Flag set if it's a far branch (changes CS).
 * @note x86 specific */
#define IEM_CIMPL_F_BRANCH_FAR              RT_BIT_32(4)
/** Convenience: Testing any kind of branch. */
#define IEM_CIMPL_F_BRANCH_ANY              (IEM_CIMPL_F_BRANCH_DIRECT | IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_RELATIVE)

/** Execution flags may change (IEMCPU::fExec). */
#define IEM_CIMPL_F_MODE                    RT_BIT_32(5)
/** May change significant portions of RFLAGS.
 * @note x86 specific */
#define IEM_CIMPL_F_RFLAGS                  RT_BIT_32(6)
/** May change the status bits (X86_EFL_STATUS_BITS) in RFLAGS.
 * @note x86 specific */
#define IEM_CIMPL_F_STATUS_FLAGS            RT_BIT_32(7)
/** May trigger interrupt shadowing.
 * @note x86 specific */
#define IEM_CIMPL_F_INHIBIT_SHADOW          RT_BIT_32(8)
/** May enable interrupts, so recheck IRQ immediately after executing
 * the instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_AFTER         RT_BIT_32(9)
/** May disable interrupts, so recheck IRQ immediately before executing the
 * instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE        RT_BIT_32(10)
/** Convenience: Check for IRQ both before and after an instruction. */
#define IEM_CIMPL_F_CHECK_IRQ_BEFORE_AND_AFTER (IEM_CIMPL_F_CHECK_IRQ_BEFORE | IEM_CIMPL_F_CHECK_IRQ_AFTER)
/** May trigger a VM exit (treated like IEM_CIMPL_F_MODE atm). */
#define IEM_CIMPL_F_VMEXIT                  RT_BIT_32(11)
/** May modify FPU state.
 * @todo Not sure if this is useful yet. */
#define IEM_CIMPL_F_FPU                     RT_BIT_32(12)
/** REP prefixed instruction which may yield before updating PC.
 * @todo Not sure if this is useful, REP functions now return non-zero
 *       status if they don't update the PC.
 * @note x86 specific */
#define IEM_CIMPL_F_REP                     RT_BIT_32(13)
/** I/O instruction.
 * @todo Not sure if this is useful yet.
 * @note x86 specific */
#define IEM_CIMPL_F_IO                      RT_BIT_32(14)
/** Force end of TB after the instruction. */
#define IEM_CIMPL_F_END_TB                  RT_BIT_32(15)
/** Flag set if a branch may also modify the stack (push/pop return address). */
#define IEM_CIMPL_F_BRANCH_STACK            RT_BIT_32(16)
/** Flag set if a branch may also modify the stack (push/pop return address)
 * and switch it (load/restore SS:RSP).
 * @note x86 specific */
#define IEM_CIMPL_F_BRANCH_STACK_FAR        RT_BIT_32(17)
/** Convenience: Raise exception (technically unnecessary, since it shouldn't return VINF_SUCCESS). */
#define IEM_CIMPL_F_XCPT \
    (  IEM_CIMPL_F_BRANCH_INDIRECT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_STACK_FAR \
     | IEM_CIMPL_F_MODE | IEM_CIMPL_F_RFLAGS | IEM_CIMPL_F_VMEXIT)

/** The block calls a C-implementation instruction function with two implicit arguments.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_CIMPL                 RT_BIT_32(18)
/** The block calls an ASM-implementation instruction function.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note The python scripts will add this if missing. */
#define IEM_CIMPL_F_CALLS_AIMPL                 RT_BIT_32(19)
/** The block calls an ASM-implementation instruction function with an implicit
 * X86FXSTATE pointer argument.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE.
 * @note The python scripts will add this if missing.
 * @note x86 specific */
#define IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE    RT_BIT_32(20)
/** The block calls an ASM-implementation instruction function with an implicit
 * X86XSAVEAREA pointer argument.
 * Mutually exclusive with IEM_CIMPL_F_CALLS_CIMPL, IEM_CIMPL_F_CALLS_AIMPL and
 * IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE.
 * @note No different from IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE, so same value.
 * @note The python scripts will add this if missing.
 * @note x86 specific */
#define IEM_CIMPL_F_CALLS_AIMPL_WITH_XSTATE     IEM_CIMPL_F_CALLS_AIMPL_WITH_FXSTATE
/** @} */
1161
1162
1163/** @name IEM_F_XXX - Execution mode flags (IEMCPU::fExec, IEMTB::fFlags).
1164 *
1165 * These flags are set when entering IEM and adjusted as code is executed, such
1166 * that they will always contain the current values as instructions are
1167 * finished.
1168 *
1169 * In recompiled execution mode, (most of) these flags are included in the
1170 * translation block selection key and stored in IEMTB::fFlags alongside the
1171 * IEMTB_F_XXX flags. The latter flags uses bits 31 thru 24, which are all zero
1172 * in IEMCPU::fExec.
1173 *
1174 * @{ */
1175/** Mode: The block target mode mask.
1176 * X86: CPUMODE plus protected, v86 and pre-386 indicators.
1177 * ARM: PSTATE.nRW | PSTATE.T | PSTATE.EL.
1178 * This doesn't quite overlap with SPSR_ELx when in AARCH32 mode,
1179 * but that's life. */
1180#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1181# define IEM_F_MODE_MASK UINT32_C(0x0000001f)
1182#elif defined(VBOX_VMM_TARGET_ARMV8)
1183# define IEM_F_MODE_MASK UINT32_C(0x0000003c)
1184#endif
1185
1186#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1187/** X86 Mode: The IEMMODE part of the IEMTB_F_MODE_MASK value. */
1188# define IEM_F_MODE_X86_CPUMODE_MASK UINT32_C(0x00000003)
1189/** X86 Mode: Bit used to indicating pre-386 CPU in 16-bit mode (for eliminating
1190 * conditional in EIP/IP updating), and flat wide open CS, SS, DS, and ES in
1191 * 32-bit mode (for simplifying most memory accesses). */
1192# define IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK UINT32_C(0x00000004)
1193/** X86 Mode: Bit indicating protected mode, real mode (or SMM) when not set. */
1194# define IEM_F_MODE_X86_PROT_MASK UINT32_C(0x00000008)
1195/** X86 Mode: Bit used to indicate virtual 8086 mode (only 16-bit). */
1196# define IEM_F_MODE_X86_V86_MASK UINT32_C(0x00000010)
1197
1198/** X86 Mode: 16-bit on 386 or later. */
1199# define IEM_F_MODE_X86_16BIT UINT32_C(0x00000000)
1200/** X86 Mode: 80286, 80186 and 8086/88 targetting blocks (EIP update opt). */
1201# define IEM_F_MODE_X86_16BIT_PRE_386 UINT32_C(0x00000004)
1202/** X86 Mode: 16-bit protected mode on 386 or later. */
1203# define IEM_F_MODE_X86_16BIT_PROT UINT32_C(0x00000008)
1204/** X86 Mode: 16-bit protected mode on 386 or later. */
1205# define IEM_F_MODE_X86_16BIT_PROT_PRE_386 UINT32_C(0x0000000c)
1206/** X86 Mode: 16-bit virtual 8086 protected mode (on 386 or later). */
1207# define IEM_F_MODE_X86_16BIT_PROT_V86 UINT32_C(0x00000018)
1208
1209/** X86 Mode: 32-bit on 386 or later. */
1210# define IEM_F_MODE_X86_32BIT UINT32_C(0x00000001)
1211/** X86 Mode: 32-bit mode with wide open flat CS, SS, DS and ES. */
1212# define IEM_F_MODE_X86_32BIT_FLAT UINT32_C(0x00000005)
1213/** X86 Mode: 32-bit protected mode. */
1214# define IEM_F_MODE_X86_32BIT_PROT UINT32_C(0x00000009)
1215/** X86 Mode: 32-bit protected mode with wide open flat CS, SS, DS and ES. */
1216# define IEM_F_MODE_X86_32BIT_PROT_FLAT UINT32_C(0x0000000d)
1217
1218/** X86 Mode: 64-bit (includes protected, but not the flat bit). */
1219# define IEM_F_MODE_X86_64BIT UINT32_C(0x0000000a)
1220
1221/** X86 Mode: Checks if @a a_fExec represent a FLAT mode. */
1222# define IEM_F_MODE_X86_IS_FLAT(a_fExec) ( ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_64BIT \
1223 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_PROT_FLAT \
1224 || ((a_fExec) & IEM_F_MODE_MASK) == IEM_F_MODE_X86_32BIT_FLAT)
1225
1226/** X86: The current protection level (CPL) shift factor. */
1227# define IEM_F_X86_CPL_SHIFT 8
1228/** X86: The current protection level (CPL) mask. */
1229# define IEM_F_X86_CPL_MASK UINT32_C(0x00000300)
1230/** X86: The current protection level (CPL) shifted mask. */
1231# define IEM_F_X86_CPL_SMASK UINT32_C(0x00000003)
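/** @par Usage sketch (illustrative)
 * A minimal decode of the mode and CPL fields, assuming @a fExec holds a copy
 * of IEMCPU::fExec:
 * @code
 *      uint32_t const fExec = pVCpu->iem.s.fExec;
 *      uint8_t const  uCpl  = (uint8_t)((fExec & IEM_F_X86_CPL_MASK) >> IEM_F_X86_CPL_SHIFT);
 *      if (IEM_F_MODE_X86_IS_FLAT(fExec))
 *      {
 *          // Flat 32-bit or 64-bit mode: segment bases can be treated as zero.
 *      }
 * @endcode */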
1232
1233/** X86: Alignment checks enabled (CR0.AM=1 & EFLAGS.AC=1). */
1234# define IEM_F_X86_AC UINT32_C(0x00080000)
1235
1236/** X86 execution context.
1237 * The IEM_F_X86_CTX_XXX values are individual flags that can be combined (with
1238 * the exception of IEM_F_X86_CTX_NORMAL). This allows running VMs from SMM
1239 * mode. */
1240# define IEM_F_X86_CTX_MASK UINT32_C(0x0000f000)
1241/** X86 context: Plain regular execution context. */
1242# define IEM_F_X86_CTX_NORMAL UINT32_C(0x00000000)
1243/** X86 context: VT-x enabled. */
1244# define IEM_F_X86_CTX_VMX UINT32_C(0x00001000)
1245/** X86 context: AMD-V enabled. */
1246# define IEM_F_X86_CTX_SVM UINT32_C(0x00002000)
1247/** X86 context: In AMD-V or VT-x guest mode. */
1248# define IEM_F_X86_CTX_IN_GUEST UINT32_C(0x00004000)
1249/** X86 context: System management mode (SMM). */
1250# define IEM_F_X86_CTX_SMM UINT32_C(0x00008000)
1251
1252/** @todo Add TF+RF+INHIBIT indicator(s), so we can eliminate the conditional in
1253 * iemRegFinishClearingRF() for most situations (CPUMCTX_DBG_HIT_DRX_MASK
1254 * and CPUMCTX_DBG_DBGF_MASK are covered by the IEM_F_PENDING_BRK_XXX bits
1255 * already). */
1261
1262#endif /* X86 || doxygen */
1263
1264#if defined(VBOX_VMM_TARGET_ARMV8) || defined(DOXYGEN_RUNNING)
1265/** ARM Mode: Exception (privilege) level shift count. */
1266# define IEM_F_MODE_ARM_EL_SHIFT 2
1267/** ARM Mode: Exception (privilege) level mask. */
1268# define IEM_F_MODE_ARM_EL_MASK UINT32_C(0x0000000c)
1269/** ARM Mode: Exception (privilege) level shifted down mask. */
1270# define IEM_F_MODE_ARM_EL_SMASK UINT32_C(0x00000003)
1271/** ARM Mode: 32-bit (set) or 64-bit (clear) indicator (SPSR_ELx.M[4]). */
1272# define IEM_F_MODE_ARM_32BIT UINT32_C(0x00000010)
1273/** ARM Mode: Thumb mode indicator (SPSR_ELx.T). */
1274# define IEM_F_MODE_ARM_T32 UINT32_C(0x00000020)
1275
1276/** ARM Mode: Get the exception (privilege) level. */
1277# define IEM_F_MODE_ARM_GET_EL(a_fExec) (((a_fExec) >> IEM_F_MODE_ARM_EL_SHIFT) & IEM_F_MODE_ARM_EL_SMASK)
1278
1279/** ARM: The stack pointer index - not part of the mode. */
1280# define IEM_F_ARM_SP_IDX UINT32_C(0x00000003)
1281/** ARM: Get the SP register index. */
1282# define IEM_F_ARM_GET_SP_IDX(a_fExec) ((a_fExec) & IEM_F_ARM_SP_IDX)
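/** @par Usage sketch (illustrative)
 * Decoding the ARM mode fields, assuming @a fExec holds a copy of
 * IEMCPU::fExec:
 * @code
 *      uint8_t const bEl      = IEM_F_MODE_ARM_GET_EL(fExec);   // exception level 0..3
 *      uint8_t const idxSpReg = IEM_F_ARM_GET_SP_IDX(fExec);    // which SP_ELx is in use
 *      bool const    fAArch32 = RT_BOOL(fExec & IEM_F_MODE_ARM_32BIT);
 * @endcode */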
1283
1284/** ARM: Normal alignment checks enabled (SCTLR_ELx.A). */
1285# define IEM_F_ARM_A UINT32_C(0x00004000)
1286/** ARM: LSE2 alignment checks enabled (~SCTLR_ELx.nAA). */
1287# define IEM_F_ARM_AA UINT32_C(0x00080000)
1288
1289/** ARM: 4K page (granule) size - stage 1. */
1290# define IEM_F_ARM_S1_PAGE_4K UINT32_C(0x00000000)
1291/** ARM: 16K page (granule) size - stage 1. */
1292# define IEM_F_ARM_S1_PAGE_16K UINT32_C(0x00000200)
1293/** ARM: 64K page (granule) size - stage 1. */
1294# define IEM_F_ARM_S1_PAGE_64K UINT32_C(0x00000400)
1295/** ARM: Mask for the stage 1 page (granule) size encoding.
1296 * The encoded value is the page shift count minus 12. */
1297# define IEM_F_ARM_S1_PAGE_MASK UINT32_C(0x00000700)
1298/** ARM: The shift count for the stage 1 page (granule) size encoding value. */
1299# define IEM_F_ARM_S1_PAGE_SHIFT 8
1300/** Get the current stage 1 page (granule) shift count. */
1301# define IEM_F_ARM_GET_S1_PAGE_SHIFT(a_fExec) (12 + (((a_fExec) & IEM_F_ARM_S1_PAGE_MASK) >> IEM_F_ARM_S1_PAGE_SHIFT))
1302/** Get the current stage 1 page (granule) size. */
1303# define IEM_F_ARM_GET_S1_PAGE_SIZE(a_fExec) (1 << IEM_F_ARM_GET_S1_PAGE_SHIFT(a_fExec))
1304/** Get the current stage 1 page (granule) offset mask. */
1305# define IEM_F_ARM_GET_S1_PAGE_OFFSET_MASK(a_fExec) (IEM_F_ARM_GET_S1_PAGE_SIZE(a_fExec) - 1)
1306
1307/** Get the current TLB page (granule) shift count.
1308 * The TLB page size is the smallest of S1 and S2 page sizes.
1309 * @todo Implement stage 2 tables. */
1310# define IEM_F_ARM_GET_TLB_PAGE_SHIFT(a_fExec) IEM_F_ARM_GET_S1_PAGE_SHIFT(a_fExec)
1311/** Get the current TLB page (granule) size. */
1312# define IEM_F_ARM_GET_TLB_PAGE_SIZE(a_fExec) IEM_F_ARM_GET_S1_PAGE_SIZE(a_fExec)
1313/** Get the current TLB page (granule) offset mask. */
1314# define IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(a_fExec) IEM_F_ARM_GET_S1_PAGE_OFFSET_MASK(a_fExec)
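/** @par Usage sketch (illustrative)
 * Splitting a hypothetical guest address @a GCPtr into page base and offset
 * using the granule macros, assuming @a fExec holds a copy of IEMCPU::fExec:
 * @code
 *      uint64_t const fOffMask  = IEM_F_ARM_GET_TLB_PAGE_OFFSET_MASK(fExec); // e.g. 0xfff for 4K
 *      uint64_t const offInPage = GCPtr & fOffMask;
 *      uint64_t const GCPtrPage = GCPtr & ~fOffMask;
 * @endcode */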
1315
1316/** @todo ARM: Need copy of TCR_ELx.EPD0 & TCR_ELx.EPD1. */
1317
1318#endif /* ARM || doxygen */
1319
1320/** Bypass access handlers when set. */
1321#define IEM_F_BYPASS_HANDLERS UINT32_C(0x00010000)
1322/** Have pending hardware instruction breakpoints. */
1323#define IEM_F_PENDING_BRK_INSTR UINT32_C(0x00020000)
1324/** Have pending hardware data breakpoints. */
1325#define IEM_F_PENDING_BRK_DATA UINT32_C(0x00040000)
1326
1327/** X86: Have pending hardware I/O breakpoints. */
1328#define IEM_F_PENDING_BRK_X86_IO UINT32_C(0x00000400)
1329/** X86: Disregard the lock prefix (implied or not) when set. */
1330#define IEM_F_X86_DISREGARD_LOCK UINT32_C(0x00000800)
1331
1332/** ARM: Software step (single step).
1333 * @todo make generic? */
1334#define IEM_F_ARM_SOFTWARE_STEP UINT32_C(0x00000400)
1335
1336/** Pending breakpoint mask (what iemCalcExecDbgFlags works out). */
1337#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1338# define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA | IEM_F_PENDING_BRK_X86_IO)
1339#else
1340# define IEM_F_PENDING_BRK_MASK (IEM_F_PENDING_BRK_INSTR | IEM_F_PENDING_BRK_DATA)
1341#endif
1342
1343/** Caller configurable options. */
1344#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1345# define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS | IEM_F_X86_DISREGARD_LOCK)
1346#else
1347# define IEM_F_USER_OPTS (IEM_F_BYPASS_HANDLERS)
1348#endif
1349/** @} */
1350
1351
1352/** @name IEMTB_F_XXX - Translation block flags (IEMTB::fFlags).
1353 *
1354 * Extends the IEM_F_XXX flags (subject to IEMTB_F_IEM_F_MASK) to make up the
1355 * translation block flags. The combined flag mask (subject to
1356 * IEMTB_F_KEY_MASK) is used as part of the lookup key for translation blocks.
1357 *
1358 * @{ */
1359/** Mask of IEM_F_XXX flags included in IEMTB_F_XXX. */
1360#define IEMTB_F_IEM_F_MASK UINT32_C(0x00ffffff)
1361
1362/** Type: The block type mask. */
1363#define IEMTB_F_TYPE_MASK UINT32_C(0x03000000)
1364/** Type: Purely threaded recompiler (via tables). */
1365#define IEMTB_F_TYPE_THREADED UINT32_C(0x01000000)
1366/** Type: Native recompilation. */
1367#define IEMTB_F_TYPE_NATIVE UINT32_C(0x02000000)
1368
1369/** Set when we're starting the block in an "interrupt shadow".
1370 * We don't need to distinguish between the two types of this mask, hence the single flag.
1371 * @see CPUMCTX_INHIBIT_SHADOW, CPUMIsInInterruptShadow() */
1372#define IEMTB_F_X86_INHIBIT_SHADOW UINT32_C(0x04000000)
1373/** Set when we're currently inhibiting NMIs
1374 * @see CPUMCTX_INHIBIT_NMI, CPUMAreInterruptsInhibitedByNmi() */
1375#define IEMTB_F_X86_INHIBIT_NMI UINT32_C(0x08000000)
1376
1377/** Checks that EIP/IP is within CS.LIM before each instruction. Used when
1378 * we're close to the limit before starting a TB, as determined by
1379 * iemGetTbFlagsForCurrentPc(). */
1380#define IEMTB_F_X86_CS_LIM_CHECKS UINT32_C(0x10000000)
1381
1382/** Mask of the IEMTB_F_XXX flags that are part of the TB lookup key.
1383 *
1384 * @note We skip all of IEM_F_X86_CTX_MASK, with the exception of SMM (which we
1385 * don't implement), because we don't currently generate any context
1386 * specific code - that's all handled in CIMPL functions.
1387 *
1388 * For the threaded recompiler we don't generate any CPL specific code
1389 * either, but the native recompiler does for memory access (saves getting
1390 * the CPL from fExec and turning it into IEMTLBE_F_PT_NO_USER).
1391 * Since most OSes will not share code between rings, this shouldn't
1392 * have any real effect on TB/memory/recompiling load.
1393 */
1394#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1395# define IEMTB_F_KEY_MASK ((UINT32_MAX & ~(IEM_F_X86_CTX_MASK | IEMTB_F_TYPE_MASK)) | IEM_F_X86_CTX_SMM)
1396#else
1397# define IEMTB_F_KEY_MASK (UINT32_MAX)
1398#endif
1399/** @} */
1400
1401#ifdef VBOX_VMM_TARGET_X86
1402AssertCompile( (IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT);
1403AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
1404AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_PROT_MASK));
1405AssertCompile(!(IEM_F_MODE_X86_16BIT & IEM_F_MODE_X86_V86_MASK));
1406AssertCompile( (IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT);
1407AssertCompile( IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
1408AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_PROT_MASK));
1409AssertCompile(!(IEM_F_MODE_X86_16BIT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
1410AssertCompile( (IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT);
1411AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
1412AssertCompile( IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
1413AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT & IEM_F_MODE_X86_V86_MASK));
1414AssertCompile( (IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_16BIT);
1415AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
1416AssertCompile( IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_PROT_MASK);
1417AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_PRE_386 & IEM_F_MODE_X86_V86_MASK));
1418AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_PROT_MASK);
1419AssertCompile(!(IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
1420AssertCompile( IEM_F_MODE_X86_16BIT_PROT_V86 & IEM_F_MODE_X86_V86_MASK);
1421
1422AssertCompile( (IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT);
1423AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
1424AssertCompile(!(IEM_F_MODE_X86_32BIT & IEM_F_MODE_X86_PROT_MASK));
1425AssertCompile( (IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT);
1426AssertCompile( IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
1427AssertCompile(!(IEM_F_MODE_X86_32BIT_FLAT & IEM_F_MODE_X86_PROT_MASK));
1428AssertCompile( (IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT);
1429AssertCompile(!(IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
1430AssertCompile( IEM_F_MODE_X86_32BIT_PROT & IEM_F_MODE_X86_PROT_MASK);
1431AssertCompile( (IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_32BIT);
1432AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK);
1433AssertCompile( IEM_F_MODE_X86_32BIT_PROT_FLAT & IEM_F_MODE_X86_PROT_MASK);
1434
1435AssertCompile( (IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_CPUMODE_MASK) == IEMMODE_64BIT);
1436AssertCompile( IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_PROT_MASK);
1437AssertCompile(!(IEM_F_MODE_X86_64BIT & IEM_F_MODE_X86_FLAT_OR_PRE_386_MASK));
1438#endif /* VBOX_VMM_TARGET_X86 */
1439
1440#ifdef VBOX_VMM_TARGET_ARMV8
1441AssertCompile(IEM_F_MODE_ARM_EL_SHIFT == ARMV8_SPSR_EL2_AARCH64_EL_SHIFT);
1442AssertCompile(IEM_F_MODE_ARM_EL_MASK == ARMV8_SPSR_EL2_AARCH64_EL);
1443AssertCompile(IEM_F_MODE_ARM_32BIT == ARMV8_SPSR_EL2_AARCH64_M4);
1444AssertCompile(IEM_F_MODE_ARM_T32 == ARMV8_SPSR_EL2_AARCH64_T);
1445#endif
1446
1447/** Native instruction type for use with the native code generator.
1448 * This is a byte (uint8_t) for x86 and amd64 and uint32_t for the other(s). */
1449#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
1450typedef uint8_t IEMNATIVEINSTR;
1451#else
1452typedef uint32_t IEMNATIVEINSTR;
1453#endif
1454/** Pointer to a native instruction unit. */
1455typedef IEMNATIVEINSTR *PIEMNATIVEINSTR;
1456/** Pointer to a const native instruction unit. */
1457typedef IEMNATIVEINSTR const *PCIEMNATIVEINSTR;
1458
1459/**
1460 * A call for the threaded call table.
1461 */
1462typedef struct IEMTHRDEDCALLENTRY
1463{
1464 /** The function to call (IEMTHREADEDFUNCS). */
1465 uint16_t enmFunction;
1466
1467 /** Instruction number in the TB (for statistics). */
1468 uint8_t idxInstr;
1469 /** The opcode length. */
1470 uint8_t cbOpcode;
1471 /** Offset into IEMTB::pabOpcodes. */
1472 uint16_t offOpcode;
1473
1474    /** TB lookup table index (7 bits) and large size (1 bit).
1475 *
1476 * The default size is 1 entry, but for indirect calls and returns we set the
1477 * top bit and allocate 4 (IEM_TB_LOOKUP_TAB_LARGE_SIZE) entries. The large
1478     * tables use RIP for selecting the entry to use, as it is assumed a hash table
1479 * lookup isn't that slow compared to sequentially trying out 4 TBs.
1480 *
1481 * By default lookup table entry 0 for a TB is reserved as a fallback for
1482     * call table entries w/o explicit entries, so this member will be non-zero if
1483 * there is a lookup entry associated with this call.
1484 *
1485 * @sa IEM_TB_LOOKUP_TAB_GET_SIZE, IEM_TB_LOOKUP_TAB_GET_IDX
1486 */
1487 uint8_t uTbLookup;
1488
1489 /** Flags - IEMTHREADEDCALLENTRY_F_XXX. */
1490 uint8_t fFlags;
1491
1492 /** Generic parameters.
1493 * @todo ARM: Hope we can get away with one param here... */
1494 uint64_t auParams[3];
1495} IEMTHRDEDCALLENTRY;
1496AssertCompileSize(IEMTHRDEDCALLENTRY, sizeof(uint64_t) * 4);
1497/** Pointer to a threaded call entry. */
1498typedef struct IEMTHRDEDCALLENTRY *PIEMTHRDEDCALLENTRY;
1499/** Pointer to a const threaded call entry. */
1500typedef IEMTHRDEDCALLENTRY const *PCIEMTHRDEDCALLENTRY;
1501
1502/** The number of TB lookup table entries for a large allocation
1503 * (IEMTHRDEDCALLENTRY::uTbLookup bit 7 set). */
1504#define IEM_TB_LOOKUP_TAB_LARGE_SIZE 4
1505/** Get the lookup table size from IEMTHRDEDCALLENTRY::uTbLookup. */
1506#define IEM_TB_LOOKUP_TAB_GET_SIZE(a_uTbLookup) (!((a_uTbLookup) & 0x80) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE)
1507/** Get the first lookup table index from IEMTHRDEDCALLENTRY::uTbLookup. */
1508#define IEM_TB_LOOKUP_TAB_GET_IDX(a_uTbLookup) ((a_uTbLookup) & 0x7f)
1509/** Get the lookup table index from IEMTHRDEDCALLENTRY::uTbLookup and PC. */
1510#define IEM_TB_LOOKUP_TAB_GET_IDX_WITH_PC(a_uTbLookup, a_Pc) \
1511 (!((a_uTbLookup) & 0x80) ? (a_uTbLookup) & 0x7f : ((a_uTbLookup) & 0x7f) + ((a_Pc) & (IEM_TB_LOOKUP_TAB_LARGE_SIZE - 1)) )
1512
1513/** Make a IEMTHRDEDCALLENTRY::uTbLookup value. */
1514#define IEM_TB_LOOKUP_TAB_MAKE(a_idxTable, a_fLarge) ((a_idxTable) | ((a_fLarge) ? 0x80 : 0))
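/** @par Usage sketch (illustrative)
 * Round trip through the uTbLookup encoding, using index 5 and a large table
 * (@a GCPhysPc being a hypothetical PC value):
 * @code
 *      uint8_t const  uTbLookup = IEM_TB_LOOKUP_TAB_MAKE(5, true);                        // 0x85
 *      unsigned const cEntries  = IEM_TB_LOOKUP_TAB_GET_SIZE(uTbLookup);                  // 4
 *      unsigned const idxFirst  = IEM_TB_LOOKUP_TAB_GET_IDX(uTbLookup);                   // 5
 *      unsigned const idx       = IEM_TB_LOOKUP_TAB_GET_IDX_WITH_PC(uTbLookup, GCPhysPc); // 5..8
 * @endcode */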
1515
1516
1517/** The call entry is a jump target. */
1518#define IEMTHREADEDCALLENTRY_F_JUMP_TARGET UINT8_C(0x01)
1519
1520
1521/**
1522 * Native IEM TB 'function' typedef.
1523 *
1524 * This will throw/longjmp on occasion.
1525 *
1526 * @note AMD64 doesn't have that many non-volatile registers and does sport
1527 * 32-bit address displacements, so we don't need pCtx.
1528 *
1529 * On ARM64 pCtx allows us to directly address the whole register
1530 * context without requiring a separate indexing register holding the
1531 * offset. This saves an instruction loading the offset for each guest
1532 * CPU context access, at the cost of a non-volatile register.
1533 * Fortunately, ARM64 has quite a lot more registers.
1534 */
1535typedef
1536#ifdef RT_ARCH_AMD64
1537int FNIEMTBNATIVE(PVMCPUCC pVCpu)
1538#else
1539int FNIEMTBNATIVE(PVMCPUCC pVCpu, PCPUMCTX pCtx)
1540#endif
1541#if RT_CPLUSPLUS_PREREQ(201700)
1542 IEM_NOEXCEPT_MAY_LONGJMP
1543#endif
1544 ;
1545/** Pointer to a native IEM TB entry point function.
1546 * This will throw/longjmp on occasion. */
1547typedef FNIEMTBNATIVE *PFNIEMTBNATIVE;
1548
1549
1550/**
1551 * Translation block.
1552 *
1553 * The current plan is to just keep TBs and associated lookup hash table private
1554 * to each VCpu as that simplifies TB removal greatly (no races) and generally
1555 * avoids using expensive atomic primitives for updating lists and stuff.
1556 */
1557#pragma pack(2) /* to prevent the Thrd structure from being padded unnecessarily */
1558typedef struct IEMTB
1559{
1560 /** Next block with the same hash table entry. */
1561 struct IEMTB *pNext;
1562 /** Usage counter. */
1563 uint32_t cUsed;
1564 /** The IEMCPU::msRecompilerPollNow last time it was used. */
1565 uint32_t msLastUsed;
1566
1567 /** @name What uniquely identifies the block.
1568 * @{ */
1569 RTGCPHYS GCPhysPc;
1570 /** IEMTB_F_XXX (i.e. IEM_F_XXX ++). */
1571 uint32_t fFlags;
1572 union
1573 {
1574 struct
1575 {
1576            /** Relevant CS X86DESCATTR_XXX bits. */
1577 uint16_t fAttr;
1578 } x86;
1579 };
1580 /** @} */
1581
1582 /** Number of opcode ranges. */
1583 uint8_t cRanges;
1584 /** Statistics: Number of instructions in the block. */
1585 uint8_t cInstructions;
1586
1587 /** Type specific info. */
1588 union
1589 {
1590 struct
1591 {
1592 /** The call sequence table. */
1593 PIEMTHRDEDCALLENTRY paCalls;
1594 /** Number of calls in paCalls. */
1595 uint16_t cCalls;
1596 /** Number of calls allocated. */
1597 uint16_t cAllocated;
1598 } Thrd;
1599 struct
1600 {
1601 /** The native instructions (PFNIEMTBNATIVE). */
1602 PIEMNATIVEINSTR paInstructions;
1603 /** Number of instructions pointed to by paInstructions. */
1604 uint32_t cInstructions;
1605 } Native;
1606 /** Generic view for zeroing when freeing. */
1607 struct
1608 {
1609 uintptr_t uPtr;
1610 uint32_t uData;
1611 } Gen;
1612 };
1613
1614 /** The allocation chunk this TB belongs to. */
1615 uint8_t idxAllocChunk;
1616 /** The number of entries in the lookup table.
1617 * Because we're out of space, the TB lookup table is located before the
1618 * opcodes pointed to by pabOpcodes. */
1619 uint8_t cTbLookupEntries;
1620
1621 /** Number of bytes of opcodes stored in pabOpcodes.
1622 * @todo this field isn't really needed, aRanges keeps the actual info. */
1623 uint16_t cbOpcodes;
1624 /** Pointer to the opcode bytes this block was recompiled from.
1625 * This also points to the TB lookup table, which starts cTbLookupEntries
1626     * entries before the opcodes (we don't have room atm for another pointer). */
1627 uint8_t *pabOpcodes;
1628
1629 union
1630 {
1631 /** Native recompilation debug info if enabled.
1632 * This is only generated by the native recompiler. */
1633 struct IEMTBDBG *pDbgInfo;
1634 /** For threaded TBs and natives when debug info is disabled, this is the flat
1635 * PC corresponding to GCPhysPc. */
1636 RTGCPTR FlatPc;
1637 };
1638
1639 /* --- 64 byte cache line end --- */
1640
1641 /** Opcode ranges.
1642 *
1643 * The opcode checkers and maybe TLB loading functions will use this to figure
1644 * out what to do. The parameter will specify an entry and the opcode offset to
1645 * start at and the minimum number of bytes to verify (instruction length).
1646 *
1647     * When VT-x and AMD-V look up the opcode bytes for an exiting instruction,
1648 * they'll first translate RIP (+ cbInstr - 1) to a physical address using the
1649 * code TLB (must have a valid entry for that address) and scan the ranges to
1650 * locate the corresponding opcodes. Probably.
1651 */
1652 struct IEMTBOPCODERANGE
1653 {
1654 /** Offset within pabOpcodes. */
1655 uint16_t offOpcodes;
1656 /** Number of bytes. */
1657 uint16_t cbOpcodes;
1658 /** The page offset. */
1659 RT_GCC_EXTENSION
1660 uint16_t offPhysPage : 12;
1661 /** Unused bits. */
1662 RT_GCC_EXTENSION
1663 uint16_t u2Unused : 2;
1664 /** Index into GCPhysPc + aGCPhysPages for the physical page address. */
1665 RT_GCC_EXTENSION
1666 uint16_t idxPhysPage : 2;
1667 } aRanges[8];
1668
1669 /** Physical pages that this TB covers.
1670     * The page of GCPhysPc is the implicit element zero, so this array starts with element one. */
1671 RTGCPHYS aGCPhysPages[2];
1672} IEMTB;
1673#pragma pack()
1674AssertCompileMemberAlignment(IEMTB, GCPhysPc, sizeof(RTGCPHYS));
1675AssertCompileMemberAlignment(IEMTB, Thrd, sizeof(void *));
1676AssertCompileMemberAlignment(IEMTB, pabOpcodes, sizeof(void *));
1677AssertCompileMemberAlignment(IEMTB, pDbgInfo, sizeof(void *));
1678AssertCompileMemberAlignment(IEMTB, aGCPhysPages, sizeof(RTGCPHYS));
1679AssertCompileMemberOffset(IEMTB, aRanges, 64);
1680AssertCompileMemberSize(IEMTB, aRanges[0], 6);
1681#if 1
1682AssertCompileSize(IEMTB, 128);
1683# define IEMTB_SIZE_IS_POWER_OF_TWO /**< The IEMTB size is a power of two. */
1684#else
1685AssertCompileSize(IEMTB, 168);
1686# undef IEMTB_SIZE_IS_POWER_OF_TWO
1687#endif
1688
1689/** Pointer to a translation block. */
1690typedef IEMTB *PIEMTB;
1691/** Pointer to a const translation block. */
1692typedef IEMTB const *PCIEMTB;
1693
1694/** Gets address of the given TB lookup table entry. */
1695#define IEMTB_GET_TB_LOOKUP_TAB_ENTRY(a_pTb, a_idx) \
1696 ((PIEMTB *)&(a_pTb)->pabOpcodes[-(int)((a_pTb)->cTbLookupEntries - (a_idx)) * sizeof(PIEMTB)])
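/** @par Usage sketch (illustrative)
 * The lookup table is stored immediately before the opcode bytes, so the
 * macro above amounts to stepping backwards from pabOpcodes; shown here for
 * entry zero of a TB @a pTb:
 * @code
 *      PIEMTB *ppTbLookup = IEMTB_GET_TB_LOOKUP_TAB_ENTRY(pTb, 0);
 *      // Equivalent pointer arithmetic for entry zero:
 *      //      (PIEMTB *)pTb->pabOpcodes - pTb->cTbLookupEntries
 * @endcode */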
1697
1698/**
1699 * Gets the physical address for a TB opcode range.
1700 */
1701DECL_FORCE_INLINE(RTGCPHYS) iemTbGetRangePhysPageAddr(PCIEMTB pTb, uint8_t idxRange)
1702{
1703 Assert(idxRange < RT_MIN(pTb->cRanges, RT_ELEMENTS(pTb->aRanges)));
1704 uint8_t const idxPage = pTb->aRanges[idxRange].idxPhysPage;
1705 Assert(idxPage <= RT_ELEMENTS(pTb->aGCPhysPages));
1706 if (idxPage == 0)
1707 return pTb->GCPhysPc & ~(RTGCPHYS)GUEST_PAGE_OFFSET_MASK;
1708 Assert(!(pTb->aGCPhysPages[idxPage - 1] & GUEST_PAGE_OFFSET_MASK));
1709 return pTb->aGCPhysPages[idxPage - 1];
1710}
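/** @par Usage sketch (illustrative)
 * Translating an opcode buffer offset back to a guest physical address,
 * assuming the hypothetical inputs @a offOpcode and @a idxRange, with
 * @a offOpcode falling within range @a idxRange:
 * @code
 *      RTGCPHYS const GCPhys = iemTbGetRangePhysPageAddr(pTb, idxRange)
 *                            + pTb->aRanges[idxRange].offPhysPage
 *                            + (offOpcode - pTb->aRanges[idxRange].offOpcodes);
 * @endcode */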
1711
1712
1713/**
1714 * A chunk of memory in the TB allocator.
1715 */
1716typedef struct IEMTBCHUNK
1717{
1718 /** Pointer to the translation blocks in this chunk. */
1719 PIEMTB paTbs;
1720#ifdef IN_RING0
1721 /** Allocation handle. */
1722 RTR0MEMOBJ hMemObj;
1723#endif
1724} IEMTBCHUNK;
1725
1726/**
1727 * A per-CPU translation block allocator.
1728 *
1729 * Because of how the IEMTBCACHE uses the lower 6 bits of the TB address to keep
1730 * the length of the collision list, and of course also for cache line alignment
1731 * reasons, the TBs must be allocated with at least 64-byte alignment.
1732 * Memory is therefore allocated using one of the page-aligned allocators.
1733 *
1734 *
1735 * To avoid wasting too much memory, it is allocated piecemeal as needed,
1736 * in chunks (IEMTBCHUNK) of 2 MiB or more. The TB has an 8-bit chunk index
1737 * that enables us to quickly calculate the allocation bitmap position when
1738 * freeing the translation block.
1739 */
1740typedef struct IEMTBALLOCATOR
1741{
1742 /** Magic value (IEMTBALLOCATOR_MAGIC). */
1743 uint32_t uMagic;
1744
1745#ifdef IEMTB_SIZE_IS_POWER_OF_TWO
1746 /** Mask corresponding to cTbsPerChunk - 1. */
1747 uint32_t fChunkMask;
1748 /** Shift count corresponding to cTbsPerChunk. */
1749 uint8_t cChunkShift;
1750#else
1751 uint32_t uUnused;
1752 uint8_t bUnused;
1753#endif
1754 /** Number of chunks we're allowed to allocate. */
1755 uint8_t cMaxChunks;
1756 /** Number of chunks currently populated. */
1757 uint16_t cAllocatedChunks;
1758 /** Number of translation blocks per chunk. */
1759 uint32_t cTbsPerChunk;
1760 /** Chunk size. */
1761 uint32_t cbPerChunk;
1762
1763 /** The maximum number of TBs. */
1764 uint32_t cMaxTbs;
1765 /** Total number of TBs in the populated chunks.
1766 * (cAllocatedChunks * cTbsPerChunk) */
1767 uint32_t cTotalTbs;
1768 /** The current number of TBs in use.
1769     * The number of free TBs: cTotalTbs - cInUseTbs. */
1770 uint32_t cInUseTbs;
1771 /** Statistics: Number of the cInUseTbs that are native ones. */
1772 uint32_t cNativeTbs;
1773 /** Statistics: Number of the cInUseTbs that are threaded ones. */
1774 uint32_t cThreadedTbs;
1775
1776 /** Where to start pruning TBs from when we're out.
1777 * See iemTbAllocatorAllocSlow for details. */
1778 uint32_t iPruneFrom;
1779 /** Where to start pruning native TBs from when we're out of executable memory.
1780 * See iemTbAllocatorFreeupNativeSpace for details. */
1781 uint32_t iPruneNativeFrom;
1782 uint64_t u64Padding;
1783
1784 /** Statistics: Number of TB allocation calls. */
1785 STAMCOUNTER StatAllocs;
1786 /** Statistics: Number of TB free calls. */
1787 STAMCOUNTER StatFrees;
1788    /** Statistics: Time spent pruning. */
1789 STAMPROFILE StatPrune;
1790    /** Statistics: Time spent pruning native TBs. */
1791 STAMPROFILE StatPruneNative;
1792
1793 /** The delayed free list (see iemTbAlloctorScheduleForFree). */
1794 PIEMTB pDelayedFreeHead;
1795    /** Head of the list of free TBs. */
1796 PIEMTB pTbsFreeHead;
1797
1798 /** Allocation chunks. */
1799 IEMTBCHUNK aChunks[256];
1800} IEMTBALLOCATOR;
1801/** Pointer to a TB allocator. */
1802typedef struct IEMTBALLOCATOR *PIEMTBALLOCATOR;
1803
1804/** Magic value for the TB allocator (Emmet Harley Cohen). */
1805#define IEMTBALLOCATOR_MAGIC UINT32_C(0x19900525)
1806
1807
1808/**
1809 * A per-CPU translation block cache (hash table).
1810 *
1811 * The hash table is allocated once during IEM initialization, sized to double
1812 * the max TB count and rounded up to the nearest power of two (so we can use an
1813 * AND mask rather than a modulo division when hashing).
1814 */
1815typedef struct IEMTBCACHE
1816{
1817 /** Magic value (IEMTBCACHE_MAGIC). */
1818 uint32_t uMagic;
1819 /** Size of the hash table. This is a power of two. */
1820 uint32_t cHash;
1821 /** The mask corresponding to cHash. */
1822 uint32_t uHashMask;
1823 uint32_t uPadding;
1824
1825 /** @name Statistics
1826 * @{ */
1827 /** Number of collisions ever. */
1828 STAMCOUNTER cCollisions;
1829
1830 /** Statistics: Number of TB lookup misses. */
1831 STAMCOUNTER cLookupMisses;
1832 /** Statistics: Number of TB lookup hits via hash table (debug only). */
1833 STAMCOUNTER cLookupHits;
1834 /** Statistics: Number of TB lookup hits via TB associated lookup table (debug only). */
1835 STAMCOUNTER cLookupHitsViaTbLookupTable;
1836 STAMCOUNTER auPadding2[2];
1837 /** Statistics: Collision list length pruning. */
1838 STAMPROFILE StatPrune;
1839 /** @} */
1840
1841 /** The hash table itself.
1842 * @note The lower 6 bits of the pointer is used for keeping the collision
1843 * list length, so we can take action when it grows too long.
1844 * This works because TBs are allocated using a 64 byte (or
1845 * higher) alignment from page aligned chunks of memory, so the lower
1846 * 6 bits of the address will always be zero.
1847 * See IEMTBCACHE_PTR_COUNT_MASK, IEMTBCACHE_PTR_MAKE and friends.
1848 */
1849 RT_FLEXIBLE_ARRAY_EXTENSION
1850 PIEMTB apHash[RT_FLEXIBLE_ARRAY];
1851} IEMTBCACHE;
1852/** Pointer to a per-CPU translation block cache. */
1853typedef IEMTBCACHE *PIEMTBCACHE;
1854
1855/** Magic value for IEMTBCACHE (Johnny O'Neal). */
1856#define IEMTBCACHE_MAGIC UINT32_C(0x19561010)
1857
1858/** The collision count mask for IEMTBCACHE::apHash entries. */
1859#define IEMTBCACHE_PTR_COUNT_MASK ((uintptr_t)0x3f)
1860/** The max collision count for IEMTBCACHE::apHash entries before pruning. */
1861#define IEMTBCACHE_PTR_MAX_COUNT ((uintptr_t)0x30)
1862/** Combine a TB pointer and a collision list length into a value for an
1863 * IEMTBCACHE::apHash entry. */
1864#define IEMTBCACHE_PTR_MAKE(a_pTb, a_cCount) (PIEMTB)((uintptr_t)(a_pTb) | (a_cCount))
1865/** Get the TB pointer from an IEMTBCACHE::apHash entry. */
1867#define IEMTBCACHE_PTR_GET_TB(a_pHashEntry) (PIEMTB)((uintptr_t)(a_pHashEntry) & ~IEMTBCACHE_PTR_COUNT_MASK)
1868/** Get the collision list length from an IEMTBCACHE::apHash entry. */
1870#define IEMTBCACHE_PTR_GET_COUNT(a_pHashEntry) ((uintptr_t)(a_pHashEntry) & IEMTBCACHE_PTR_COUNT_MASK)
1871
1872/**
1873 * Calculates the hash table slot for a TB from physical PC address and TB flags.
1874 */
1875#define IEMTBCACHE_HASH(a_paCache, a_fTbFlags, a_GCPhysPc) \
1876 IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, (a_fTbFlags) & IEMTB_F_KEY_MASK, a_GCPhysPc)
1877
1878/**
1879 * Calculates the hash table slot for a TB from physical PC address and TB
1880 * flags, ASSUMING the caller has applied IEMTB_F_KEY_MASK to @a a_fTbFlags.
1881 */
1882#define IEMTBCACHE_HASH_NO_KEY_MASK(a_paCache, a_fTbFlags, a_GCPhysPc) \
1883 (((uint32_t)(a_GCPhysPc) ^ (a_fTbFlags)) & (a_paCache)->uHashMask)
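/** @par Usage sketch (illustrative)
 * A minimal hash table walk using the macros above, assuming @a pTbCache
 * points to the cache, @a fFlags holds the combined IEMTB_F_XXX value and
 * @a GCPhysPc the physical PC (pruning and statistics omitted):
 * @code
 *      uint32_t const idxHash = IEMTBCACHE_HASH(pTbCache, fFlags, GCPhysPc);
 *      PIEMTB         pTb     = IEMTBCACHE_PTR_GET_TB(pTbCache->apHash[idxHash]);
 *      while (   pTb
 *             && (   pTb->GCPhysPc != GCPhysPc
 *                 || (pTb->fFlags & IEMTB_F_KEY_MASK) != (fFlags & IEMTB_F_KEY_MASK)))
 *          pTb = pTb->pNext;
 * @endcode */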
1884
1885
1886/** @name IEMBRANCHED_F_XXX - Branched indicator (IEMCPU::fTbBranched).
1887 *
1888 * These flags parallel the main IEM_CIMPL_F_BRANCH_XXX flags.
1889 *
1890 * @{ */
1891/** Value if no branching happened recently. */
1892#define IEMBRANCHED_F_NO UINT8_C(0x00)
1893/** Flag set if direct branch, clear if absolute or indirect. */
1894#define IEMBRANCHED_F_DIRECT UINT8_C(0x01)
1895/** Flag set if indirect branch, clear if direct or relative. */
1896#define IEMBRANCHED_F_INDIRECT UINT8_C(0x02)
1897/** Flag set if relative branch, clear if absolute or indirect. */
1898#define IEMBRANCHED_F_RELATIVE UINT8_C(0x04)
1899/** Flag set if conditional branch, clear if unconditional. */
1900#define IEMBRANCHED_F_CONDITIONAL UINT8_C(0x08)
1901/** Flag set if it's a far branch.
1902 * @note x86 specific */
1903#define IEMBRANCHED_F_FAR UINT8_C(0x10)
1904/** Flag set if the stack pointer is modified. */
1905#define IEMBRANCHED_F_STACK UINT8_C(0x20)
1906/** Flag set if the stack pointer and (maybe) the stack segment are modified.
1907 * @note x86 specific */
1908#define IEMBRANCHED_F_STACK_FAR UINT8_C(0x40)
1909/** Flag set (by IEM_MC_REL_JMP_XXX) if it's a zero-byte relative jump. */
1910#define IEMBRANCHED_F_ZERO UINT8_C(0x80)
1911/** @} */
1912
1913
1914/** @def IEM_MAX_MEM_MAPPINGS
1915 * Maximum number of concurrent memory mappings needed by the target architecture.
1916 * @x86 There are a few instructions with two memory operands (push/pop [mem],
1917 * string instructions). We add another entry for safety.
1918 * @arm Except for the recently specified memcpy/move instructions,
1919 * an ARM instruction takes at most one memory operand. We use 1 and add
1920 * another entry for safety, ignoring the memcpy instructions for now. */
1921#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING) /* for now: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
1922# define IEM_MAX_MEM_MAPPINGS 3
1923#elif defined(VBOX_VMM_TARGET_ARMV8)
1924# define IEM_MAX_MEM_MAPPINGS 2
1925#else
1926# error "port me"
1927#endif
1928
1929/** @def IEM_BOUNCE_BUFFER_SIZE
1930 * The size of the bounce buffers. This is dictated by the largest memory
1931 * operand of the target architecture.
1932 * @x86 fxsave/fxrstor takes a 512 byte operand. Whether we actually need a
1933 * 512 byte bounce buffer for it is questionable...
1934 * @arm Currently we shouldn't need more than 64 bytes here (ld64b, ld4). */
1935#if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING) /* for now: */ || defined(VBOX_VMM_TARGET_AGNOSTIC)
1936# define IEM_BOUNCE_BUFFER_SIZE 512
1937#elif defined(VBOX_VMM_TARGET_ARMV8)
1938# define IEM_BOUNCE_BUFFER_SIZE 64
1939#else
1940# error "port me"
1941#endif
1942
1943
1944/**
1945 * The per-CPU IEM state.
1946 */
1947typedef struct IEMCPU
1948{
1949 /** Info status code that needs to be propagated to the IEM caller.
1950 * This cannot be passed internally, as it would complicate all success
1951 * checks within the interpreter making the code larger and almost impossible
1952 * to get right. Instead, we'll store status codes to pass on here. Each
1953 * source of these codes will perform appropriate sanity checks. */
1954 int32_t rcPassUp; /* 0x00 */
1955 /** Execution flag, IEM_F_XXX. */
1956 uint32_t fExec; /* 0x04 */
1957
1958 /** @name Decoder state.
1959 * @{ */
1960#ifdef IEM_WITH_CODE_TLB
1961 /** The offset of the next instruction byte. */
1962 uint32_t offInstrNextByte; /* 0x08 */
1963# if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1964 /** X86: The number of bytes available at pbInstrBuf for the current
1965 * instruction. This takes the max opcode length into account so that doesn't
1966 * need to be checked separately. */
1967 uint32_t cbInstrBuf; /* x86: 0x0c */
1968# else
1969 /** The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1970 * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
1971 uint32_t cbInstrBufTotal; /* !x86: 0x0c */
1972# endif
1973 /** Pointer to the page containing PC, user specified buffer or abOpcode.
1974 * This can be NULL if the page isn't mappable for some reason, in which
1975 * case we'll do fallback stuff.
1976 *
1977 * If we're executing an instruction from a user specified buffer,
1978 * IEMExecOneWithPrefetchedByPC and friends, this is not necessarily a page
1979 * aligned pointer but pointer to the user data.
1980 *
1981 * X86: For instructions crossing pages, this will start on the first page and
1982 * be advanced to the next page by the time we've decoded the instruction. This
1983 * therefore precludes stuff like <tt>pbInstrBuf[offInstrNextByte + cbInstrBuf - cbCurInstr]</tt>
1984 */
1985 uint8_t const *pbInstrBuf; /* 0x10 */
1986# if ARCH_BITS == 32
1987    uint32_t uInstrBufHigh; /**< The high dword of the host context pbInstrBuf member. */
1988# endif
1989 /** The program counter corresponding to pbInstrBuf.
1990 * This is set to a non-canonical address when we need to invalidate it. */
1991 uint64_t uInstrBufPc; /* 0x18 */
1992 /** The guest physical address corresponding to pbInstrBuf. */
1993 RTGCPHYS GCPhysInstrBuf; /* 0x20 */
1994# if defined(VBOX_VMM_TARGET_X86) || defined(DOXYGEN_RUNNING)
1995 /** X86: The number of bytes available at pbInstrBuf in total (for IEMExecLots).
1996 * This takes the CS segment limit into account.
1997 * @note Set to zero when the code TLB is flushed to trigger TLB reload. */
1998 uint16_t cbInstrBufTotal; /* x86: 0x28 */
1999 /** X86: Offset into pbInstrBuf of the first byte of the current instruction.
2000 * Can be negative to efficiently handle cross page instructions. */
2001 int16_t offCurInstrStart; /* x86: 0x2a */
2002# endif
2003
2004# if (!defined(IEM_WITH_OPAQUE_DECODER_STATE) && defined(VBOX_VMM_TARGET_X86)) || defined(DOXYGEN_RUNNING)
2005 /** X86: The prefix mask (IEM_OP_PRF_XXX). */
2006 uint32_t fPrefixes; /* x86: 0x2c */
2007 /** X86: The extra REX ModR/M register field bit (REX.R << 3). */
2008 uint8_t uRexReg; /* x86: 0x30 */
2009 /** X86: The extra REX ModR/M r/m field, SIB base and opcode reg bit
2010 * (REX.B << 3). */
2011 uint8_t uRexB; /* x86: 0x31 */
2012 /** X86: The extra REX SIB index field bit (REX.X << 3). */
2013 uint8_t uRexIndex; /* x86: 0x32 */
2014
2015 /** X86: The effective segment register (X86_SREG_XXX). */
2016 uint8_t iEffSeg; /* x86: 0x33 */
2017
2018 /** X86: The offset of the ModR/M byte relative to the start of the instruction. */
2019 uint8_t offModRm; /* x86: 0x34 */
2020
2021# ifdef IEM_WITH_CODE_TLB_AND_OPCODE_BUF
2022 /** X86: The current offset into abOpcode. */
2023 uint8_t offOpcode; /* x86: 0x35 */
2024# else
2025 uint8_t bUnused; /* x86: 0x35 */
2026# endif
2027# else /* IEM_WITH_OPAQUE_DECODER_STATE || !X86 */
2028# ifdef VBOX_VMM_TARGET_X86
2029 uint8_t abOpaqueDecoderPart1[0x36 - 0x2c];
2030# endif
2031# endif /* IEM_WITH_OPAQUE_DECODER_STATE || !X86 */
2032
2033#else /* !IEM_WITH_CODE_TLB */
2034# ifndef IEM_WITH_OPAQUE_DECODER_STATE
2035 /** The size of what has currently been fetched into abOpcode. */
2036 uint8_t cbOpcode; /* 0x08 */
2037
2038# ifdef VBOX_VMM_TARGET_X86
2039 /** X86: The current offset into abOpcode. */
2040 uint8_t offOpcode; /* x86: 0x09 */
2041 /** X86: The offset of the ModR/M byte relative to the start of the
2042 * instruction. */
2043 uint8_t offModRm; /* x86: 0x0a */
2044
2045 /** X86: The effective segment register (X86_SREG_XXX). */
2046 uint8_t iEffSeg; /* x86: 0x0b */
2047
2048 /** X86: The prefix mask (IEM_OP_PRF_XXX). */
2049 uint32_t fPrefixes; /* x86: 0x0c */
2050 /** X86: The extra REX ModR/M register field bit (REX.R << 3). */
2051 uint8_t uRexReg; /* x86: 0x10 */
2052 /** X86: The extra REX ModR/M r/m field, SIB base and opcode reg bit
2053 * (REX.B << 3). */
2054 uint8_t uRexB; /* x86: 0x11 */
2055 /** X86: The extra REX SIB index field bit (REX.X << 3). */
2056 uint8_t uRexIndex; /* x86: 0x12 */
2057# endif
2058# else /* IEM_WITH_OPAQUE_DECODER_STATE */
2059# ifndef VBOX_VMM_TARGET_X86
2060 uint8_t abOpaqueDecoderPart1[1];
2061# else
2062 uint8_t abOpaqueDecoderPart1[0x13 - 0x08];
2063# endif
2064# endif /* IEM_WITH_OPAQUE_DECODER_STATE */
2065#endif /* !IEM_WITH_CODE_TLB */
2066
2067#if (!defined(IEM_WITH_OPAQUE_DECODER_STATE) && (defined(VBOX_VMM_TARGET_X86) || !defined(IEM_WITH_CODE_TLB))) \
2068    || defined(DOXYGEN_RUNNING)
2069# ifdef VBOX_VMM_TARGET_X86
2070 /** X86: The effective operand mode. */
2071 IEMMODE enmEffOpSize; /* x86: 0x36, 0x13 */
2072 /** X86: The default addressing mode. */
2073 IEMMODE enmDefAddrMode; /* x86: 0x37, 0x14 */
2074 /** X86: The effective addressing mode. */
2075 IEMMODE enmEffAddrMode; /* x86: 0x38, 0x15 */
2076 /** X86: The default operand mode. */
2077 IEMMODE enmDefOpSize; /* x86: 0x39, 0x16 */
2078
2079 /** X86: Prefix index (VEX.pp) for two byte and three byte tables. */
2080 uint8_t idxPrefix; /* x86: 0x3a, 0x17 */
2081 /** X86: 3rd VEX/EVEX/XOP register.
2082 * Please use IEM_GET_EFFECTIVE_VVVV to access. */
2083 uint8_t uVex3rdReg; /* x86: 0x3b, 0x18 */
2084 /** X86: The VEX/EVEX/XOP length field. */
2085 uint8_t uVexLength; /* x86: 0x3c, 0x19 */
2086 /** X86: Additional EVEX stuff. */
2087 uint8_t fEvexStuff; /* x86: 0x3d, 0x1a */
2088
2089# ifndef IEM_WITH_CODE_TLB
2090 /** Explicit alignment padding. */
2091 uint8_t abAlignment2a[1]; /* x86: 0x1b */
2092# endif
2093 /** X86: The FPU opcode (FOP). */
2094 uint16_t uFpuOpcode; /* x86: 0x3e, 0x1c */
2095# ifndef IEM_WITH_CODE_TLB
2096 /** Opcode buffer alignment padding. */
2097 uint8_t abAlignment2b[2]; /* x86: 0x1e */
2098# endif
2099# else /* !VBOX_VMM_TARGET_X86 */
2100 /** Opcode buffer alignment padding. */
2101 uint8_t abAlignment2b[3+4]; /* !x86: 0x09 */
2102# endif /* !VBOX_VMM_TARGET_X86 */
2103
2104 /** The opcode bytes. */
2105# ifdef VBOX_VMM_TARGET_X86
2106 uint8_t abOpcode[15]; /* x86: 0x40, 0x20 */
2107# else
2108 union
2109 {
2110 uint8_t abOpcode[ 32]; /* !x86: 0x10 */
2111 uint16_t au16Opcode[16];
2112 uint32_t au32Opcode[ 8];
2113 };
2114# endif
2115 /** Explicit alignment padding. */
2116# ifdef VBOX_VMM_TARGET_X86
2117# ifdef IEM_WITH_CODE_TLB
2118 //uint8_t abAlignment2c[0x4f - 0x4f]; /* x86: 0x4f */
2119# else
2120 uint8_t abAlignment2c[0x4f - 0x2f]; /* x86: 0x2f */
2121# endif
2122# else
2123 uint8_t abAlignment2c[0x4f - 0x30]; /* !x86: 0x30 */
2124# endif
2125
2126#else /* IEM_WITH_OPAQUE_DECODER_STATE || (!x86 && TLB) */
2127# ifdef IEM_WITH_CODE_TLB
2128# ifdef VBOX_VMM_TARGET_X86
2129 uint8_t abOpaqueDecoderPart2[0x4f - 0x36];
2130# else
2131 uint8_t abOpaqueDecoderPart2[0x4f - 0x28];
2132# endif
2133# else
2134# ifdef VBOX_VMM_TARGET_X86
2135 uint8_t abOpaqueDecoderPart2[0x4f - 0x13];
2136# else
2137 uint8_t abOpaqueDecoderPart2[0x4f - 0x09];
2138# endif
2139# endif
2140#endif /* IEM_WITH_OPAQUE_DECODER_STATE */
2141 /** @} */
2142
2143
2144 /** The number of active guest memory mappings. */
2145 uint8_t cActiveMappings; /* 0x4f, 0x4f */
2146
2147 /** Records for tracking guest memory mappings. */
2148 struct
2149 {
2150 /** The address of the mapped bytes. */
2151 R3R0PTRTYPE(void *) pv;
2152 /** The access flags (IEM_ACCESS_XXX).
2153 * IEM_ACCESS_INVALID if the entry is unused. */
2154 uint32_t fAccess;
2155#if HC_ARCH_BITS == 64
2156 uint32_t u32Alignment4; /**< Alignment padding. */
2157#endif
2158 } aMemMappings[IEM_MAX_MEM_MAPPINGS]; /* arm: 0x50 LB 0x20 x86: 0x50 LB 0x30 */
2159
2160 /** Locking records for the mapped memory. */
2161 union
2162 {
2163 PGMPAGEMAPLOCK Lock;
2164 uint64_t au64Padding[2];
2165 } aMemMappingLocks[IEM_MAX_MEM_MAPPINGS]; /* arm: 0x70 LB 0x20 x86: 0x80 LB 0x30 */
2166
2167 /** Bounce buffer info.
2168 * This runs in parallel to aMemMappings. */
2169 struct
2170 {
2171 /** The physical address of the first byte. */
2172 RTGCPHYS GCPhysFirst;
2173 /** The physical address of the second page. */
2174 RTGCPHYS GCPhysSecond;
2175 /** The number of bytes in the first page. */
2176 uint16_t cbFirst;
2177 /** The number of bytes in the second page. */
2178 uint16_t cbSecond;
2179 /** Whether it's unassigned memory. */
2180 bool fUnassigned;
2181 /** Explicit alignment padding. */
2182 bool afAlignment5[3];
2183 } aMemBbMappings[IEM_MAX_MEM_MAPPINGS]; /* arm: 0x90 LB 0x30 x86: 0xb0 LB 0x48 */
2184
2185 /** The flags of the current exception / interrupt.
2186 * @note X86 specific? */
2187 uint32_t fCurXcpt; /* arm: 0xc0 x86: 0xf8 */
2188 /** The current exception / interrupt.
2189     * @note X86 specific? */
2190 uint8_t uCurXcpt; /* arm: 0xc4 x86: 0xfc */
2191 /** Exception / interrupt recursion depth.
2192     * @note X86 specific? */
2193 int8_t cXcptRecursions; /* arm: 0xc5 x86: 0xfb */
2194
2195 /** The next unused mapping index.
2196     * @todo try to find room for this up with cActiveMappings. */
2197 uint8_t iNextMapping; /* arm: 0xc6 x86: 0xfd */
2198 uint8_t abAlignment7[IEM_MAX_MEM_MAPPINGS == 3 ? 1 : 0x39];
2199
2200 /** Bounce buffer storage.
2201 * This runs in parallel to aMemMappings and aMemBbMappings. */
2202 struct
2203 {
2204 uint8_t ab[IEM_BOUNCE_BUFFER_SIZE];
2205 } aBounceBuffers[IEM_MAX_MEM_MAPPINGS]; /* arm: 0x100 LB 0x80 x86: 0x100 LB 0x600 */
2206
2207
2208 /** Pointer set jump buffer - ring-3 context. */
2209 R3PTRTYPE(jmp_buf *) pJmpBufR3;
2210 /** Pointer set jump buffer - ring-0 context. */
2211 R0PTRTYPE(jmp_buf *) pJmpBufR0;
2212
2213 /** @todo Should move this near @a fCurXcpt later. */
2214 /** The CR2 for the current exception / interrupt. */
2215 uint64_t uCurXcptCr2;
2216 /** The error code for the current exception / interrupt. */
2217 uint32_t uCurXcptErr;
2218
2219 /** @name Statistics
2220 * @{ */
2221 /** The number of instructions we've executed. */
2222 uint32_t cInstructions;
2223 /** The number of potential exits. */
2224 uint32_t cPotentialExits;
2225 /** Counts the VERR_IEM_INSTR_NOT_IMPLEMENTED returns. */
2226 uint32_t cRetInstrNotImplemented;
2227 /** Counts the VERR_IEM_ASPECT_NOT_IMPLEMENTED returns. */
2228 uint32_t cRetAspectNotImplemented;
2229 /** Counts informational statuses returned (other than VINF_SUCCESS). */
2230 uint32_t cRetInfStatuses;
2231 /** Counts other error statuses returned. */
2232 uint32_t cRetErrStatuses;
2233 /** Number of times rcPassUp has been used. */
2234 uint32_t cRetPassUpStatus;
2235 /** Number of times RZ left with instruction commit pending for ring-3. */
2236 uint32_t cPendingCommit;
2237 /** Number of misaligned (host sense) atomic instruction accesses. */
2238 uint32_t cMisalignedAtomics;
2239 /** Number of long jumps. */
2240 uint32_t cLongJumps;
2241 /** @} */
2242
2243 /** @name Target CPU information.
2244 * @{ */
2245#if IEM_CFG_TARGET_CPU == IEMTARGETCPU_DYNAMIC
2246 /** The target CPU. */
2247 uint8_t uTargetCpu;
2248#else
2249 uint8_t bTargetCpuPadding;
2250#endif
2251 /** For selecting assembly works matching the target CPU EFLAGS behaviour, see
2252 * IEMTARGETCPU_EFL_BEHAVIOR_XXX for values, with the 1st entry for when no
2253 * native host support and the 2nd for when there is.
2254 *
2255 * The two values are typically indexed by a g_CpumHostFeatures bit.
2256 *
2257 * This is for instance used for the BSF & BSR instructions where AMD and
2258 * Intel CPUs produce different EFLAGS. */
2259 uint8_t aidxTargetCpuEflFlavour[2];
2260
2261 /** The CPU vendor. */
2262 CPUMCPUVENDOR enmCpuVendor;
2263 /** @} */
2264
2265 /** Counts RDMSR \#GP(0) LogRel(). */
2266 uint8_t cLogRelRdMsr;
2267 /** Counts WRMSR \#GP(0) LogRel(). */
2268 uint8_t cLogRelWrMsr;
2269 /** Alignment padding. */
2270 uint8_t abAlignment9[50];
2271
2272
2273    /** @name Recompiled Execution
2274 * @{ */
2275 /** Pointer to the current translation block.
2276 * This can either be one being executed or one being compiled. */
2277 R3PTRTYPE(PIEMTB) pCurTbR3;
2278#ifdef VBOX_WITH_IEM_NATIVE_RECOMPILER_LONGJMP
2279 /** Frame pointer for the last native TB to execute. */
2280 R3PTRTYPE(void *) pvTbFramePointerR3;
2281#else
2282 R3PTRTYPE(void *) pvUnusedR3;
2283#endif
2284#ifdef IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS
2285 /** The saved host floating point control register (MXCSR on x86, FPCR on arm64)
2286     * that needs restoring when the TB finishes. IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED indicates that
2287     * the TB didn't modify it, so we don't need to restore it. */
2288# ifdef RT_ARCH_AMD64
2289 uint32_t uRegFpCtrl;
2290 /** Temporary copy of MXCSR for stmxcsr/ldmxcsr (so we don't have to fiddle with stack pointers). */
2291 uint32_t uRegMxcsrTmp;
2292# elif defined(RT_ARCH_ARM64)
2293 uint64_t uRegFpCtrl;
2294# else
2295# error "Port me"
2296# endif
2297#else
2298 uint64_t u64Unused;
2299#endif
2300 /** Pointer to the ring-3 TB cache for this EMT. */
2301 R3PTRTYPE(PIEMTBCACHE) pTbCacheR3;
2302 /** Pointer to the ring-3 TB lookup entry.
2303     * This either points to pTbLookupEntryDummyR3 or an actual lookup table
2304     * entry, so it can always safely be used w/o NULL checking. */
2305 R3PTRTYPE(PIEMTB *) ppTbLookupEntryR3;
2306#if 0 /* unused */
2307 /** The PC (RIP) at the start of pCurTbR3/pCurTbR0.
2308     * The TBs are based on physical addresses, so this is needed to correlate
2309 * RIP to opcode bytes stored in the TB (AMD-V / VT-x). */
2310 uint64_t uCurTbStartPc;
2311#endif
2312
2313 /** Number of threaded TBs executed. */
2314 uint64_t cTbExecThreaded;
2315 /** Number of native TBs executed. */
2316 uint64_t cTbExecNative;
2317
2318 /** The number of IRQ/FF checks till the next timer poll call. */
2319 uint32_t cTbsTillNextTimerPoll;
2320 /** The virtual sync time at the last timer poll call in milliseconds. */
2321 uint32_t msRecompilerPollNow;
2322 /** The virtual sync time at the last timer poll call in nanoseconds. */
2323 uint64_t nsRecompilerPollNow;
2324 /** The previous cTbsTillNextTimerPoll value. */
2325 uint32_t cTbsTillNextTimerPollPrev;
2326
2327 /** The current instruction number in a native TB.
2328 * This is set by code that may trigger an unexpected TB exit (throw/longjmp)
2329 * and will be picked up by the TB execution loop. Only used when
2330 * IEMNATIVE_WITH_INSTRUCTION_COUNTING is defined. */
2331 uint8_t idxTbCurInstr;
2332 /** @} */
2333
2334 /** @name Recompilation
2335 * @{ */
2336 /** Whether we need to check the opcode bytes for the current instruction.
2337 * This is set by a previous instruction if it modified memory or similar. */
2338 bool fTbCheckOpcodes;
2339 /** Indicates whether and how we just branched - IEMBRANCHED_F_XXX. */
2340 uint8_t fTbBranched;
2341 /** Set when GCPhysInstrBuf is updated because of a page crossing. */
2342 bool fTbCrossedPage;
2343 /** Whether to end the current TB. */
2344 bool fEndTb;
2345 /** Indicates that the current instruction is an STI. This is set by the
2346 * iemCImpl_sti code and subsequently cleared by the recompiler. */
2347 bool fTbCurInstrIsSti;
2348    /** Space reserved for recompiler data / alignment. */
2349 bool afRecompilerStuff1[1];
2350    /** Number of instructions before we need to emit an IRQ check call again.
2351     * This helps make sure we don't execute too long w/o checking for
2352 * interrupts and immediately following instructions that may enable
2353 * interrupts (e.g. POPF, IRET, STI). With STI an additional hack is
2354 * required to make sure we check following the next instruction as well, see
2355 * fTbCurInstrIsSti. */
2356 uint8_t cInstrTillIrqCheck;
2357 /** The index of the last CheckIrq call during threaded recompilation. */
2358 uint16_t idxLastCheckIrqCallNo;
2359 /** The size of the IEMTB::pabOpcodes allocation in pThrdCompileTbR3. */
2360 uint16_t cbOpcodesAllocated;
2361 /** The IEMTB::cUsed value when to attempt native recompilation of a TB. */
2362 uint32_t uTbNativeRecompileAtUsedCount;
2363 /** The IEM_CIMPL_F_XXX mask for the current instruction. */
2364 uint32_t fTbCurInstr;
2365 /** The IEM_CIMPL_F_XXX mask for the previous instruction. */
2366 uint32_t fTbPrevInstr;
2367 /** Strict: Tracking skipped EFLAGS calculations. Any bits set here are
2368 * currently not up to date in EFLAGS. */
2369 uint32_t fSkippingEFlags;
2370#if 0 /* unused */
2371 /** Previous GCPhysInstrBuf value - only valid if fTbCrossedPage is set. */
2372 RTGCPHYS GCPhysInstrBufPrev;
2373#endif
2374
2375 /** Fixed TB used for threaded recompilation.
2376 * This is allocated once with maxed-out sizes and re-used afterwards. */
2377 R3PTRTYPE(PIEMTB) pThrdCompileTbR3;
2378 /** Pointer to the ring-3 TB allocator for this EMT. */
2379 R3PTRTYPE(PIEMTBALLOCATOR) pTbAllocatorR3;
2380 /** Pointer to the ring-3 executable memory allocator for this EMT. */
2381 R3PTRTYPE(struct IEMEXECMEMALLOCATOR *) pExecMemAllocatorR3;
2382 /** Pointer to the native recompiler state for ring-3. */
2383 R3PTRTYPE(struct IEMRECOMPILERSTATE *) pNativeRecompilerStateR3;
2384 /** Dummy entry for ppTbLookupEntryR3. */
2385 R3PTRTYPE(PIEMTB) pTbLookupEntryDummyR3;
2386#ifdef IEMNATIVE_WITH_DELAYED_PC_UPDATING_DEBUG
2387 /** The debug code advances this register as if it was CPUMCTX::rip and we
2388 * didn't do delayed PC updating. When CPUMCTX::rip is finally updated,
2389 * the result is compared with this value. */
2390 uint64_t uPcUpdatingDebug;
2391#elif defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
2392 /** The SSM handle used for saving threaded TBs for recompiler profiling. */
2393 R3PTRTYPE(PSSMHANDLE) pSsmThreadedTbsForProfiling;
2394#else
2395 uint64_t u64Placeholder;
2396#endif
2397 /**
2398     * Whether we should use the instruction cache invalidation API of the
2399     * host OS or our own version of it (the host API is used on macOS). */
2400 uint8_t fHostICacheInvalidation;
2401#define IEMNATIVE_ICACHE_F_USE_HOST_API UINT8_C(0x01) /**< Use the host API (macOS) instead of our code. */
2402#define IEMNATIVE_ICACHE_F_END_WITH_ISH     UINT8_C(0x02) /**< Whether to end with an ISH barrier (arm). */
2403 bool afRecompilerStuff2[7];
2404 /** @} */
2405
2406    /** Dummy TLB entry used for accesses to pages with data breakpoints. */
2407 IEMTLBENTRY DataBreakpointTlbe;
2408
2409 /** Threaded TB statistics: Times TB execution was broken off before reaching the end. */
2410 STAMCOUNTER StatTbThreadedExecBreaks;
2411 /** Statistics: Times BltIn_CheckIrq breaks out of the TB. */
2412 STAMCOUNTER StatCheckIrqBreaks;
2413 /** Statistics: Times BltIn_CheckTimers breaks direct linking TBs. */
2414 STAMCOUNTER StatCheckTimersBreaks;
2415 /** Statistics: Times BltIn_CheckMode breaks out of the TB. */
2416 STAMCOUNTER StatCheckModeBreaks;
2417 /** Threaded TB statistics: Times execution break on call with lookup entries. */
2418 STAMCOUNTER StatTbThreadedExecBreaksWithLookup;
2419 /** Threaded TB statistics: Times execution break on call without lookup entries. */
2420 STAMCOUNTER StatTbThreadedExecBreaksWithoutLookup;
2421 /** Statistics: Times a post jump target check missed and had to find new TB. */
2422 STAMCOUNTER StatCheckBranchMisses;
2423 /** Statistics: Times a jump or page crossing required a TB with CS.LIM checking. */
2424 STAMCOUNTER StatCheckNeedCsLimChecking;
2425 /** Statistics: Times a loop was detected within a TB. */
2426 STAMCOUNTER StatTbLoopInTbDetected;
2427 /** Statistics: Times a loop back to the start of the TB was detected. */
2428 STAMCOUNTER StatTbLoopFullTbDetected;
2429 /** Statistics: Times a loop back to the start of the TB was detected, var 2. */
2430 STAMCOUNTER StatTbLoopFullTbDetected2;
2431    /** Exec memory allocator statistics: Number of times allocating executable memory failed. */
2432 STAMCOUNTER StatNativeExecMemInstrBufAllocFailed;
2433 /** Native TB statistics: Number of fully recompiled TBs. */
2434 STAMCOUNTER StatNativeFullyRecompiledTbs;
2435 /** TB statistics: Number of instructions per TB. */
2436 STAMPROFILE StatTbInstr;
2437 /** TB statistics: Number of TB lookup table entries per TB. */
2438 STAMPROFILE StatTbLookupEntries;
2439 /** Threaded TB statistics: Number of calls per TB. */
2440 STAMPROFILE StatTbThreadedCalls;
2441 /** Native TB statistics: Native code size per TB. */
2442 STAMPROFILE StatTbNativeCode;
2443 /** Native TB statistics: Profiling native recompilation. */
2444 STAMPROFILE StatNativeRecompilation;
2445 /** Native TB statistics: Number of calls per TB that were recompiled properly. */
2446 STAMPROFILE StatNativeCallsRecompiled;
2447 /** Native TB statistics: Number of threaded calls per TB that weren't recompiled. */
2448 STAMPROFILE StatNativeCallsThreaded;
2449 /** Native recompiled execution: TLB hits for data fetches. */
2450 STAMCOUNTER StatNativeTlbHitsForFetch;
2451 /** Native recompiled execution: TLB hits for data stores. */
2452 STAMCOUNTER StatNativeTlbHitsForStore;
2453 /** Native recompiled execution: TLB hits for stack accesses. */
2454 STAMCOUNTER StatNativeTlbHitsForStack;
2455 /** Native recompiled execution: TLB hits for mapped accesses. */
2456 STAMCOUNTER StatNativeTlbHitsForMapped;
2457 /** Native recompiled execution: Code TLB misses for new page. */
2458 STAMCOUNTER StatNativeCodeTlbMissesNewPage;
2459 /** Native recompiled execution: Code TLB hits for new page. */
2460 STAMCOUNTER StatNativeCodeTlbHitsForNewPage;
2461 /** Native recompiled execution: Code TLB misses for new page with offset. */
2462 STAMCOUNTER StatNativeCodeTlbMissesNewPageWithOffset;
2463 /** Native recompiled execution: Code TLB hits for new page with offset. */
2464 STAMCOUNTER StatNativeCodeTlbHitsForNewPageWithOffset;
2465
2466 /** Native recompiler: Number of calls to iemNativeRegAllocFindFree. */
2467 STAMCOUNTER StatNativeRegFindFree;
2468 /** Native recompiler: Number of times iemNativeRegAllocFindFree needed
2469 * to free a variable. */
2470 STAMCOUNTER StatNativeRegFindFreeVar;
2471 /** Native recompiler: Number of times iemNativeRegAllocFindFree did
2472 * not need to free any variables. */
2473 STAMCOUNTER StatNativeRegFindFreeNoVar;
2474 /** Native recompiler: Liveness info freed shadowed guest registers in
2475 * iemNativeRegAllocFindFree. */
2476 STAMCOUNTER StatNativeRegFindFreeLivenessUnshadowed;
2477 /** Native recompiler: Liveness info helped with the allocation in
2478 * iemNativeRegAllocFindFree. */
2479 STAMCOUNTER StatNativeRegFindFreeLivenessHelped;
2480
2481 /** Native recompiler: Number of times status flags calc has been skipped. */
2482 STAMCOUNTER StatNativeEflSkippedArithmetic;
2483 /** Native recompiler: Number of times status flags calc has been postponed. */
2484 STAMCOUNTER StatNativeEflPostponedArithmetic;
2485    /** Native recompiler: Total number of instructions in this category. */
2486 STAMCOUNTER StatNativeEflTotalArithmetic;
2487
2488 /** Native recompiler: Number of times status flags calc has been skipped. */
2489 STAMCOUNTER StatNativeEflSkippedLogical;
2490 /** Native recompiler: Number of times status flags calc has been postponed. */
2491 STAMCOUNTER StatNativeEflPostponedLogical;
2492    /** Native recompiler: Total number of instructions in this category. */
2493 STAMCOUNTER StatNativeEflTotalLogical;
2494
2495 /** Native recompiler: Number of times status flags calc has been skipped. */
2496 STAMCOUNTER StatNativeEflSkippedShift;
2497 /** Native recompiler: Number of times status flags calc has been postponed. */
2498 STAMCOUNTER StatNativeEflPostponedShift;
2499    /** Native recompiler: Total number of instructions in this category. */
2500 STAMCOUNTER StatNativeEflTotalShift;
2501
2502 /** Native recompiler: Number of emits per postponement. */
2503 STAMPROFILE StatNativeEflPostponedEmits;
2504
2505 /** Native recompiler: Number of opportunities to skip EFLAGS.CF updating. */
2506 STAMCOUNTER StatNativeLivenessEflCfSkippable;
2507 /** Native recompiler: Number of opportunities to skip EFLAGS.PF updating. */
2508 STAMCOUNTER StatNativeLivenessEflPfSkippable;
2509 /** Native recompiler: Number of opportunities to skip EFLAGS.AF updating. */
2510 STAMCOUNTER StatNativeLivenessEflAfSkippable;
2511 /** Native recompiler: Number of opportunities to skip EFLAGS.ZF updating. */
2512 STAMCOUNTER StatNativeLivenessEflZfSkippable;
2513 /** Native recompiler: Number of opportunities to skip EFLAGS.SF updating. */
2514 STAMCOUNTER StatNativeLivenessEflSfSkippable;
2515 /** Native recompiler: Number of opportunities to skip EFLAGS.OF updating. */
2516 STAMCOUNTER StatNativeLivenessEflOfSkippable;
2517 /** Native recompiler: Number of required EFLAGS.CF updates. */
2518 STAMCOUNTER StatNativeLivenessEflCfRequired;
2519 /** Native recompiler: Number of required EFLAGS.PF updates. */
2520 STAMCOUNTER StatNativeLivenessEflPfRequired;
2521 /** Native recompiler: Number of required EFLAGS.AF updates. */
2522 STAMCOUNTER StatNativeLivenessEflAfRequired;
2523 /** Native recompiler: Number of required EFLAGS.ZF updates. */
2524 STAMCOUNTER StatNativeLivenessEflZfRequired;
2525 /** Native recompiler: Number of required EFLAGS.SF updates. */
2526 STAMCOUNTER StatNativeLivenessEflSfRequired;
2527 /** Native recompiler: Number of required EFLAGS.OF updates. */
2528 STAMCOUNTER StatNativeLivenessEflOfRequired;
2529 /** Native recompiler: Number of potentially delayable EFLAGS.CF updates. */
2530 STAMCOUNTER StatNativeLivenessEflCfDelayable;
2531 /** Native recompiler: Number of potentially delayable EFLAGS.PF updates. */
2532 STAMCOUNTER StatNativeLivenessEflPfDelayable;
2533 /** Native recompiler: Number of potentially delayable EFLAGS.AF updates. */
2534 STAMCOUNTER StatNativeLivenessEflAfDelayable;
2535 /** Native recompiler: Number of potentially delayable EFLAGS.ZF updates. */
2536 STAMCOUNTER StatNativeLivenessEflZfDelayable;
2537 /** Native recompiler: Number of potentially delayable EFLAGS.SF updates. */
2538 STAMCOUNTER StatNativeLivenessEflSfDelayable;
2539 /** Native recompiler: Number of potentially delayable EFLAGS.OF updates. */
2540 STAMCOUNTER StatNativeLivenessEflOfDelayable;
2541
2542 /** Native recompiler: Number of potential PC updates in total. */
2543 STAMCOUNTER StatNativePcUpdateTotal;
2544 /** Native recompiler: Number of PC updates which could be delayed. */
2545 STAMCOUNTER StatNativePcUpdateDelayed;
2546
2547 /** Native recompiler: Number of times we had complicated dirty shadow
2548 * register situations with the other branch in IEM_MC_ENDIF. */
2549 STAMCOUNTER StatNativeEndIfOtherBranchDirty;
2550
2551 /** Native recompiler: Number of calls to iemNativeSimdRegAllocFindFree. */
2552 STAMCOUNTER StatNativeSimdRegFindFree;
2553 /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree needed
2554 * to free a variable. */
2555 STAMCOUNTER StatNativeSimdRegFindFreeVar;
2556 /** Native recompiler: Number of times iemNativeSimdRegAllocFindFree did
2557 * not need to free any variables. */
2558 STAMCOUNTER StatNativeSimdRegFindFreeNoVar;
2559 /** Native recompiler: Liveness info freed shadowed guest registers in
2560 * iemNativeSimdRegAllocFindFree. */
2561 STAMCOUNTER StatNativeSimdRegFindFreeLivenessUnshadowed;
2562 /** Native recompiler: Liveness info helped with the allocation in
2563 * iemNativeSimdRegAllocFindFree. */
2564 STAMCOUNTER StatNativeSimdRegFindFreeLivenessHelped;
2565
2566 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks. */
2567 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckPotential;
2568 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks. */
2569 STAMCOUNTER StatNativeMaybeWaitDeviceNotAvailXcptCheckPotential;
2570 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks. */
2571 STAMCOUNTER StatNativeMaybeSseXcptCheckPotential;
2572 /** Native recompiler: Number of potential IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks. */
2573 STAMCOUNTER StatNativeMaybeAvxXcptCheckPotential;
2574
2575 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() checks omitted. */
2576 STAMCOUNTER StatNativeMaybeDeviceNotAvailXcptCheckOmitted;
2577 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() checks omitted. */
2578 STAMCOUNTER StatNativeMaybeWaitDeviceNotAvailXcptCheckOmitted;
2579 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() checks omitted. */
2580 STAMCOUNTER StatNativeMaybeSseXcptCheckOmitted;
2581 /** Native recompiler: Number of IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() checks omitted. */
2582 STAMCOUNTER StatNativeMaybeAvxXcptCheckOmitted;
2583
2584 /** Native recompiler: The TB finished executing completely without jumping to an exit label.
2585 * Not available in release builds. */
2586 STAMCOUNTER StatNativeTbFinished;
2587 /** Native recompiler: The TB finished executing jumping to the ReturnBreak label. */
2588 STAMCOUNTER StatNativeTbExitReturnBreak;
2589 /** Native recompiler: The TB finished executing jumping to the ReturnBreakFF label. */
2590 STAMCOUNTER StatNativeTbExitReturnBreakFF;
2591 /** Native recompiler: The TB finished executing jumping to the ReturnWithFlags label. */
2592 STAMCOUNTER StatNativeTbExitReturnWithFlags;
2593 /** Native recompiler: The TB finished executing with other non-zero status. */
2594 STAMCOUNTER StatNativeTbExitReturnOtherStatus;
2595 /** Native recompiler: The TB finished executing via throw / long jump. */
2596 STAMCOUNTER StatNativeTbExitLongJump;
2597 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2598 * label, but directly jumped to the next TB, scenario \#1 w/o IRQ checks. */
2599 STAMCOUNTER StatNativeTbExitDirectLinking1NoIrq;
2600 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2601 * label, but directly jumped to the next TB, scenario \#1 with IRQ checks. */
2602 STAMCOUNTER StatNativeTbExitDirectLinking1Irq;
2603 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2604 * label, but directly jumped to the next TB, scenario \#2 w/o IRQ checks. */
2605 STAMCOUNTER StatNativeTbExitDirectLinking2NoIrq;
2606 /** Native recompiler: The TB finished executing jumping to the ReturnBreak
2607 * label, but directly jumped to the next TB, scenario \#2 with IRQ checks. */
2608 STAMCOUNTER StatNativeTbExitDirectLinking2Irq;
2609
2610 /** Native recompiler: The TB finished executing jumping to the RaiseDe label. */
2611 STAMCOUNTER StatNativeTbExitRaiseDe;
2612 /** Native recompiler: The TB finished executing jumping to the RaiseUd label. */
2613 STAMCOUNTER StatNativeTbExitRaiseUd;
2614 /** Native recompiler: The TB finished executing jumping to the RaiseSseRelated label. */
2615 STAMCOUNTER StatNativeTbExitRaiseSseRelated;
2616 /** Native recompiler: The TB finished executing jumping to the RaiseAvxRelated label. */
2617 STAMCOUNTER StatNativeTbExitRaiseAvxRelated;
2618 /** Native recompiler: The TB finished executing jumping to the RaiseSseAvxFpRelated label. */
2619 STAMCOUNTER StatNativeTbExitRaiseSseAvxFpRelated;
2620 /** Native recompiler: The TB finished executing jumping to the RaiseNm label. */
2621 STAMCOUNTER StatNativeTbExitRaiseNm;
2622 /** Native recompiler: The TB finished executing jumping to the RaiseGp0 label. */
2623 STAMCOUNTER StatNativeTbExitRaiseGp0;
2624 /** Native recompiler: The TB finished executing jumping to the RaiseMf label. */
2625 STAMCOUNTER StatNativeTbExitRaiseMf;
2626 /** Native recompiler: The TB finished executing jumping to the RaiseXf label. */
2627 STAMCOUNTER StatNativeTbExitRaiseXf;
2628 /** Native recompiler: The TB finished executing jumping to the ObsoleteTb label. */
2629 STAMCOUNTER StatNativeTbExitObsoleteTb;
2630
2631 /** Native recompiler: Number of full TB loops (jumps from end to start). */
2632 STAMCOUNTER StatNativeTbExitLoopFullTb;
2633
2634 /** Native recompiler: Failure situations with direct linking scenario \#1.
2635 * Counted together with StatNativeTbExitReturnBreak. Not in release builds.
2636 * @{ */
2637 STAMCOUNTER StatNativeTbExitDirectLinking1NoTb;
2638 STAMCOUNTER StatNativeTbExitDirectLinking1MismatchGCPhysPc;
2639 STAMCOUNTER StatNativeTbExitDirectLinking1MismatchFlags;
2640 STAMCOUNTER StatNativeTbExitDirectLinking1PendingIrq;
2641 /** @} */
2642
2643 /** Native recompiler: Failure situations with direct linking scenario \#2.
2644 * Counted together with StatNativeTbExitReturnBreak. Not in release builds.
2645 * @{ */
2646 STAMCOUNTER StatNativeTbExitDirectLinking2NoTb;
2647 STAMCOUNTER StatNativeTbExitDirectLinking2MismatchGCPhysPc;
2648 STAMCOUNTER StatNativeTbExitDirectLinking2MismatchFlags;
2649 STAMCOUNTER StatNativeTbExitDirectLinking2PendingIrq;
2650 /** @} */
2651
2652 /** iemMemMap and iemMemMapJmp statistics.
2653 * @{ */
2654 STAMCOUNTER StatMemMapJmp;
2655 STAMCOUNTER StatMemMapNoJmp;
2656 STAMCOUNTER StatMemBounceBufferCrossPage;
2657 STAMCOUNTER StatMemBounceBufferMapPhys;
2658 /** @} */
2659
2660 /** Timer polling statistics (debug only).
2661 * @{ */
2662 STAMPROFILE StatTimerPoll;
2663 STAMPROFILE StatTimerPollPoll;
2664 STAMPROFILE StatTimerPollRun;
2665 STAMCOUNTER StatTimerPollUnchanged;
2666 STAMCOUNTER StatTimerPollTiny;
2667 STAMCOUNTER StatTimerPollDefaultCalc;
2668 STAMCOUNTER StatTimerPollMax;
2669 STAMPROFILE StatTimerPollFactorDivision;
2670 STAMPROFILE StatTimerPollFactorMultiplication;
2671 /** @} */
2672
2673
2674 STAMCOUNTER aStatAdHoc[8];
2675
2676#ifdef IEM_WITH_TLB_TRACE
2677 /*uint64_t au64Padding[0];*/
2678#else
2679 uint64_t au64Padding[2];
2680#endif
2681
2682#ifdef IEM_WITH_TLB_TRACE
2683 /** The end (next) trace entry. */
2684 uint32_t idxTlbTraceEntry;
2685 /** Number of trace entries allocated, expressed as a power of two. */
2686 uint32_t cTlbTraceEntriesShift;
2687 /** The trace entries. */
2688 PIEMTLBTRACEENTRY paTlbTraceEntries;
2689#endif
2690
2691 /** Data TLB.
2692 * @remarks Must be 64-byte aligned. */
2693 IEMTLB DataTlb;
2694 /** Instruction TLB.
2695 * @remarks Must be 64-byte aligned. */
2696 IEMTLB CodeTlb;
2697
2698 /** Exception statistics. */
2699 STAMCOUNTER aStatXcpts[32];
2700 /** Interrupt statistics. */
2701 uint32_t aStatInts[256];
2702
2703#if defined(VBOX_WITH_STATISTICS) && !defined(DOXYGEN_RUNNING) && !defined(IEM_WITHOUT_INSTRUCTION_STATS)
2704 /** Instruction statistics for ring-0/raw-mode. */
2705 IEMINSTRSTATS StatsRZ;
2706 /** Instruction statistics for ring-3. */
2707 IEMINSTRSTATS StatsR3;
2708# ifdef VBOX_WITH_IEM_RECOMPILER
2709 /** Statistics per threaded function call.
2710 * Updated by both the threaded and native recompilers. */
2711 uint32_t acThreadedFuncStats[0x6000 /*24576*/];
2712# endif
2713#endif
2714} IEMCPU;
2715AssertCompileMemberOffset(IEMCPU, cActiveMappings, 0x4f);
2716AssertCompileMemberAlignment(IEMCPU, aMemMappings, 16);
2717AssertCompileMemberAlignment(IEMCPU, aMemMappingLocks, 16);
2718AssertCompileMemberAlignment(IEMCPU, aBounceBuffers, 64);
2719AssertCompileMemberAlignment(IEMCPU, pCurTbR3, 64);
2720AssertCompileMemberAlignment(IEMCPU, DataTlb, 64);
2721AssertCompileMemberAlignment(IEMCPU, CodeTlb, 64);
2722
2723/** Pointer to the per-CPU IEM state. */
2724typedef IEMCPU *PIEMCPU;
2725/** Pointer to the const per-CPU IEM state. */
2726typedef IEMCPU const *PCIEMCPU;
2727
2728/** @def IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED
2729 * Value indicating that the TB didn't modify the floating point control register.
2730 * @note Neither FPCR nor MXCSR accepts this as a valid value (MXCSR is not fully populated,
2731 * FPCR has the upper 32 bits reserved), so this is safe. */
2732#if defined(IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS) || defined(DOXYGEN_RUNNING)
2733# ifdef RT_ARCH_AMD64
2734# define IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED UINT32_MAX
2735# elif defined(RT_ARCH_ARM64)
2736# define IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED UINT64_MAX
2737# else
2738# error "Port me"
2739# endif
2740#endif
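/* Illustrative only: a minimal sketch of how such a sentinel is meant to be
 * used, making the "restore the guest MXCSR/FPCR on TB exit" step conditional.
 * Assumes IEMNATIVE_WITH_SIMD_FP_NATIVE_EMITTERS; the variable and helper
 * names below are hypothetical, not from this header.
 * @code
 *      uint64_t uFpCtrlSaved = IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED;
 *      // ... TB execution may replace uFpCtrlSaved with the original value ...
 *      if (uFpCtrlSaved != IEMNATIVE_SIMD_FP_CTRL_REG_NOT_MODIFIED)
 *          exampleRestoreFpCtrlReg(uFpCtrlSaved); // hypothetical helper
 * @endcode
 */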
2741
2742/** @def IEM_GET_CTX
2743 * Gets the guest CPU context for the calling EMT.
2744 * @returns PCPUMCTX
2745 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2746 */
2747#define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx)
2748
2749/** @def IEM_CTX_ASSERT
2750 * Asserts that the state bits given by @a a_fExtrnMbz are present (not external) in the CPU context.
2751 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2752 * @param a_fExtrnMbz The mask of CPUMCTX_EXTRN_XXX flags that must be zero.
2753 */
2754#define IEM_CTX_ASSERT(a_pVCpu, a_fExtrnMbz) \
2755 AssertMsg(!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz)), \
2756 ("fExtrn=%#RX64 & fExtrnMbz=%#RX64 -> %#RX64\n", \
2757 (a_pVCpu)->cpum.GstCtx.fExtrn, (a_fExtrnMbz), (a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnMbz) ))
2758
2759/** @def IEM_CTX_IMPORT_RET
2760 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2761 *
2762 * Will call the keeper (CPUM) to import the bits as needed.
2763 *
2764 * Returns on import failure.
2765 *
2766 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2767 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2768 */
2769#define IEM_CTX_IMPORT_RET(a_pVCpu, a_fExtrnImport) \
2770 do { \
2771 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2772 { /* likely */ } \
2773 else \
2774 { \
2775 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2776 AssertRCReturn(rcCtxImport, rcCtxImport); \
2777 } \
2778 } while (0)
2779
2780/** @def IEM_CTX_IMPORT_NORET
2781 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2782 *
2783 * Will call the keeper (CPUM) to import the bits as needed.
2784 *
2785 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2786 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2787 */
2788#define IEM_CTX_IMPORT_NORET(a_pVCpu, a_fExtrnImport) \
2789 do { \
2790 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2791 { /* likely */ } \
2792 else \
2793 { \
2794 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2795 AssertLogRelRC(rcCtxImport); \
2796 } \
2797 } while (0)
2798
2799/** @def IEM_CTX_IMPORT_JMP
2800 * Makes sure the CPU context bits given by @a a_fExtrnImport are imported.
2801 *
2802 * Will call the keeper (CPUM) to import the bits as needed.
2803 *
2804 * Jumps on import failure.
2805 *
2806 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2807 * @param a_fExtrnImport The mask of CPUMCTX_EXTRN_XXX flags to import.
2808 */
2809#define IEM_CTX_IMPORT_JMP(a_pVCpu, a_fExtrnImport) \
2810 do { \
2811 if (!((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fExtrnImport))) \
2812 { /* likely */ } \
2813 else \
2814 { \
2815 int rcCtxImport = CPUMImportGuestStateOnDemand(a_pVCpu, a_fExtrnImport); \
2816 AssertRCStmt(rcCtxImport, IEM_DO_LONGJMP(pVCpu, rcCtxImport)); \
2817 } \
2818 } while (0)
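/* Illustrative only: a minimal sketch (assuming an x86 target and a function
 * with pVCpu in scope) of the lazy-import pattern the three IEM_CTX_IMPORT_*
 * variants support.  CPUMCTX_EXTRN_CR0 is a real CPUMCTX_EXTRN_XXX flag; the
 * surrounding code is hypothetical.
 * @code
 *      IEM_CTX_IMPORT_RET(pVCpu, CPUMCTX_EXTRN_CR0);  // imports CR0 on demand, returns on failure
 *      IEM_CTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CR0);      // CR0 is now present in GstCtx
 *      uint64_t const uCr0 = pVCpu->cpum.GstCtx.cr0;
 * @endcode
 */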
2819
2820
2821
2822/** @def IEM_GET_TARGET_CPU
2823 * Gets the current IEMTARGETCPU value.
2824 * @returns IEMTARGETCPU value.
2825 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
2826 */
2827#if IEM_CFG_TARGET_CPU != IEMTARGETCPU_DYNAMIC
2828# define IEM_GET_TARGET_CPU(a_pVCpu) (IEM_CFG_TARGET_CPU)
2829#else
2830# define IEM_GET_TARGET_CPU(a_pVCpu) ((a_pVCpu)->iem.s.uTargetCpu)
2831#endif
2832
2833
2834/** @def IEM_TRY_SETJMP
2835 * Wrapper around setjmp / try, hiding all the ugly differences.
2836 *
2837 * @note Use with extreme care as this is a fragile macro.
2838 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2839 * @param a_rcTarget The variable that should receive the status code in case
2840 * of a longjmp/throw.
2841 */
2842/** @def IEM_TRY_SETJMP_AGAIN
2843 * For when setjmp / try is used again in the same variable scope as a previous
2844 * IEM_TRY_SETJMP invocation.
2845 */
2846/** @def IEM_CATCH_LONGJMP_BEGIN
2847 * Start wrapper for catch / setjmp-else.
2848 *
2849 * This will set up a scope.
2850 *
2851 * @note Use with extreme care as this is a fragile macro.
2852 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2853 * @param a_rcTarget The variable that should receive the status code in case
2854 * of a longjmp/throw.
2855 */
2856/** @def IEM_CATCH_LONGJMP_END
2857 * End wrapper for catch / setjmp-else.
2858 *
2859 * This will close the scope set up by IEM_CATCH_LONGJMP_BEGIN and clean up the
2860 * state.
2861 *
2862 * @note Use with extreme care as this is a fragile macro.
2863 * @param a_pVCpu The cross context virtual CPU structure of the calling EMT.
2864 */
2865#ifdef IEM_WITH_THROW_CATCH
2866# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2867 a_rcTarget = VINF_SUCCESS; \
2868 try
2869# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2870 IEM_TRY_SETJMP(a_pVCpu, a_rcTarget)
2871# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2872 catch (int rcThrown) \
2873 { \
2874 a_rcTarget = rcThrown
2875# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2876 } \
2877 ((void)0)
2878#else /* !IEM_WITH_THROW_CATCH */
2879# define IEM_TRY_SETJMP(a_pVCpu, a_rcTarget) \
2880 jmp_buf JmpBuf; \
2881 jmp_buf * volatile pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2882 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2883 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
2884# define IEM_TRY_SETJMP_AGAIN(a_pVCpu, a_rcTarget) \
2885 pSavedJmpBuf = (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf); \
2886 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = &JmpBuf; \
2887 if ((a_rcTarget = setjmp(JmpBuf)) == 0)
2888# define IEM_CATCH_LONGJMP_BEGIN(a_pVCpu, a_rcTarget) \
2889 else \
2890 { \
2891 ((void)0)
2892# define IEM_CATCH_LONGJMP_END(a_pVCpu) \
2893 } \
2894 (a_pVCpu)->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf
2895#endif /* !IEM_WITH_THROW_CATCH */
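/* Illustrative only: the intended call pattern for the setjmp/try wrappers
 * above, as a hypothetical function body.  The caller declares the status
 * variable passed as a_rcTarget (here rcStrict), and both variants assign the
 * longjmp/throw status to it.
 * @code
 *      VBOXSTRICTRC rcStrict = VINF_SUCCESS;
 *      IEM_TRY_SETJMP(pVCpu, rcStrict)
 *      {
 *          exampleStmtThatMayLongJmp(pVCpu); // hypothetical worker
 *      }
 *      IEM_CATCH_LONGJMP_BEGIN(pVCpu, rcStrict);
 *          Log(("caught %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
 *      IEM_CATCH_LONGJMP_END(pVCpu);
 *      return rcStrict;
 * @endcode
 */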
2896
2897
2898/**
2899 * Shared per-VM IEM data.
2900 */
2901typedef struct IEM
2902{
2903 /** The VMX APIC-access page handler type. */
2904 PGMPHYSHANDLERTYPE hVmxApicAccessPage;
2905#ifndef VBOX_WITHOUT_CPUID_HOST_CALL
2906 /** Set if the CPUID host call functionality is enabled. */
2907 bool fCpuIdHostCall;
2908#endif
2909} IEM;
2910
2911
2912
2913/** @name IEM_ACCESS_XXX - Access details.
2914 * @{ */
2915#define IEM_ACCESS_INVALID UINT32_C(0x000000ff)
2916#define IEM_ACCESS_TYPE_READ UINT32_C(0x00000001)
2917#define IEM_ACCESS_TYPE_WRITE UINT32_C(0x00000002)
2918#define IEM_ACCESS_TYPE_EXEC UINT32_C(0x00000004)
2919#define IEM_ACCESS_TYPE_MASK UINT32_C(0x00000007)
2920#define IEM_ACCESS_WHAT_CODE UINT32_C(0x00000010)
2921#define IEM_ACCESS_WHAT_DATA UINT32_C(0x00000020)
2922#define IEM_ACCESS_WHAT_STACK UINT32_C(0x00000030)
2923#define IEM_ACCESS_WHAT_SYS UINT32_C(0x00000040)
2924#define IEM_ACCESS_WHAT_MASK UINT32_C(0x00000070)
2925/** The writes are partial, so initialize the bounce buffer with the
2926 * original RAM content. */
2927#define IEM_ACCESS_PARTIAL_WRITE UINT32_C(0x00000100)
2928/** Used in aMemMappings to indicate that the entry is bounce buffered. */
2929#define IEM_ACCESS_BOUNCE_BUFFERED UINT32_C(0x00000200)
2930/** Bounce buffer with ring-3 write pending, first page. */
2931#define IEM_ACCESS_PENDING_R3_WRITE_1ST UINT32_C(0x00000400)
2932/** Bounce buffer with ring-3 write pending, second page. */
2933#define IEM_ACCESS_PENDING_R3_WRITE_2ND UINT32_C(0x00000800)
2934/** Not locked, accessed via the TLB. */
2935#define IEM_ACCESS_NOT_LOCKED UINT32_C(0x00001000)
2936/** Atomic access.
2937 * This enables special alignment checks and the VINF_EM_EMULATE_SPLIT_LOCK
2938 * fallback for misaligned stuff. See @bugref{10547}. */
2939#define IEM_ACCESS_ATOMIC UINT32_C(0x00002000)
2940/** Valid bit mask. */
2941#define IEM_ACCESS_VALID_MASK UINT32_C(0x00003fff)
2942/** Shift count for the TLB flags (upper word). */
2943#define IEM_ACCESS_SHIFT_TLB_FLAGS 16
2944
2945/** Atomic read+write data alias. */
2946#define IEM_ACCESS_DATA_ATOMIC (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA | IEM_ACCESS_ATOMIC)
2947/** Read+write data alias. */
2948#define IEM_ACCESS_DATA_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2949/** Write data alias. */
2950#define IEM_ACCESS_DATA_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_DATA)
2951/** Read data alias. */
2952#define IEM_ACCESS_DATA_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_DATA)
2953/** Instruction fetch alias. */
2954#define IEM_ACCESS_INSTRUCTION (IEM_ACCESS_TYPE_EXEC | IEM_ACCESS_WHAT_CODE)
2955/** Stack write alias. */
2956#define IEM_ACCESS_STACK_W (IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2957/** Stack read alias. */
2958#define IEM_ACCESS_STACK_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_STACK)
2959/** Stack read+write alias. */
2960#define IEM_ACCESS_STACK_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_STACK)
2961/** Read system table alias. */
2962#define IEM_ACCESS_SYS_R (IEM_ACCESS_TYPE_READ | IEM_ACCESS_WHAT_SYS)
2963/** Read+write system table alias. */
2964#define IEM_ACCESS_SYS_RW (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_WRITE | IEM_ACCESS_WHAT_SYS)
2965/** @} */
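/* Illustrative only: a standalone sketch showing how the IEM_ACCESS_XXX bits
 * above compose and decompose (RT_BOOL is the regular IPRT boolean-cast macro).
 * @code
 *      uint32_t const fAccess = IEM_ACCESS_DATA_RW; // TYPE_READ | TYPE_WRITE | WHAT_DATA
 *      bool const     fWrite  = RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE);                  // true
 *      bool const     fStack  = (fAccess & IEM_ACCESS_WHAT_MASK) == IEM_ACCESS_WHAT_STACK; // false
 * @endcode
 */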
2966
2967
2968/** @def IEM_DECL_MSC_GUARD_IGNORE
2969 * Disables control flow guard checks inside a method and for any function
2970 * pointers referenced by it. */
2971#if defined(_MSC_VER) && !defined(IN_RING0)
2972# define IEM_DECL_MSC_GUARD_IGNORE __declspec(guard(ignore))
2973#else
2974# define IEM_DECL_MSC_GUARD_IGNORE
2975#endif
2976
2977/** @def IEM_DECL_MSC_GUARD_NONE
2978 * Disables control flow guard checks inside a method but continues to track
2979 * function pointers referenced by it. */
2980#if defined(_MSC_VER) && !defined(IN_RING0)
2981# define IEM_DECL_MSC_GUARD_NONE __declspec(guard(nocf))
2982#else
2983# define IEM_DECL_MSC_GUARD_NONE
2984#endif
2985
2986
2987/** @def IEM_DECL_IMPL_TYPE
2988 * For typedef'ing an instruction implementation function.
2989 *
2990 * @param a_RetType The return type.
2991 * @param a_Name The name of the type.
2992 * @param a_ArgList The argument list enclosed in parentheses.
2993 */
2994
2995/** @def IEM_DECL_IMPL_DEF
2996 * For defining an instruction implementation function.
2997 *
2998 * @param a_RetType The return type.
2999 * @param a_Name The name of the function.
3000 * @param a_ArgList The argument list enclosed in parentheses.
3001 */
3002
3003#if defined(__GNUC__) && defined(RT_ARCH_X86)
3004# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
3005 __attribute__((__fastcall__)) a_RetType (a_Name) a_ArgList
3006# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
3007 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
3008# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
3009 __attribute__((__fastcall__, __nothrow__)) DECL_HIDDEN_ONLY(a_RetType) a_Name a_ArgList
3010
3011#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
3012# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
3013 a_RetType (__fastcall a_Name) a_ArgList
3014# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
3015 IEM_DECL_MSC_GUARD_IGNORE a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
3016# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
3017 IEM_DECL_MSC_GUARD_IGNORE a_RetType __fastcall a_Name a_ArgList RT_NOEXCEPT
3018
3019#elif __cplusplus >= 201700 /* P0012R1 support */
3020# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
3021 a_RetType (VBOXCALL a_Name) a_ArgList RT_NOEXCEPT
3022# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
3023 IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
3024# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
3025 IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList RT_NOEXCEPT
3026
3027#else
3028# define IEM_DECL_IMPL_TYPE(a_RetType, a_Name, a_ArgList) \
3029 a_RetType (VBOXCALL a_Name) a_ArgList
3030# define IEM_DECL_IMPL_DEF(a_RetType, a_Name, a_ArgList) \
3031 IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
3032# define IEM_DECL_IMPL_PROTO(a_RetType, a_Name, a_ArgList) \
3033 IEM_DECL_MSC_GUARD_IGNORE DECL_HIDDEN_ONLY(a_RetType) VBOXCALL a_Name a_ArgList
3034
3035#endif
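/* Illustrative only: declaring a hypothetical instruction helper with the
 * macros above; the type name, function name and parameter list are made up
 * for the example.
 * @code
 *      typedef IEM_DECL_IMPL_TYPE(void, FNIEMEXAMPLEBINU32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pfEFlags));
 *      typedef FNIEMEXAMPLEBINU32 *PFNIEMEXAMPLEBINU32;
 *      IEM_DECL_IMPL_PROTO(void, iemAImpl_example_u32, (uint32_t *pu32Dst, uint32_t u32Src, uint32_t *pfEFlags));
 * @endcode
 */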
3036
3037
3038/** @name C instruction implementations for anything slightly complicated.
3039 * @{ */
3040
3041#if defined(VBOX_VMM_TARGET_X86)
3042# define IEM_CIMPL_NEEDS_INSTR_LEN
3043#endif
3044#ifdef IEM_CIMPL_NEEDS_INSTR_LEN
3045# define IEM_CIMPL_COMMA_EXTRA_ARGS_CALL , cbInstr
3046# define IEM_CIMPL_COMMA_EXTRA_ARGS , uint8_t cbInstr
3047#else
3048# define IEM_CIMPL_COMMA_EXTRA_ARGS_CALL
3049# define IEM_CIMPL_COMMA_EXTRA_ARGS
3050#endif
3051
3052/**
3053 * For typedef'ing or declaring a C instruction implementation function taking
3054 * no extra arguments.
3055 *
3056 * @param a_Name The name of the type.
3057 */
3058#define IEM_CIMPL_DECL_TYPE_0(a_Name) \
3059 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS))
3060
3061/**
3062 * For defining a C instruction implementation function taking no extra
3063 * arguments.
3064 *
3065 * @param a_Name The name of the function
3066 */
3067# define IEM_CIMPL_DEF_0(a_Name) \
3068 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS))
3069/**
3070 * Prototype version of IEM_CIMPL_DEF_0.
3071 */
3072#define IEM_CIMPL_PROTO_0(a_Name) \
3073 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS))
3074/**
3075 * For calling a C instruction implementation function taking no extra
3076 * arguments.
3077 *
3078 * This special call macro adds default arguments to the call and allows us to
3079 * change these later.
3080 *
3081 * @param a_fn The name of the function.
3082 */
3083#define IEM_CIMPL_CALL_0(a_fn) a_fn(pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS_CALL)
3084
3085/** Type for a C instruction implementation function taking no extra
3086 * arguments. */
3087typedef IEM_CIMPL_DECL_TYPE_0(FNIEMCIMPL0);
3088/** Function pointer type for a C instruction implementation function taking
3089 * no extra arguments. */
3090typedef FNIEMCIMPL0 *PFNIEMCIMPL0;
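/* Illustrative only: a hypothetical no-extra-argument C implementation,
 * defined and invoked with the macros above.  Assumes an x86 target, where
 * IEM_CIMPL_NEEDS_INSTR_LEN adds the hidden cbInstr parameter, and a call
 * site with pVCpu (and cbInstr) in scope.
 * @code
 *      IEM_CIMPL_DEF_0(iemCImpl_example_nop)
 *      {
 *          RT_NOREF(pVCpu, cbInstr);
 *          return VINF_SUCCESS;
 *      }
 *      // ... at the call site:
 *      VBOXSTRICTRC rcStrict = IEM_CIMPL_CALL_0(iemCImpl_example_nop);
 * @endcode
 */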
3091
3092/**
3093 * For typedef'ing or declaring a C instruction implementation function taking
3094 * one extra argument.
3095 *
3096 * @param a_Name The name of the type.
3097 * @param a_Type0 The argument type.
3098 * @param a_Arg0 The argument name.
3099 */
3100#define IEM_CIMPL_DECL_TYPE_1(a_Name, a_Type0, a_Arg0) \
3101 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name,(PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0))
3102/**
3103 * For defining a C instruction implementation function taking one extra
3104 * argument.
3105 *
3106 * @param a_Name The name of the function
3107 * @param a_Type0 The argument type.
3108 * @param a_Arg0 The argument name.
3109 */
3110#define IEM_CIMPL_DEF_1(a_Name, a_Type0, a_Arg0) \
3111 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name,(PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0))
3112/**
3113 * Prototype version of IEM_CIMPL_DEF_1.
3114 */
3115#define IEM_CIMPL_PROTO_1(a_Name, a_Type0, a_Arg0) \
3116 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name,(PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0))
3117/**
3118 * For calling a C instruction implementation function taking one extra
3119 * argument.
3120 *
3121 * This special call macro adds default arguments to the call and allows us to
3122 * change these later.
3123 *
3124 * @param a_fn The name of the function.
3125 * @param a0 The name of the 1st argument.
3126 */
3127#define IEM_CIMPL_CALL_1(a_fn, a0) a_fn(pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS_CALL, (a0))
3128
3129/**
3130 * For typedef'ing or declaring a C instruction implementation function taking
3131 * two extra arguments.
3132 *
3133 * @param a_Name The name of the type.
3134 * @param a_Type0 The type of the 1st argument
3135 * @param a_Arg0 The name of the 1st argument.
3136 * @param a_Type1 The type of the 2nd argument.
3137 * @param a_Arg1 The name of the 2nd argument.
3138 */
3139#define IEM_CIMPL_DECL_TYPE_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
3140 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1))
3141/**
3142 * For defining a C instruction implementation function taking two extra
3143 * arguments.
3144 *
3145 * @param a_Name The name of the function.
3146 * @param a_Type0 The type of the 1st argument
3147 * @param a_Arg0 The name of the 1st argument.
3148 * @param a_Type1 The type of the 2nd argument.
3149 * @param a_Arg1 The name of the 2nd argument.
3150 */
3151#define IEM_CIMPL_DEF_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
3152 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1))
3153/**
3154 * Prototype version of IEM_CIMPL_DEF_2.
3155 */
3156#define IEM_CIMPL_PROTO_2(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1) \
3157 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1))
3158/**
3159 * For calling a C instruction implementation function taking two extra
3160 * arguments.
3161 *
3162 * This special call macro adds default arguments to the call and allows us to
3163 * change these later.
3164 *
3165 * @param a_fn The name of the function.
3166 * @param a0 The name of the 1st argument.
3167 * @param a1 The name of the 2nd argument.
3168 */
3169#define IEM_CIMPL_CALL_2(a_fn, a0, a1) a_fn(pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS_CALL, (a0), (a1))
3170
3171/**
3172 * For typedef'ing or declaring a C instruction implementation function taking
3173 * three extra arguments.
3174 *
3175 * @param a_Name The name of the type.
3176 * @param a_Type0 The type of the 1st argument
3177 * @param a_Arg0 The name of the 1st argument.
3178 * @param a_Type1 The type of the 2nd argument.
3179 * @param a_Arg1 The name of the 2nd argument.
3180 * @param a_Type2 The type of the 3rd argument.
3181 * @param a_Arg2 The name of the 3rd argument.
3182 */
3183#define IEM_CIMPL_DECL_TYPE_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
3184 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
3185/**
3186 * For defining a C instruction implementation function taking three extra
3187 * arguments.
3188 *
3189 * @param a_Name The name of the function.
3190 * @param a_Type0 The type of the 1st argument
3191 * @param a_Arg0 The name of the 1st argument.
3192 * @param a_Type1 The type of the 2nd argument.
3193 * @param a_Arg1 The name of the 2nd argument.
3194 * @param a_Type2 The type of the 3rd argument.
3195 * @param a_Arg2 The name of the 3rd argument.
3196 */
3197#define IEM_CIMPL_DEF_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
3198 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
3199/**
3200 * Prototype version of IEM_CIMPL_DEF_3.
3201 */
3202#define IEM_CIMPL_PROTO_3(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2) \
3203 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2))
3204/**
3205 * For calling a C instruction implementation function taking three extra
3206 * arguments.
3207 *
3208 * This special call macro adds default arguments to the call and allows us to
3209 * change these later.
3210 *
3211 * @param a_fn The name of the function.
3212 * @param a0 The name of the 1st argument.
3213 * @param a1 The name of the 2nd argument.
3214 * @param a2 The name of the 3rd argument.
3215 */
3216#define IEM_CIMPL_CALL_3(a_fn, a0, a1, a2) a_fn(pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS_CALL, (a0), (a1), (a2))
3217
3218
3219/**
3220 * For typedef'ing or declaring a C instruction implementation function taking
3221 * four extra arguments.
3222 *
3223 * @param a_Name The name of the type.
3224 * @param a_Type0 The type of the 1st argument
3225 * @param a_Arg0 The name of the 1st argument.
3226 * @param a_Type1 The type of the 2nd argument.
3227 * @param a_Arg1 The name of the 2nd argument.
3228 * @param a_Type2 The type of the 3rd argument.
3229 * @param a_Arg2 The name of the 3rd argument.
3230 * @param a_Type3 The type of the 4th argument.
3231 * @param a_Arg3 The name of the 4th argument.
3232 */
3233#define IEM_CIMPL_DECL_TYPE_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
3234 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, a_Type3 a_Arg3))
3235/**
3236 * For defining a C instruction implementation function taking four extra
3237 * arguments.
3238 *
3239 * @param a_Name The name of the function.
3240 * @param a_Type0 The type of the 1st argument
3241 * @param a_Arg0 The name of the 1st argument.
3242 * @param a_Type1 The type of the 2nd argument.
3243 * @param a_Arg1 The name of the 2nd argument.
3244 * @param a_Type2 The type of the 3rd argument.
3245 * @param a_Arg2 The name of the 3rd argument.
3246 * @param a_Type3 The type of the 4th argument.
3247 * @param a_Arg3 The name of the 4th argument.
3248 */
3249#define IEM_CIMPL_DEF_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
3250 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, \
3251 a_Type2 a_Arg2, a_Type3 a_Arg3))
3252/**
3253 * Prototype version of IEM_CIMPL_DEF_4.
3254 */
3255# define IEM_CIMPL_PROTO_4(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3) \
3256 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, \
3257 a_Type2 a_Arg2, a_Type3 a_Arg3))
3258/**
3259 * For calling a C instruction implementation function taking four extra
3260 * arguments.
3261 *
3262 * This special call macro adds default arguments to the call and allows us to
3263 * change these later.
3264 *
3265 * @param a_fn The name of the function.
3266 * @param a0 The name of the 1st argument.
3267 * @param a1 The name of the 2nd argument.
3268 * @param a2 The name of the 3rd argument.
3269 * @param a3 The name of the 4th argument.
3270 */
3271#define IEM_CIMPL_CALL_4(a_fn, a0, a1, a2, a3) a_fn(pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS_CALL, (a0), (a1), (a2), (a3))
3272
3273
3274/**
3275 * For typedef'ing or declaring a C instruction implementation function taking
3276 * five extra arguments.
3277 *
3278 * @param a_Name The name of the type.
3279 * @param a_Type0 The type of the 1st argument
3280 * @param a_Arg0 The name of the 1st argument.
3281 * @param a_Type1 The type of the 2nd argument.
3282 * @param a_Arg1 The name of the 2nd argument.
3283 * @param a_Type2 The type of the 3rd argument.
3284 * @param a_Arg2 The name of the 3rd argument.
3285 * @param a_Type3 The type of the 4th argument.
3286 * @param a_Arg3 The name of the 4th argument.
3287 * @param a_Type4 The type of the 5th argument.
3288 * @param a_Arg4 The name of the 5th argument.
3289 */
3290#define IEM_CIMPL_DECL_TYPE_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
3291 IEM_DECL_IMPL_TYPE(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, \
3292 a_Type0 a_Arg0, a_Type1 a_Arg1, a_Type2 a_Arg2, \
3293 a_Type3 a_Arg3, a_Type4 a_Arg4))
3294/**
3295 * For defining a C instruction implementation function taking five extra
3296 * arguments.
3297 *
3298 * @param a_Name The name of the function.
3299 * @param a_Type0 The type of the 1st argument
3300 * @param a_Arg0 The name of the 1st argument.
3301 * @param a_Type1 The type of the 2nd argument.
3302 * @param a_Arg1 The name of the 2nd argument.
3303 * @param a_Type2 The type of the 3rd argument.
3304 * @param a_Arg2 The name of the 3rd argument.
3305 * @param a_Type3 The type of the 4th argument.
3306 * @param a_Arg3 The name of the 4th argument.
3307 * @param a_Type4 The type of the 5th argument.
3308 * @param a_Arg4 The name of the 5th argument.
3309 */
3310#define IEM_CIMPL_DEF_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
3311 IEM_DECL_IMPL_DEF(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, \
3312 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
3313/**
3314 * Prototype version of IEM_CIMPL_DEF_5.
3315 */
3316#define IEM_CIMPL_PROTO_5(a_Name, a_Type0, a_Arg0, a_Type1, a_Arg1, a_Type2, a_Arg2, a_Type3, a_Arg3, a_Type4, a_Arg4) \
3317 IEM_DECL_IMPL_PROTO(VBOXSTRICTRC, a_Name, (PVMCPUCC pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS, a_Type0 a_Arg0, a_Type1 a_Arg1, \
3318 a_Type2 a_Arg2, a_Type3 a_Arg3, a_Type4 a_Arg4))
3319/**
3320 * For calling a C instruction implementation function taking five extra
3321 * arguments.
3322 *
3323 * This special call macro adds default arguments to the call and allows us to
3324 * change these later.
3325 *
3326 * @param a_fn The name of the function.
3327 * @param a0 The name of the 1st argument.
3328 * @param a1 The name of the 2nd argument.
3329 * @param a2 The name of the 3rd argument.
3330 * @param a3 The name of the 4th argument.
3331 * @param a4 The name of the 5th argument.
3332 */
3333#define IEM_CIMPL_CALL_5(a_fn, a0, a1, a2, a3, a4) a_fn(pVCpu IEM_CIMPL_COMMA_EXTRA_ARGS_CALL, (a0), (a1), (a2), (a3), (a4))
3334
3335/** @} */
3336
3337
3338/** @name Opcode Decoder Function Types.
3339 * @{ */
3340
3341/** @typedef PFNIEMOP
3342 * Pointer to an opcode decoder function.
3343 */
3344
3345/** @def FNIEMOP_DEF
3346 * Define an opcode decoder function.
3347 *
3348 * We're using macros for this so that adding and removing parameters as well as
3349 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL.
3350 *
3351 * @param a_Name The function name.
3352 */
3353
3354/** @typedef PFNIEMOPRM
3355 * Pointer to an opcode decoder function with RM byte.
3356 */
3357
3358/** @def FNIEMOPRM_DEF
3359 * Define an opcode decoder function with RM byte.
3360 *
3361 * We're using macros for this so that adding and removing parameters as well as
3362 * tweaking compiler-specific attributes becomes easier. See FNIEMOP_CALL_1.
3363 *
3364 * @param a_Name The function name.
3365 */
3366
3367#if defined(__GNUC__) && defined(RT_ARCH_X86)
3368typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOP)(PVMCPUCC pVCpu);
3369typedef VBOXSTRICTRC (__attribute__((__fastcall__)) * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
3370# define FNIEMOP_DEF(a_Name) \
3371 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu)
3372# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3373 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
3374# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
3375 IEM_STATIC VBOXSTRICTRC __attribute__((__fastcall__, __nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
3376
3377# define FNIEMOP_TYPE_1(a_TypeName, a_Type0, a_Name0) \
3378 typedef VBOXSTRICTRC (__attribute__((__fastcall__, __nothrow__)) * a_TypeName)(PVMCPUCC pVCpu, a_Type0 a_Name0)
3379
3380#elif defined(_MSC_VER) && defined(RT_ARCH_X86)
3381typedef VBOXSTRICTRC (__fastcall * PFNIEMOP)(PVMCPUCC pVCpu);
3382typedef VBOXSTRICTRC (__fastcall * PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
3383# define FNIEMOP_DEF(a_Name) \
3384 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
3385# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3386 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
3387# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
3388 IEM_STATIC /*__declspec(naked)*/ VBOXSTRICTRC __fastcall a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
3389
3390# define FNIEMOP_TYPE_1(a_TypeName, a_Type0, a_Name0) \
3391 typedef VBOXSTRICTRC (__fastcall * a_TypeName)(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP_TYPEDEF
3392
3393#elif defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
3394typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
3395typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
3396# define FNIEMOP_DEF(a_Name) \
3397 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu)
3398# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3399 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0)
3400# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
3401 IEM_STATIC VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1)
3402
3403# define FNIEMOP_TYPE_1(a_TypeName, a_Type0, a_Name0) \
3404 typedef VBOXSTRICTRC (* a_TypeName)(PVMCPUCC pVCpu, a_Type0 a_Name0)
3405
3406#else
3407typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPUCC pVCpu);
3408typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPUCC pVCpu, uint8_t bRm);
3409# define FNIEMOP_DEF(a_Name) \
3410 IEM_STATIC IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPUCC pVCpu) IEM_NOEXCEPT_MAY_LONGJMP
3411# define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
3412 IEM_STATIC IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP
3413# define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
3414 IEM_STATIC IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPUCC pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) IEM_NOEXCEPT_MAY_LONGJMP
3415
3416# define FNIEMOP_TYPE_1(a_TypeName, a_Type0, a_Name0) \
3417 typedef VBOXSTRICTRC (* a_TypeName)(PVMCPUCC pVCpu, a_Type0 a_Name0) IEM_NOEXCEPT_MAY_LONGJMP_TYPEDEF
3418
3419#endif
3420
3421FNIEMOP_TYPE_1(PFIEMOPU32, uint32_t, u32);
3422
3423#define FNIEMOPRM_DEF(a_Name) FNIEMOP_DEF_1(a_Name, uint8_t, bRm)
3424
3425/**
3426 * Call an opcode decoder function.
3427 *
3428 * We're using macros for this so that adding and removing parameters can be
3429 * done as we please. See FNIEMOP_DEF.
3430 */
3431#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
3432
3433/**
3434 * Call a common opcode decoder function taking one extra argument.
3435 *
3436 * We're using macros for this so that adding and removing parameters can be
3437 * done as we please. See FNIEMOP_DEF_1.
3438 */
3439#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
3440
3441/**
3442 * Call a common opcode decoder function taking two extra arguments.
3443 *
3444 * We're using macros for this so that adding and removing parameters can be
3445 * done as we please. See FNIEMOP_DEF_2.
3446 */
3447#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
3448/** @} */
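/* Illustrative only: a hypothetical ModR/M opcode decoder defined with
 * FNIEMOPRM_DEF (i.e. FNIEMOP_DEF_1) and dispatched with FNIEMOP_CALL_1;
 * pVCpu and bRm are assumed to be in scope at the call site.
 * @code
 *      FNIEMOPRM_DEF(iemOp_example_rm)
 *      {
 *          RT_NOREF(pVCpu, bRm); // a real decoder would dispatch on bRm here
 *          return VINF_SUCCESS;
 *      }
 *      // ... at the call site:
 *      return FNIEMOP_CALL_1(iemOp_example_rm, bRm);
 * @endcode
 */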
3449
3450
3451/** @name Misc Helpers
3452 * @{ */
3453
3454/** Used to shut up GCC warnings about variables that 'may be used uninitialized'
3455 * due to GCC lacking knowledge about the value range of a switch. */
3456#if RT_CPLUSPLUS_PREREQ(202000)
3457# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: [[unlikely]] AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
3458#else
3459# define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: AssertFailedReturn(VERR_IPE_NOT_REACHED_DEFAULT_CASE)
3460#endif
3461
3462/** Variant of IEM_NOT_REACHED_DEFAULT_CASE_RET that returns a custom value. */
3463#if RT_CPLUSPLUS_PREREQ(202000)
3464# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: [[unlikely]] AssertFailedReturn(a_RetValue)
3465#else
3466# define IEM_NOT_REACHED_DEFAULT_CASE_RET2(a_RetValue) default: AssertFailedReturn(a_RetValue)
3467#endif
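/* Illustrative only: typical use of the not-reached default-case macros in a
 * switch that enumerates every possible input value, inside a hypothetical
 * function returning a VBox status code.
 * @code
 *      switch (uExampleValue & 1) // only 0 and 1 are possible
 *      {
 *          case 0: return exampleEven(); // hypothetical helpers
 *          case 1: return exampleOdd();
 *          IEM_NOT_REACHED_DEFAULT_CASE_RET();
 *      }
 * @endcode
 */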
3468
3469/**
3470 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
3471 * occasion.
3472 */
3473#ifdef LOG_ENABLED
3474# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
3475 do { \
3476 /*Log*/ LogAlways(("%s: returning IEM_RETURN_ASPECT_NOT_IMPLEMENTED (line %d)\n", __FUNCTION__, __LINE__)); \
3477 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
3478 } while (0)
3479#else
3480# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() \
3481 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
3482#endif
3483
3484/**
3485 * Returns IEM_RETURN_ASPECT_NOT_IMPLEMENTED, and in debug builds logs the
3486 * occasion using the supplied logger statement.
3487 *
3488 * @param a_LoggerArgs What to log on failure.
3489 */
3490#ifdef LOG_ENABLED
3491# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
3492 do { \
3493 LogAlways((LOG_FN_FMT ": ", __PRETTY_FUNCTION__)); LogAlways(a_LoggerArgs); \
3494 /*LogFunc(a_LoggerArgs);*/ \
3495 return VERR_IEM_ASPECT_NOT_IMPLEMENTED; \
3496 } while (0)
3497#else
3498# define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) \
3499 return VERR_IEM_ASPECT_NOT_IMPLEMENTED
3500#endif
3501
3502/**
3503 * Returns a (const) pointer to the CPUMFEATURES for the guest CPU.
3504 * @returns PCCPUMFEATURES
3505 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3506 */
3507#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) (&((a_pVCpu)->CTX_SUFF(pVM)->cpum.ro.GuestFeatures))
3508
3509/**
3510 * Returns a (const) pointer to the CPUMFEATURES for the host CPU.
3511 * @returns PCCPUMFEATURES
3512 * @param a_pVCpu The cross context virtual CPU structure of the calling thread.
3513 */
3514#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) (&g_CpumHostFeatures.s)
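/* Illustrative only: a feature-gating sketch combining the getter above with
 * the IEM_RETURN_ASPECT_NOT_IMPLEMENTED macro defined earlier in this group.
 * fSse2 is assumed to be one of the x86 CPUMFEATURES members; pVCpu is assumed
 * to be in scope.
 * @code
 *      if (!IEM_GET_GUEST_CPU_FEATURES(pVCpu)->fSse2) // assumed feature member
 *          IEM_RETURN_ASPECT_NOT_IMPLEMENTED();
 * @endcode
 */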
3515
3516
3517
3518/** @} */
3519
3520uint32_t iemCalcExecDbgFlagsSlow(PVMCPUCC pVCpu);
3521VBOXSTRICTRC iemExecInjectPendingTrap(PVMCPUCC pVCpu);
3522
3523/** @} */
3524
3525
3526/** @name Memory access.
3527 * @{ */
3528VBOXSTRICTRC iemMemBounceBufferMapCrossPage(PVMCPUCC pVCpu, int iMemMap, void **ppvMem, uint8_t *pbUnmapInfo,
3529 size_t cbMem, RTGCPTR GCPtrFirst, uint32_t fAccess) RT_NOEXCEPT;
3530VBOXSTRICTRC iemMemBounceBufferMapPhys(PVMCPUCC pVCpu, unsigned iMemMap, void **ppvMem, uint8_t *pbUnmapInfo, size_t cbMem,
3531 RTGCPHYS GCPhysFirst, uint32_t fAccess, VBOXSTRICTRC rcMap) RT_NOEXCEPT;
3532VBOXSTRICTRC iemMemCommitAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
3533#ifndef IN_RING3
3534VBOXSTRICTRC iemMemCommitAndUnmapPostponeTroubleToR3(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
3535#endif
3536void iemMemRollbackAndUnmap(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
3537void iemMemRollback(PVMCPUCC pVCpu) RT_NOEXCEPT;
3538
3539void iemMemCommitAndUnmapJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
3540void iemMemCommitAndUnmapRwSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
3541void iemMemCommitAndUnmapAtSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
3542void iemMemCommitAndUnmapWoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
3543void iemMemCommitAndUnmapRoSafeJmp(PVMCPUCC pVCpu, uint8_t bUnmapInfo) IEM_NOEXCEPT_MAY_LONGJMP;
3544void iemMemRollbackAndUnmapWoSafe(PVMCPUCC pVCpu, uint8_t bUnmapInfo) RT_NOEXCEPT;
3545
3546VBOXSTRICTRC iemMemPageTranslateAndCheckAccess(PVMCPUCC pVCpu, RTGCPTR GCPtrMem, uint32_t cbAccess, uint32_t fAccess,
3547 PRTGCPHYS pGCPhysMem) RT_NOEXCEPT;
3548
3549void iemTlbInvalidateAllPhysicalSlow(PVMCPUCC pVCpu) RT_NOEXCEPT;
3550/** @} */
3551
3552/*
3553 * Recompiler related stuff.
3554 */
3555
3556DECLHIDDEN(int) iemPollTimers(PVMCC pVM, PVMCPUCC pVCpu) RT_NOEXCEPT;
3557
3558DECLCALLBACK(int) iemTbInit(PVMCC pVM, uint32_t cInitialTbs, uint32_t cMaxTbs,
3559 uint64_t cbInitialExec, uint64_t cbMaxExec, uint32_t cbChunkExec);
3560void iemThreadedTbObsolete(PVMCPUCC pVCpu, PIEMTB pTb, bool fSafeToFree);
3561DECLHIDDEN(void) iemTbAllocatorFree(PVMCPUCC pVCpu, PIEMTB pTb);
3562void iemTbAllocatorProcessDelayedFrees(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator);
3563void iemTbAllocatorFreeupNativeSpace(PVMCPUCC pVCpu, uint32_t cNeededInstrs);
3564DECLHIDDEN(PIEMTBALLOCATOR) iemTbAllocatorFreeBulkStart(PVMCPUCC pVCpu);
3565DECLHIDDEN(void) iemTbAllocatorFreeBulk(PVMCPUCC pVCpu, PIEMTBALLOCATOR pTbAllocator, PIEMTB pTb);
3566DECLHIDDEN(const char *) iemTbFlagsToString(uint32_t fFlags, char *pszBuf, size_t cbBuf) RT_NOEXCEPT;
3567DECLHIDDEN(void) iemThreadedDisassembleTb(PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
3568#if defined(VBOX_WITH_IEM_NATIVE_RECOMPILER) && defined(VBOX_WITH_SAVE_THREADED_TBS_FOR_PROFILING)
3569DECLHIDDEN(void) iemThreadedSaveTbForProfilingCleanup(PVMCPU pVCpu);
3570#endif
3571
3572
3573/** @todo FNIEMTHREADEDFUNC and friends may need more work... */
3574#if defined(__GNUC__) && !defined(IEM_WITH_THROW_CATCH)
3575typedef VBOXSTRICTRC /*__attribute__((__nothrow__))*/ FNIEMTHREADEDFUNC(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
3576typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
3577# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
3578 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
3579# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
3580 VBOXSTRICTRC __attribute__((__nothrow__)) a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2)
3581
3582#else
3583typedef VBOXSTRICTRC (FNIEMTHREADEDFUNC)(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2);
3584typedef FNIEMTHREADEDFUNC *PFNIEMTHREADEDFUNC;
3585# define IEM_DECL_IEMTHREADEDFUNC_DEF(a_Name) \
3586 IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
3587# define IEM_DECL_IEMTHREADEDFUNC_PROTO(a_Name) \
3588 IEM_DECL_MSC_GUARD_IGNORE VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint64_t uParam0, uint64_t uParam1, uint64_t uParam2) IEM_NOEXCEPT_MAY_LONGJMP
3589#endif
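/* Illustrative only: a hypothetical threaded function defined with the macro
 * above (the real instances are generated elsewhere as part of the recompiler
 * build).
 * @code
 *      IEM_DECL_IEMTHREADEDFUNC_DEF(iemThreadedFunc_Example)
 *      {
 *          RT_NOREF(pVCpu, uParam0, uParam1, uParam2);
 *          return VINF_SUCCESS;
 *      }
 * @endcode
 */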
3590
3591
3592/* Native recompiler public bits: */
3593
3594DECLHIDDEN(PIEMTB) iemNativeRecompile(PVMCPUCC pVCpu, PIEMTB pTb) RT_NOEXCEPT;
3595DECLHIDDEN(void) iemNativeDisassembleTb(PVMCPU pVCpu, PCIEMTB pTb, PCDBGFINFOHLP pHlp) RT_NOEXCEPT;
3596int iemExecMemAllocatorInit(PVMCPU pVCpu, uint64_t cbMax, uint64_t cbInitial, uint32_t cbChunk) RT_NOEXCEPT;
3597DECLHIDDEN(PIEMNATIVEINSTR) iemExecMemAllocatorAlloc(PVMCPU pVCpu, uint32_t cbReq, PIEMTB pTb, PIEMNATIVEINSTR *ppaExec,
3598 struct IEMNATIVEPERCHUNKCTX const **ppChunkCtx) RT_NOEXCEPT;
3599DECLHIDDEN(PIEMNATIVEINSTR) iemExecMemAllocatorAllocFromChunk(PVMCPU pVCpu, uint32_t idxChunk, uint32_t cbReq,
3600 PIEMNATIVEINSTR *ppaExec);
3601DECLHIDDEN(void) iemExecMemAllocatorReadyForUse(PVMCPUCC pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
3602void iemExecMemAllocatorFree(PVMCPU pVCpu, void *pv, size_t cb) RT_NOEXCEPT;
3603DECLASM(DECL_NO_RETURN(void)) iemNativeTbLongJmp(void *pvFramePointer, int rc) RT_NOEXCEPT;
3604DECLHIDDEN(struct IEMNATIVEPERCHUNKCTX const *) iemExecMemGetTbChunkCtx(PVMCPU pVCpu, PCIEMTB pTb);
3605DECLHIDDEN(int) iemNativeRecompileAttachExecMemChunkCtx(PVMCPU pVCpu, uint32_t idxChunk, struct IEMNATIVEPERCHUNKCTX const **ppCtx);
3606
3607# ifdef VBOX_VMM_TARGET_X86
3608# include "VMMAll/target-x86/IEMInternal-x86.h"
3609# elif defined(VBOX_VMM_TARGET_ARMV8)
3610# include "VMMAll/target-armv8/IEMInternal-armv8.h"
3611# endif
3612
3613#endif /* !RT_IN_ASSEMBLER - ASM-NOINC-END */
3614
3615
3616/** @} */
3617
3618RT_C_DECLS_END
3619
3620/* ASM-INC: %include "IEMInternalStruct.mac" */
3621
3622#endif /* !VMM_INCLUDED_SRC_include_IEMInternal_h */
3623