VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/target-x86/IEMAllThrdTables-x86.h@108226

Last change on this file since 108226 was 108205, checked in by vboxsync, 3 months ago

VMM/IEM: Moving x86 target specific files to VMMAll/target-x86/... [scm fixes] jiraref:VBP-1531

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.9 KB
/* $Id: IEMAllThrdTables-x86.h 108205 2025-02-13 16:28:16Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Threaded Recompilation, Instruction Tables, x86 target.
 */

/*
 * Copyright (C) 2011-2024 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllThrdTables_x86_h
#define VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllThrdTables_x86_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#endif
#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF /* A bit hackish, but it's all in IEMInline.h. */
#define VMCPU_INCL_CPUM_GST_CTX
#ifdef IN_RING0
# define VBOX_VMM_TARGET_X86
#endif
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/pdmapic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#ifndef TST_IEM_CHECK_MC
# include "IEMInternal.h"
#endif
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#ifndef TST_IEM_CHECK_MC
# include "IEMInline.h"
# include "IEMOpHlp.h"
# include "IEMMc.h"
#endif

#include "IEMThreadedFunctions.h"
#include "IEMN8veRecompiler.h" /* For a_fGstShwFlush and iemThreadedRecompilerMcDeferToCImpl0. */

/*
 * Narrow down configs here to avoid wasting time on unused configs.
 */

#ifndef IEM_WITH_CODE_TLB
# error The code TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_DATA_TLB
# error The data TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_SETJMP
# error The setjmp approach must be enabled for the recompiler.
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define g_apfnOneByteMap    g_apfnIemThreadedRecompilerOneByteMap
#define g_apfnTwoByteMap    g_apfnIemThreadedRecompilerTwoByteMap
#define g_apfnThreeByte0f3a g_apfnIemThreadedRecompilerThreeByte0f3a
#define g_apfnThreeByte0f38 g_apfnIemThreadedRecompilerThreeByte0f38
#define g_apfnVexMap1       g_apfnIemThreadedRecompilerVecMap1
#define g_apfnVexMap2       g_apfnIemThreadedRecompilerVecMap2
#define g_apfnVexMap3       g_apfnIemThreadedRecompilerVecMap3
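/* The aliases above presumably let the shared x86 decoder sources, when they
   are compiled for the threaded recompiler, emit their opcode dispatch tables
   under the recompiler-specific g_apfnIemThreadedRecompiler* names so they can
   coexist with the regular interpreter tables (inference from the renamed
   symbols; not stated in this file). */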


/*
 * Override IEM_MC_BEGIN to take down the IEM_CIMPL_F_XXX flags.
 */
#undef IEM_MC_BEGIN
#define IEM_MC_BEGIN(a_fMcFlags, a_fCImplFlags) \
    { \
        pVCpu->iem.s.fTbCurInstr = (a_fCImplFlags) /*| ((a_fMcFlags) << 20*/

/*
 * Override IEM_MC_CALC_RM_EFF_ADDR to use iemOpHlpCalcRmEffAddrJmpEx and produce uEffAddrInfo.
 */
#undef IEM_MC_CALC_RM_EFF_ADDR
#ifndef IEM_WITH_SETJMP
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff), &uEffAddrInfo))
#else
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
#endif
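/* Illustrative only (hypothetical decoder snippet, not from this file): a
   memory-operand decoder body would invoke the override as, e.g.,
       IEM_MC_CALC_RM_EFF_ADDR(GCPtrEffSrc, bRm, 0);
   with the override additionally leaving the 'uEffAddrInfo' local behind in
   the enclosing scope for later use. */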

/*
 * Likewise override IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES so we fetch all the opcodes.
 */
#undef IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES
#define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        uint64_t uEffAddrInfo; \
        (void)iemOpHlpCalcRmEffAddrJmpEx(pVCpu, bRm, 0, &uEffAddrInfo); \
    } while (0)
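/* The computed effective address is discarded ((void) cast); the call above is
   made purely for its side effect of pulling the ModR/M, SIB and displacement
   bytes through the opcode fetcher so they get recorded for the TB - hence
   "fetch all the opcodes" rather than skipping them. */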

/*
 * Override the IEM_MC_REL_JMP_S*_AND_FINISH macros to check for zero byte jumps.
 */
#undef IEM_MC_REL_JMP_S8_AND_FINISH
#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i8) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize); \
    } while (0)

#undef IEM_MC_REL_JMP_S16_AND_FINISH
#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i16) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16)); \
    } while (0)

#undef IEM_MC_REL_JMP_S32_AND_FINISH
#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i32) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize); \
    } while (0)
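/* IEMBRANCHED_F_ZERO marks a taken relative branch with a zero displacement,
   i.e. one that lands on the instruction immediately following it; presumably
   this lets the TB compiler treat such "jumps to the next instruction"
   differently from real changes of control flow. */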


#ifndef IEM_WITH_INTRA_TB_JUMPS
/**
 * Stub for a no-jumps config; see IEMAllThrdRecompiler.cpp for the real thing.
 */
DECL_FORCE_INLINE(int) iemThreadedCompileBackAtFirstInstruction(PVMCPU pVCpu, PIEMTB pTb)
{
    RT_NOREF(pTb);
    STAM_REL_COUNTER_INC(&pVCpu->iem.s.StatTbLoopFullTbDetected2);
    return VINF_IEM_RECOMPILE_END_TB;
}
#endif
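/* Without IEM_WITH_INTRA_TB_JUMPS the compiler presumably cannot loop back
   within the current TB, so this stub merely bumps the statistics counter and
   asks the caller to end the TB; the real implementation lives in
   IEMAllThrdRecompiler.cpp as noted above. */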


/*
 * Emit call macros.
 */
#define IEM_MC2_BEGIN_EMIT_CALLS(a_fCheckIrqBefore) \
    { \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        uint8_t const cbInstrMc2 = IEM_GET_INSTR_LEN(pVCpu); \
        AssertMsg(pVCpu->iem.s.offOpcode == cbInstrMc2, \
                  ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, cbInstrMc2, \
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
        \
        /* If we need to check for IRQs before the instruction, we do that before \
           adding any opcodes as it may abort the instruction. \
           Note! During compilation, we may swap IRQ and #PF exceptions here \
                 in a manner that a real CPU would not do. However, it shouldn't \
                 be something that is easy (if at all possible) to observe in the \
                 guest, so fine. The unexpected end-of-tb below has the same \
                 potential "issue". */ \
        if (!(a_fCheckIrqBefore) || iemThreadedCompileEmitIrqCheckBefore(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        /* No page crossing, right? */ \
        uint16_t const offOpcodeMc2 = pTb->cbOpcodes; \
        uint8_t const  idxRangeMc2  = pTb->cRanges - 1; \
        if (   !pVCpu->iem.s.fTbCrossedPage \
            && !pVCpu->iem.s.fTbCheckOpcodes \
            && !pVCpu->iem.s.fTbBranched \
            && !(pTb->fFlags & IEMTB_F_CS_LIM_CHECKS)) \
        { \
            /* Break/loop if we're back to the first instruction in the TB again. */ \
            if (   pTb->aRanges[idxRangeMc2].idxPhysPage != 0 \
                ||    (unsigned)pTb->aRanges[idxRangeMc2].offPhysPage + (unsigned)pTb->aRanges[idxRangeMc2].cbOpcodes \
                   != (pTb->GCPhysPc & GUEST_PAGE_OFFSET_MASK) \
                || offOpcodeMc2 == 0) \
            { \
                /** @todo Custom copy function, given range is 1 thru 15 bytes. */ \
                memcpy(&pTb->pabOpcodes[offOpcodeMc2], pVCpu->iem.s.abOpcode, pVCpu->iem.s.offOpcode); \
                pTb->cbOpcodes = offOpcodeMc2 + pVCpu->iem.s.offOpcode; \
                pTb->aRanges[idxRangeMc2].cbOpcodes += cbInstrMc2; \
                Assert(pTb->cbOpcodes <= pVCpu->iem.s.cbOpcodesAllocated); \
            } \
            else \
                return iemThreadedCompileBackAtFirstInstruction(pVCpu, pTb); \
        } \
        else if (iemThreadedCompileBeginEmitCallsComplications(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        uint8_t const idxInstrMc2 = pTb->cInstructions; \
        \
        /* Emit hardware instruction breakpoint check if enabled. */ \
        if (!(pTb->fFlags & IEM_F_PENDING_BRK_INSTR)) \
        { /* likely */ } \
        else \
            IEM_MC2_EMIT_CALL_0(kIemThreadedFunc_BltIn_CheckHwInstrBps)
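/* Note: IEM_MC2_BEGIN_EMIT_CALLS opens a block and leaves the pTb, cbInstrMc2,
   offOpcodeMc2 and idxInstrMc2 locals in scope; the IEM_MC2_EMIT_CALL_* macros
   below rely on them, and IEM_MC2_END_EMIT_CALLS closes the block again. */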

#define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        LogFlow(("Call #%u: " #a_enmFunction "\n", pTb->Thrd.cCalls)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_1(a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_2(a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_3(a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64 a2=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1, (uint64_t)a_uArg2)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->uTbLookup   = 0; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)
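/* The _WITH_TB_LOOKUP_ variants below are identical to the plain emit macros
   except that they also reserve TB-lookup table entries: pCall->uTbLookup is
   set via IEM_TB_LOOKUP_TAB_MAKE and pTb->cTbLookupEntries grows by one entry,
   or by IEM_TB_LOOKUP_TAB_LARGE_SIZE when a_fLargeTbLookup is given. */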

#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_0(a_fLargeTbLookup, a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        LogFlow(("Call #%u: " #a_enmFunction "\n", pTb->Thrd.cCalls)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_1(a_fLargeTbLookup, a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_2(a_fLargeTbLookup, a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_WITH_TB_LOOKUP_3(a_fLargeTbLookup, a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        LogFlow(("Call #%u: " #a_enmFunction " a0=%RX64 a1=%RX64 a2=%RX64\n", pTb->Thrd.cCalls, (uint64_t)a_uArg0, (uint64_t)a_uArg1, (uint64_t)a_uArg2)); \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->uTbLookup   = IEM_TB_LOOKUP_TAB_MAKE(pTb->cTbLookupEntries, a_fLargeTbLookup); \
        pTb->cTbLookupEntries += !(a_fLargeTbLookup) ? 1 : IEM_TB_LOOKUP_TAB_LARGE_SIZE; \
        pCall->fFlags      = 0; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)

#define IEM_MC2_END_EMIT_CALLS(a_fCImplFlags) \
        Assert(pTb->cInstructions <= pTb->Thrd.cCalls); \
        if (pTb->cInstructions < 255) \
            pTb->cInstructions++; \
        uint32_t const fCImplFlagsMc2 = (a_fCImplFlags); \
        RT_NOREF(fCImplFlagsMc2); \
    } while (0)
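/* Typical emit sequence (iemThreadedRecompilerMcDeferToCImpl0 below is the one
   concrete user in this file); kIemThreadedFunc_Whatever is a made-up name:

       IEM_MC2_BEGIN_EMIT_CALLS(fCheckIrqBefore);
       IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_Whatever, uSomeArg);
       IEM_MC2_END_EMIT_CALLS(fCImplFlags);

   BEGIN opens a plain compound statement and END closes it; the trailing
   'while (0)' never executes its body, which ends up being the caller's
   terminating semicolon. */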


/*
 * IEM_MC_DEFER_TO_CIMPL_0 is easily wrapped up.
 *
 * Doing so will also take care of IEMOP_RAISE_DIVIDE_ERROR, IEMOP_RAISE_INVALID_LOCK_PREFIX,
 * IEMOP_RAISE_INVALID_OPCODE and their users.
 */
#undef IEM_MC_DEFER_TO_CIMPL_0_RET
#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_fGstShwFlush, a_pfnCImpl) \
    return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_fFlags, a_fGstShwFlush, a_pfnCImpl)

IEM_DECL_MSC_GUARD_IGNORE DECLINLINE(VBOXSTRICTRC)
iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, uint32_t fFlags, uint64_t fGstShwFlush, PFNIEMCIMPL0 pfnCImpl)
{
    LogFlow(("CImpl0: %04x:%08RX64 LB %#x: %#x %#RX64 %p\n",
             pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IEM_GET_INSTR_LEN(pVCpu), fFlags, fGstShwFlush, pfnCImpl));
    pVCpu->iem.s.fTbCurInstr = fFlags;

    IEM_MC2_BEGIN_EMIT_CALLS(fFlags & IEM_CIMPL_F_CHECK_IRQ_BEFORE);
    IEM_MC2_EMIT_CALL_3(kIemThreadedFunc_BltIn_DeferToCImpl0, (uintptr_t)pfnCImpl, IEM_GET_INSTR_LEN(pVCpu), fGstShwFlush);
    if (   (fFlags & (IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT))
        && !(fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_BRANCH_FAR)))
        IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_BltIn_CheckMode, pVCpu->iem.s.fExec);
    IEM_MC2_END_EMIT_CALLS(fFlags);

    /*
     * We have to repeat work normally done by kdCImplFlags and
     * ThreadedFunctionVariation.emitThreadedCallStmts here.
     */
    AssertCompile(IEM_CIMPL_F_BRANCH_DIRECT      == IEMBRANCHED_F_DIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_INDIRECT    == IEMBRANCHED_F_INDIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_RELATIVE    == IEMBRANCHED_F_RELATIVE);
    AssertCompile(IEM_CIMPL_F_BRANCH_CONDITIONAL == IEMBRANCHED_F_CONDITIONAL);
    AssertCompile(IEM_CIMPL_F_BRANCH_FAR         == IEMBRANCHED_F_FAR);

    if (fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_BRANCH_FAR))
        pVCpu->iem.s.fEndTb = true;
    else if (fFlags & IEM_CIMPL_F_BRANCH_ANY)
        pVCpu->iem.s.fTbBranched = fFlags & (IEM_CIMPL_F_BRANCH_ANY | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL);

    if (fFlags & IEM_CIMPL_F_CHECK_IRQ_BEFORE)
        pVCpu->iem.s.cInstrTillIrqCheck = 0;

    return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
}
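/* Hypothetical illustration of how the IEM_MC_DEFER_TO_CIMPL_0_RET override
   lands here (iemCImpl_SomethingComplicated is a made-up name):

       // In a decoder body compiled for the threaded recompiler:
       IEM_MC_DEFER_TO_CIMPL_0_RET(IEM_CIMPL_F_VMEXIT, 0, iemCImpl_SomethingComplicated);
       // ...which expands to:
       return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, IEM_CIMPL_F_VMEXIT, 0, iemCImpl_SomethingComplicated);

   I.e. the recompiler records a BltIn_DeferToCImpl0 call in the TB and then
   executes the C implementation directly for this one instruction. */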


/**
 * Helper for indicating that we've branched.
 */
DECL_FORCE_INLINE(void) iemThreadedSetBranched(PVMCPUCC pVCpu, uint8_t fTbBranched)
{
    pVCpu->iem.s.fTbBranched = fTbBranched;
    //pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;
    //pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;
}


#endif /* !VMM_INCLUDED_SRC_VMMAll_target_x86_IEMAllThrdTables_x86_h */