VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdTables.h@ 101504

Last change on this file since 101504 was 101387, checked in by vboxsync, 14 months ago

VMM/IEM: Added a new class of threaded function variants, the 16f/32f/64f variants, that will clear RF (and vbox internal friends) and check for TF (and vbox internal friends). The variants w/o the 'f' after the bit count will skip this test+branch. The motivation was to deal with this at the threaded recompiler level rather than trying to optimize away the test+branch (and more) when generating native code; it makes IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32 a very simple place to start emitting native code (compared to IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32_WITH_FLAGS). bugref:10371
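To illustrate the distinction the commit describes, here is a minimal C sketch of what the two kinds of PC-advance variants do conceptually. This is a stand-in for orientation only: the type CPUSTATE, the EFL_* constants and the function names below are hypothetical and are not the actual IEM threaded functions.

    /* Minimal illustrative sketch, not VirtualBox code. */
    #include <stdint.h>

    #define EFL_TF  UINT32_C(0x00000100)   /* trap flag: single-stepping requested */
    #define EFL_RF  UINT32_C(0x00010000)   /* resume flag: suppresses instruction breakpoints once */

    typedef struct CPUSTATE { uint32_t eip; uint32_t eflags; } CPUSTATE;

    /* Non-'f' variant: just advance the program counter, no flag work at all. */
    static void advancePc32(CPUSTATE *pCpu, uint8_t cbInstr)
    {
        pCpu->eip += cbInstr;
    }

    /* 'f' variant: additionally clear RF and branch off to raise a single-step
       #DB when TF is set (returning non-zero here stands in for that path). */
    static int advancePc32WithFlags(CPUSTATE *pCpu, uint8_t cbInstr)
    {
        pCpu->eip    += cbInstr;
        pCpu->eflags &= ~EFL_RF;
        if (pCpu->eflags & EFL_TF)
            return -1;                     /* stand-in for raising #DB */
        return 0;
    }

The cheaper non-'f' shape is why, as the commit message notes, the plain IEM_MC_ADVANCE_RIP_AND_FINISH_THREADED_PC32 variant is the simpler starting point for native code emission.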

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 14.2 KB
/* $Id: IEMAllThrdTables.h 101387 2023-10-07 23:34:54Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Threaded Recompilation, Instruction Tables.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */

#ifndef VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h
#define VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#endif
#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF /* A bit hackish, but it's all in IEMInline.h. */
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#ifndef TST_IEM_CHECK_MC
# include "IEMInternal.h"
#endif
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#ifndef TST_IEM_CHECK_MC
# include "IEMInline.h"
# include "IEMOpHlp.h"
# include "IEMMc.h"
#endif

#include "IEMThreadedFunctions.h"

/*
 * Narrow down configs here to avoid wasting time on unused configs.
 */

#ifndef IEM_WITH_CODE_TLB
# error The code TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_DATA_TLB
# error The data TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_SETJMP
# error The setjmp approach must be enabled for the recompiler.
#endif

/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define g_apfnOneByteMap    g_apfnIemThreadedRecompilerOneByteMap
#define g_apfnTwoByteMap    g_apfnIemThreadedRecompilerTwoByteMap
#define g_apfnThreeByte0f3a g_apfnIemThreadedRecompilerThreeByte0f3a
#define g_apfnThreeByte0f38 g_apfnIemThreadedRecompilerThreeByte0f38
#define g_apfnVexMap1       g_apfnIemThreadedRecompilerVecMap1
#define g_apfnVexMap2       g_apfnIemThreadedRecompilerVecMap2
#define g_apfnVexMap3       g_apfnIemThreadedRecompilerVecMap3


/*
 * Override IEM_MC_BEGIN to take down the IEM_CIMPL_F_XXX flags.
 */
#undef IEM_MC_BEGIN
#define IEM_MC_BEGIN(a_cArgs, a_cLocals, a_fMcFlags, a_fCImplFlags) \
    { \
        pVCpu->iem.s.fTbCurInstr = (a_fCImplFlags) /*| ((a_fMcFlags) << 20*/

/*
 * Override IEM_MC_CALC_RM_EFF_ADDR to use iemOpHlpCalcRmEffAddrJmpEx and produce uEffAddrInfo.
 */
#undef IEM_MC_CALC_RM_EFF_ADDR
#ifndef IEM_WITH_SETJMP
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff), &uEffAddrInfo))
#else
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
#endif

/*
 * Likewise override IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES so we fetch all the opcodes.
 */
#undef IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES
#define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        uint64_t uEffAddrInfo; \
        (void)iemOpHlpCalcRmEffAddrJmpEx(pVCpu, bRm, 0, &uEffAddrInfo); \
    } while (0)

/*
 * Override the IEM_MC_REL_JMP_S*_AND_FINISH macros to check for zero byte jumps.
 */
#undef IEM_MC_REL_JMP_S8_AND_FINISH
#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i8) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize); \
    } while (0)

#undef IEM_MC_REL_JMP_S16_AND_FINISH
#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i16) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16)); \
    } while (0)

#undef IEM_MC_REL_JMP_S32_AND_FINISH
#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i32) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize); \
    } while (0)


/*
 * Emit call macros.
 */
#define IEM_MC2_BEGIN_EMIT_CALLS(a_fCheckIrqBefore) \
    { \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        uint8_t const cbInstrMc2 = IEM_GET_INSTR_LEN(pVCpu); \
        AssertMsg(pVCpu->iem.s.offOpcode == cbInstrMc2, \
                  ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, cbInstrMc2, \
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
        \
        /* If we need to check for IRQs before the instruction, we do that before \
           adding any opcodes as it may abort the instruction. \
           Note! During compilation, we may swap IRQ and #PF exceptions here \
                 in a manner that a real CPU would not do. However it shouldn't \
                 be something that is easy (if at all possible) to observe in the \
                 guest, so fine. The unexpected end-of-tb below has the same \
                 potential "issue". */ \
        if (!(a_fCheckIrqBefore) || iemThreadedCompileEmitIrqCheckBefore(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        /* No page crossing, right? */ \
        uint16_t const offOpcodeMc2 = pTb->cbOpcodes; \
        uint8_t const idxRangeMc2 = pTb->cRanges - 1; \
        if (   !pVCpu->iem.s.fTbCrossedPage \
            && !pVCpu->iem.s.fTbCheckOpcodes \
            && !pVCpu->iem.s.fTbBranched \
            && !(pTb->fFlags & IEMTB_F_CS_LIM_CHECKS)) \
        { \
            /** @todo Custom copy function, given range is 1 thru 15 bytes. */ \
            memcpy(&pTb->pabOpcodes[offOpcodeMc2], pVCpu->iem.s.abOpcode, pVCpu->iem.s.offOpcode); \
            pTb->cbOpcodes = offOpcodeMc2 + pVCpu->iem.s.offOpcode; \
            pTb->aRanges[idxRangeMc2].cbOpcodes += cbInstrMc2; \
            Assert(pTb->cbOpcodes <= pTb->cbOpcodesAllocated); \
        } \
        else if (iemThreadedCompileBeginEmitCallsComplications(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        uint8_t const idxInstrMc2 = pTb->cInstructions; \
        \
        /* Emit hardware instruction breakpoint check if enabled. */ \
        if (!(pTb->fFlags & IEM_F_PENDING_BRK_INSTR)) \
        { /* likely */ } \
        else \
            IEM_MC2_EMIT_CALL_0(kIemThreadedFunc_BltIn_CheckHwInstrBps)

#define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->uUnused0    = 0; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_1(a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->uUnused0    = 0; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_2(a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->uUnused0    = 0; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_3(a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->idxInstr    = idxInstrMc2; \
        pCall->uUnused0    = 0; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)
#define IEM_MC2_END_EMIT_CALLS(a_fCImplFlags) \
        Assert(pTb->cInstructions <= pTb->Thrd.cCalls); \
        if (pTb->cInstructions < 255) \
            pTb->cInstructions++; \
        uint32_t const fCImplFlagsMc2 = (a_fCImplFlags); \
        RT_NOREF(fCImplFlagsMc2); \
    } while (0)


/*
 * IEM_MC_DEFER_TO_CIMPL_0 is easily wrapped up.
 *
 * Doing so will also take care of IEMOP_RAISE_DIVIDE_ERROR, IEMOP_RAISE_INVALID_LOCK_PREFIX,
 * IEMOP_RAISE_INVALID_OPCODE and their users.
 */
#undef IEM_MC_DEFER_TO_CIMPL_0_RET
#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) \
    return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_fFlags, a_pfnCImpl)

DECLINLINE(VBOXSTRICTRC) iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, uint32_t fFlags, PFNIEMCIMPL0 pfnCImpl)
{
    Log8(("CImpl0: %04x:%08RX64 LB %#x: %#x %p\n",
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IEM_GET_INSTR_LEN(pVCpu), fFlags, pfnCImpl));
    pVCpu->iem.s.fTbCurInstr = fFlags;

    IEM_MC2_BEGIN_EMIT_CALLS(fFlags & IEM_CIMPL_F_CHECK_IRQ_BEFORE);
    IEM_MC2_EMIT_CALL_2(kIemThreadedFunc_BltIn_DeferToCImpl0, (uintptr_t)pfnCImpl, IEM_GET_INSTR_LEN(pVCpu));
    if (   (fFlags & (IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT))
        && !(fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_BRANCH_FAR)))
        IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_BltIn_CheckMode, pVCpu->iem.s.fExec);
    IEM_MC2_END_EMIT_CALLS(fFlags);

    /*
     * We have to repeat work normally done by kdCImplFlags and
     * ThreadedFunctionVariation.emitThreadedCallStmts here.
     */
    AssertCompile(IEM_CIMPL_F_BRANCH_DIRECT      == IEMBRANCHED_F_DIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_INDIRECT    == IEMBRANCHED_F_INDIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_RELATIVE    == IEMBRANCHED_F_RELATIVE);
    AssertCompile(IEM_CIMPL_F_BRANCH_CONDITIONAL == IEMBRANCHED_F_CONDITIONAL);
    AssertCompile(IEM_CIMPL_F_BRANCH_FAR         == IEMBRANCHED_F_FAR);

    if (fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_BRANCH_FAR))
        pVCpu->iem.s.fEndTb = true;
    else if (fFlags & IEM_CIMPL_F_BRANCH_ANY)
        pVCpu->iem.s.fTbBranched = fFlags & (IEM_CIMPL_F_BRANCH_ANY | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL);

    if (fFlags & IEM_CIMPL_F_CHECK_IRQ_BEFORE)
        pVCpu->iem.s.cInstrTillIrqCheck = 0;

    return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
}


/**
 * Helper for indicating that we've branched.
 */
DECL_FORCE_INLINE(void) iemThreadedSetBranched(PVMCPUCC pVCpu, uint8_t fTbBranched)
{
    pVCpu->iem.s.fTbBranched          = fTbBranched;
    pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;
    pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;
}


#endif /* !VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h */
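
For readers new to threaded recompilation: the IEM_MC2_* macros above append pre-decoded call entries to the translation block's call table, which the threaded-code dispatcher later walks. Below is a generic, self-contained C sketch of that compile-then-dispatch pattern, given purely for orientation; CALLENTRY, TRANSBLOCK, emitCall and runBlock are hypothetical names, not the real IEMTB/IEMTHRDEDCALLENTRY machinery.

    /* Generic sketch of a "threaded call table", not VirtualBox code. */
    #include <stdint.h>
    #include <stdio.h>

    typedef struct CALLENTRY
    {
        void   (*pfn)(uint64_t auParams[3]);   /* threaded function to invoke */
        uint64_t auParams[3];                  /* pre-decoded parameters */
    } CALLENTRY;

    typedef struct TRANSBLOCK
    {
        CALLENTRY aCalls[64];
        unsigned  cCalls;
    } TRANSBLOCK;

    /* Compile time: decoding an instruction appends one or more call entries. */
    static void emitCall(TRANSBLOCK *pTb, void (*pfn)(uint64_t[3]),
                         uint64_t uArg0, uint64_t uArg1, uint64_t uArg2)
    {
        CALLENTRY *pCall = &pTb->aCalls[pTb->cCalls++];
        pCall->pfn         = pfn;
        pCall->auParams[0] = uArg0;
        pCall->auParams[1] = uArg1;
        pCall->auParams[2] = uArg2;
    }

    /* Execute time: the dispatcher simply walks the table in order. */
    static void runBlock(TRANSBLOCK *pTb)
    {
        for (unsigned i = 0; i < pTb->cCalls; i++)
            pTb->aCalls[i].pfn(pTb->aCalls[i].auParams);
    }

    static void demoPrint(uint64_t auParams[3])
    {
        printf("threaded call with arg %llu\n", (unsigned long long)auParams[0]);
    }

    int main(void)
    {
        TRANSBLOCK Tb = { .cCalls = 0 };
        emitCall(&Tb, demoPrint, 1, 0, 0);
        emitCall(&Tb, demoPrint, 2, 0, 0);
        runBlock(&Tb);
        return 0;
    }

In the real code the table entries carry extra bookkeeping (idxInstr, offOpcode, idxRange, and so on) so that the block can be revalidated and so exceptions can be attributed to the right guest instruction.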