VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMAll/IEMAllThrdTables.cpp@100740

Last change on this file since 100740 was 100736, checked in by vboxsync, 21 months ago

VMM/IEM: Put the instruction tables for the recompiler in a separate file to speed up compilation a little bit (~10 secs). bugref:10369

/* $Id: IEMAllThrdTables.cpp 100736 2023-07-30 00:54:04Z vboxsync $ */
/** @file
 * IEM - Instruction Decoding and Threaded Recompilation, Instruction Tables.
 */

/*
 * Copyright (C) 2011-2023 Oracle and/or its affiliates.
 *
 * This file is part of VirtualBox base platform packages, as
 * available from https://www.virtualbox.org.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, in version 3 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <https://www.gnu.org/licenses>.
 *
 * SPDX-License-Identifier: GPL-3.0-only
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#endif
#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF /* A bit hackish, but it's all in IEMInline.h. */
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#ifndef TST_IEM_CHECK_MC
# include "IEMInternal.h"
#endif
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#ifndef TST_IEM_CHECK_MC
# include "IEMInline.h"
# include "IEMOpHlp.h"
# include "IEMMc.h"
#endif

#include "IEMThreadedFunctions.h"

/*
 * Narrow down configs here to avoid wasting time on unused configs.
 */

#ifndef IEM_WITH_CODE_TLB
# error The code TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_DATA_TLB
# error The data TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_SETJMP
# error The setjmp approach must be enabled for the recompiler.
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define g_apfnOneByteMap    g_apfnIemThreadedRecompilerOneByteMap
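/* With this rename, the instruction tables included at the bottom of this
   file get instantiated under the recompiler-specific name
   g_apfnIemThreadedRecompilerOneByteMap, so they do not clash with the
   interpreter's g_apfnOneByteMap. */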

/*
 * Override IEM_MC_CALC_RM_EFF_ADDR to use iemOpHlpCalcRmEffAddrJmpEx and produce uEffAddrInfo.
 */
#undef  IEM_MC_CALC_RM_EFF_ADDR
#ifndef IEM_WITH_SETJMP
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff), &uEffAddrInfo))
#else
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
#endif
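
/* Illustrative expansion (hypothetical decoder snippet, not part of this
 * file) for the setjmp build:
 *      RTGCPTR GCPtrEff;
 *      IEM_MC_CALC_RM_EFF_ADDR(GCPtrEff, bRm, 0);
 * becomes:
 *      uint64_t uEffAddrInfo;
 *      ((GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (bRm), (0), &uEffAddrInfo));
 */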

/*
 * Likewise override IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES so we fetch all the opcodes.
 */
#undef  IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES
#define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        uint64_t uEffAddrInfo; \
        (void)iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), 0, &uEffAddrInfo); \
    } while (0)
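
/* Routing the "skip" through iemOpHlpCalcRmEffAddrJmpEx instead of merely
   advancing past the addressing bytes forces those bytes into the opcode
   buffer; that matters because the translation block stores the opcode
   bytes it covers (see the memcpy in IEM_MC2_BEGIN_EMIT_CALLS below). */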

/*
 * Override the IEM_MC_REL_JMP_S*_AND_FINISH macros to check for zero byte jumps.
 */
#undef IEM_MC_REL_JMP_S8_AND_FINISH
#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i8) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize); \
    } while (0)

#undef IEM_MC_REL_JMP_S16_AND_FINISH
#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i16) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16)); \
    } while (0)

#undef IEM_MC_REL_JMP_S32_AND_FINISH
#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i32) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize); \
    } while (0)
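
/* Why zero-byte jumps get their own flag: a relative jump with displacement
   zero continues at the next instruction, so the flow stays linear even
   though a branch was architecturally taken; IEMBRANCHED_F_ZERO presumably
   lets the compile loop keep extending the current TB in that case instead
   of treating it as a real change of control flow. */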

/*
 * Emit call macros.
 */
#define IEM_MC2_BEGIN_EMIT_CALLS() \
    { \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        uint8_t const cbInstrMc2 = IEM_GET_INSTR_LEN(pVCpu); \
        AssertMsg(pVCpu->iem.s.offOpcode == cbInstrMc2, \
                  ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, cbInstrMc2, \
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
        \
        /* No page crossing, right? */ \
        uint16_t const offOpcodeMc2 = pTb->cbOpcodes; \
        uint8_t const  idxRangeMc2  = pTb->cRanges - 1; \
        if (   !pVCpu->iem.s.fTbCrossedPage \
            && !pVCpu->iem.s.fTbCheckOpcodes \
            && !pVCpu->iem.s.fTbBranched \
            && !(pTb->fFlags & IEMTB_F_CS_LIM_CHECKS)) \
        { \
            /** @todo Custom copy function, given range is 1 thru 15 bytes. */ \
            memcpy(&pTb->pabOpcodes[offOpcodeMc2], pVCpu->iem.s.abOpcode, pVCpu->iem.s.offOpcode); \
            pTb->cbOpcodes = offOpcodeMc2 + pVCpu->iem.s.offOpcode; \
            pTb->aRanges[idxRangeMc2].cbOpcodes += cbInstrMc2; \
            Assert(pTb->cbOpcodes <= pTb->cbOpcodesAllocated); \
        } \
        else if (iemThreadedCompileBeginEmitCallsComplications(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        do { } while (0)
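
/* Pairing note: IEM_MC2_BEGIN_EMIT_CALLS opens a scope ('{') that only
   IEM_MC2_END_EMIT_CALLS closes again; the 'do { } while (0)' here and the
   trailing 'while (0)' there merely soak up the semicolons written at the
   call sites. The scope-local variables pTb, cbInstrMc2, offOpcodeMc2 and
   idxRangeMc2 are what the IEM_MC2_EMIT_CALL_* macros below rely on. */
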
#define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_1(a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_2(a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_3(a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)
#define IEM_MC2_END_EMIT_CALLS(a_fCImplFlags) \
        Assert(pTb->cInstructions <= pTb->Thrd.cCalls); \
        if (pTb->cInstructions < 255) \
            pTb->cInstructions++; \
        uint32_t const fCImplFlagsMc2 = (a_fCImplFlags); \
        RT_NOREF(fCImplFlagsMc2); \
    } while (0)
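
/*
 * Taken together, a decoder body emits its threaded calls like this
 * (hypothetical sketch: kIemThreadedFunc_Xxx and uSomeParam are made up,
 * but iemThreadedRecompilerMcDeferToCImpl0 below is a real in-file instance):
 *
 *      IEM_MC2_BEGIN_EMIT_CALLS();
 *      IEM_MC2_EMIT_CALL_1(kIemThreadedFunc_Xxx, uSomeParam);
 *      IEM_MC2_END_EMIT_CALLS(fCImplFlags);
 */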

/*
 * IEM_MC_DEFER_TO_CIMPL_0 is easily wrapped up.
 *
 * Doing so will also take care of IEMOP_RAISE_DIVIDE_ERROR, IEMOP_RAISE_INVALID_LOCK_PREFIX,
 * IEMOP_RAISE_INVALID_OPCODE and their users.
 */
#undef IEM_MC_DEFER_TO_CIMPL_0_RET
#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) \
    return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_fFlags, a_pfnCImpl)

DECLINLINE(VBOXSTRICTRC) iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, uint32_t fFlags, PFNIEMCIMPL0 pfnCImpl)
{
    Log8(("CImpl0: %04x:%08RX64 LB %#x: %#x %p\n",
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IEM_GET_INSTR_LEN(pVCpu), fFlags, pfnCImpl));

    IEM_MC2_BEGIN_EMIT_CALLS();
    IEM_MC2_EMIT_CALL_2(kIemThreadedFunc_DeferToCImpl0, (uintptr_t)pfnCImpl, IEM_GET_INSTR_LEN(pVCpu));
    IEM_MC2_END_EMIT_CALLS(fFlags);

    /* We have to repeat work normally done by kdCImplFlags and
       ThreadedFunctionVariation.emitThreadedCallStmts here. */
    if (fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_REP))
        pVCpu->iem.s.fEndTb = true;

    AssertCompile(IEM_CIMPL_F_BRANCH_DIRECT      == IEMBRANCHED_F_DIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_INDIRECT    == IEMBRANCHED_F_INDIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_RELATIVE    == IEMBRANCHED_F_RELATIVE);
    AssertCompile(IEM_CIMPL_F_BRANCH_CONDITIONAL == IEMBRANCHED_F_CONDITIONAL);
    AssertCompile(IEM_CIMPL_F_BRANCH_FAR         == IEMBRANCHED_F_FAR);
    if (fFlags & IEM_CIMPL_F_BRANCH_ANY)
        pVCpu->iem.s.fTbBranched = fFlags & (IEM_CIMPL_F_BRANCH_ANY | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL);

    return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
}
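
/* Note that besides recording the deferred call in the TB, the helper above
   immediately invokes pfnCImpl as well, so compiling and executing the
   instruction happen in the same pass. */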

/**
 * Helper for indicating that we've branched.
 */
DECL_FORCE_INLINE(void) iemThreadedSetBranched(PVMCPUCC pVCpu, uint8_t fTbBranched)
{
    pVCpu->iem.s.fTbBranched          = fTbBranched;
    pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;
    pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;
}
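
/* Besides the branch type, this records which instruction buffer (guest
   physical and virtual address) the branch was taken from; the compile loop
   presumably compares these with the post-branch buffer to decide whether
   the jump left the current page. */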

/*
 * Include the "annotated" IEMAllInst*.cpp.h files.
 */
#include "IEMThreadedInstructions.cpp.h"
