- Timestamp: Jul 30, 2023 8:29:08 PM
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 1 edited, 1 copied
trunk/src/VBox/VMM/VMMAll/IEMAllThrdTables.cpp (r100741 -> r100742)

Everything after the tail of the license header comment is removed from this file: the header includes, the recompiler configuration checks, the opcode-map renames, the IEM_MC_* overrides, the IEM_MC2_* emit-call macros and the two inline helpers. In their place a single include of the new header (the copied file below) is added.

Removed:

/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#ifndef LOG_GROUP /* defined when included by tstIEMCheckMc.cpp */
# define LOG_GROUP LOG_GROUP_IEM_RE_THREADED
#endif
#define IEM_WITH_CODE_TLB_AND_OPCODE_BUF /* A bit hackish, but its all in IEMInline.h. */
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/iem.h>
#include <VBox/vmm/cpum.h>
#include <VBox/vmm/apic.h>
#include <VBox/vmm/pdm.h>
#include <VBox/vmm/pgm.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/em.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/gim.h>
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
# include <VBox/vmm/em.h>
# include <VBox/vmm/hm_svm.h>
#endif
#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
# include <VBox/vmm/hmvmxinline.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/dbgftrace.h>
#ifndef TST_IEM_CHECK_MC
# include "IEMInternal.h"
#endif
#include <VBox/vmm/vmcc.h>
#include <VBox/log.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <VBox/dis.h>
#include <VBox/disopcode-x86-amd64.h>
#include <iprt/asm-math.h>
#include <iprt/assert.h>
#include <iprt/mem.h>
#include <iprt/string.h>
#include <iprt/x86.h>

#ifndef TST_IEM_CHECK_MC
# include "IEMInline.h"
# include "IEMOpHlp.h"
# include "IEMMc.h"
#endif

#include "IEMThreadedFunctions.h"


/*
 * Narrow down configs here to avoid wasting time on unused configs here.
 */

#ifndef IEM_WITH_CODE_TLB
# error The code TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_DATA_TLB
# error The data TLB must be enabled for the recompiler.
#endif

#ifndef IEM_WITH_SETJMP
# error The setjmp approach must be enabled for the recompiler.
#endif


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#define g_apfnOneByteMap    g_apfnIemThreadedRecompilerOneByteMap
#define g_apfnTwoByteMap    g_apfnIemThreadedRecompilerTwoByteMap
#define g_apfnThreeByte0f3a g_apfnIemThreadedRecompilerThreeByte0f3a
#define g_apfnThreeByte0f38 g_apfnIemThreadedRecompilerThreeByte0f38
#define g_apfnVexMap1       g_apfnIemThreadedRecompilerVecMap1
#define g_apfnVexMap2       g_apfnIemThreadedRecompilerVecMap2
#define g_apfnVexMap3       g_apfnIemThreadedRecompilerVecMap3


/*
 * Override IEM_MC_CALC_RM_EFF_ADDR to use iemOpHlpCalcRmEffAddrJmpEx and produce uEffAddrInfo.
 */
#undef IEM_MC_CALC_RM_EFF_ADDR
#ifndef IEM_WITH_SETJMP
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    IEM_MC_RETURN_ON_FAILURE(iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &(a_GCPtrEff), &uEffAddrInfo))
#else
# define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, a_bRm, a_cbImmAndRspOffset) \
    uint64_t uEffAddrInfo; \
    ((a_GCPtrEff) = iemOpHlpCalcRmEffAddrJmpEx(pVCpu, (a_bRm), (a_cbImmAndRspOffset), &uEffAddrInfo))
#endif

/*
 * Likewise override IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES so we fetch all the opcodes.
 */
#undef IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES
#define IEM_OPCODE_SKIP_RM_EFF_ADDR_BYTES(a_bRm) do { \
        uint64_t uEffAddrInfo; \
        (void)iemOpHlpCalcRmEffAddrJmpEx(pVCpu, bRm, 0, &uEffAddrInfo); \
    } while (0)

/*
 * Override the IEM_MC_REL_JMP_S*_AND_FINISH macros to check for zero byte jumps.
 */
#undef IEM_MC_REL_JMP_S8_AND_FINISH
#define IEM_MC_REL_JMP_S8_AND_FINISH(a_i8) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i8) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS8AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i8), pVCpu->iem.s.enmEffOpSize); \
    } while (0)

#undef IEM_MC_REL_JMP_S16_AND_FINISH
#define IEM_MC_REL_JMP_S16_AND_FINISH(a_i16) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i16) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS16AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i16)); \
    } while (0)

#undef IEM_MC_REL_JMP_S32_AND_FINISH
#define IEM_MC_REL_JMP_S32_AND_FINISH(a_i32) do { \
        Assert(pVCpu->iem.s.fTbBranched != 0); \
        if ((a_i32) == 0) \
            pVCpu->iem.s.fTbBranched |= IEMBRANCHED_F_ZERO; \
        return iemRegRipRelativeJumpS32AndFinishClearingRF(pVCpu, IEM_GET_INSTR_LEN(pVCpu), (a_i32), pVCpu->iem.s.enmEffOpSize); \
    } while (0)


/*
 * Emit call macros.
 */
#define IEM_MC2_BEGIN_EMIT_CALLS() \
    { \
        PIEMTB const pTb = pVCpu->iem.s.pCurTbR3; \
        uint8_t const cbInstrMc2 = IEM_GET_INSTR_LEN(pVCpu); \
        AssertMsg(pVCpu->iem.s.offOpcode == cbInstrMc2, \
                  ("%u vs %u (%04x:%08RX64)\n", pVCpu->iem.s.offOpcode, cbInstrMc2, \
                   pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); \
        \
        /* No page crossing, right? */ \
        uint16_t const offOpcodeMc2 = pTb->cbOpcodes; \
        uint8_t const idxRangeMc2 = pTb->cRanges - 1; \
        if (   !pVCpu->iem.s.fTbCrossedPage \
            && !pVCpu->iem.s.fTbCheckOpcodes \
            && !pVCpu->iem.s.fTbBranched \
            && !(pTb->fFlags & IEMTB_F_CS_LIM_CHECKS)) \
        { \
            /** @todo Custom copy function, given range is 1 thru 15 bytes. */ \
            memcpy(&pTb->pabOpcodes[offOpcodeMc2], pVCpu->iem.s.abOpcode, pVCpu->iem.s.offOpcode); \
            pTb->cbOpcodes = offOpcodeMc2 + pVCpu->iem.s.offOpcode; \
            pTb->aRanges[idxRangeMc2].cbOpcodes += cbInstrMc2; \
            Assert(pTb->cbOpcodes <= pTb->cbOpcodesAllocated); \
        } \
        else if (iemThreadedCompileBeginEmitCallsComplications(pVCpu, pTb)) \
        { /* likely */ } \
        else \
            return VINF_IEM_RECOMPILE_END_TB; \
        \
        do { } while (0)
#define IEM_MC2_EMIT_CALL_0(a_enmFunction) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = 0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_1(a_enmFunction, a_uArg0) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = 0; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_2(a_enmFunction, a_uArg0, a_uArg1) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = 0; \
    } while (0)
#define IEM_MC2_EMIT_CALL_3(a_enmFunction, a_uArg0, a_uArg1, a_uArg2) do { \
        IEMTHREADEDFUNCS const enmFunctionCheck = a_enmFunction; RT_NOREF(enmFunctionCheck); \
        uint64_t const uArg0Check = (a_uArg0); RT_NOREF(uArg0Check); \
        uint64_t const uArg1Check = (a_uArg1); RT_NOREF(uArg1Check); \
        uint64_t const uArg2Check = (a_uArg2); RT_NOREF(uArg2Check); \
        \
        PIEMTHRDEDCALLENTRY const pCall = &pTb->Thrd.paCalls[pTb->Thrd.cCalls++]; \
        pCall->enmFunction = a_enmFunction; \
        pCall->offOpcode   = offOpcodeMc2; \
        pCall->cbOpcode    = cbInstrMc2; \
        pCall->idxRange    = idxRangeMc2; \
        pCall->auParams[0] = a_uArg0; \
        pCall->auParams[1] = a_uArg1; \
        pCall->auParams[2] = a_uArg2; \
    } while (0)
#define IEM_MC2_END_EMIT_CALLS(a_fCImplFlags) \
        Assert(pTb->cInstructions <= pTb->Thrd.cCalls); \
        if (pTb->cInstructions < 255) \
            pTb->cInstructions++; \
        uint32_t const fCImplFlagsMc2 = (a_fCImplFlags); \
        RT_NOREF(fCImplFlagsMc2); \
    } while (0)


/*
 * IEM_MC_DEFER_TO_CIMPL_0 is easily wrapped up.
 *
 * Doing so will also take care of IEMOP_RAISE_DIVIDE_ERROR, IEMOP_RAISE_INVALID_LOCK_PREFIX,
 * IEMOP_RAISE_INVALID_OPCODE and their users.
 */
#undef IEM_MC_DEFER_TO_CIMPL_0_RET
#define IEM_MC_DEFER_TO_CIMPL_0_RET(a_fFlags, a_pfnCImpl) \
    return iemThreadedRecompilerMcDeferToCImpl0(pVCpu, a_fFlags, a_pfnCImpl)

DECLINLINE(VBOXSTRICTRC) iemThreadedRecompilerMcDeferToCImpl0(PVMCPUCC pVCpu, uint32_t fFlags, PFNIEMCIMPL0 pfnCImpl)
{
    Log8(("CImpl0: %04x:%08RX64 LB %#x: %#x %p\n",
          pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, IEM_GET_INSTR_LEN(pVCpu), fFlags, pfnCImpl));

    IEM_MC2_BEGIN_EMIT_CALLS();
    IEM_MC2_EMIT_CALL_2(kIemThreadedFunc_DeferToCImpl0, (uintptr_t)pfnCImpl, IEM_GET_INSTR_LEN(pVCpu));
    IEM_MC2_END_EMIT_CALLS(fFlags);

    /* We have to repeat work normally done by kdCImplFlags and
       ThreadedFunctionVariation.emitThreadedCallStmts here. */
    if (fFlags & (IEM_CIMPL_F_END_TB | IEM_CIMPL_F_MODE | IEM_CIMPL_F_VMEXIT | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_REP))
        pVCpu->iem.s.fEndTb = true;

    AssertCompile(IEM_CIMPL_F_BRANCH_DIRECT      == IEMBRANCHED_F_DIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_INDIRECT    == IEMBRANCHED_F_INDIRECT);
    AssertCompile(IEM_CIMPL_F_BRANCH_RELATIVE    == IEMBRANCHED_F_RELATIVE);
    AssertCompile(IEM_CIMPL_F_BRANCH_CONDITIONAL == IEMBRANCHED_F_CONDITIONAL);
    AssertCompile(IEM_CIMPL_F_BRANCH_FAR         == IEMBRANCHED_F_FAR);
    if (fFlags & IEM_CIMPL_F_BRANCH_ANY)
        pVCpu->iem.s.fTbBranched = fFlags & (IEM_CIMPL_F_BRANCH_ANY | IEM_CIMPL_F_BRANCH_FAR | IEM_CIMPL_F_BRANCH_CONDITIONAL);

    return pfnCImpl(pVCpu, IEM_GET_INSTR_LEN(pVCpu));
}


/**
 * Helper for indicating that we've branched.
 */
DECL_FORCE_INLINE(void) iemThreadedSetBranched(PVMCPUCC pVCpu, uint8_t fTbBranched)
{
    pVCpu->iem.s.fTbBranched          = fTbBranched;
    pVCpu->iem.s.GCPhysTbBranchSrcBuf = pVCpu->iem.s.GCPhysInstrBuf;
    pVCpu->iem.s.GCVirtTbBranchSrcBuf = pVCpu->iem.s.uInstrBufPc;
}

Added in its place:

#include "IEMAllThrdTables.h"

(The changeset view truncates the remainder of this file's diff at the start of the next unchanged comment block.)
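The technique the moved block implements is macro overriding: the recompiler redefines selected IEM_MC_* statement macros (and renames the opcode maps) before the shared, generated instruction bodies are compiled, so the very same decoder source either performs an operation or merely records a threaded call into the current translation block. Below is a self-contained toy sketch of that pattern; DEMO_STEP, g_calls and the two demo functions are made-up illustrations, not part of the changeset:

```cpp
// Toy illustration of the override pattern (illustrative names only).
#include <cstdio>
#include <vector>

static std::vector<int> g_calls;                            // stands in for pTb->Thrd.paCalls

#define DEMO_STEP(n) std::printf("execute step %d\n", (n))  // "interpreter" flavour
static void demoBodyInterpreted() { DEMO_STEP(1); DEMO_STEP(2); }

#undef  DEMO_STEP
#define DEMO_STEP(n) g_calls.push_back(n)                   // "recompiler" flavour: record, do not execute
static void demoBodyRecompiled()  { DEMO_STEP(1); DEMO_STEP(2); }

int main()
{
    demoBodyInterpreted();                                  // runs the steps immediately
    demoBodyRecompiled();                                   // only records them
    std::printf("recorded %zu threaded calls\n", g_calls.size());
    return 0;
}
```

In the real code the recording side is what IEM_MC2_EMIT_CALL_0 through IEM_MC2_EMIT_CALL_3 do: each appends an IEMTHRDEDCALLENTRY to pTb->Thrd.paCalls instead of executing anything, and iemThreadedRecompilerMcDeferToCImpl0 above shows the begin/emit/end sequence in use.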
trunk/src/VBox/VMM/VMMAll/IEMAllThrdTables.h (r100741 -> r100742, copied from IEMAllThrdTables.cpp)

The copy gains a standard include guard immediately after the SPDX license header:

Added:

#ifndef VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h
#define VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h
#ifndef RT_WITHOUT_PRAGMA_ONCE
# pragma once
#endif

Near the end of the file, the block that defines the table-selection macros and pulls in the generated threaded instruction bodies is dropped from the header, and the guard is closed:

Removed:

/*
 * Include the "annotated" IEMAllInst*.cpp.h files.
 */
#define IEM_WITH_ONE_BYTE_TABLE
#define IEM_WITH_TWO_BYTE_TABLE
#define IEM_WITH_THREE_BYTE_TABLES
#define IEM_WITH_3DNOW_BYTE_TABLE
#define IEM_WITH_VEX_TABLES
#include "IEMThreadedInstructions.cpp.h"

Added:

#endif /* !VMM_INCLUDED_SRC_VMMAll_IEMAllThrdTables_h */
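Taken together, the two hunks are the familiar refactor of hoisting shared setup into a guarded header: the overrides and helpers now live in IEMAllThrdTables.h, while IEMAllThrdTables.cpp shrinks to an include of it plus whatever it keeps locally (not visible in this truncated view). A generic sketch of that layout, using assumed demo file and symbol names rather than anything from the changeset:

```cpp
// demo_thrd_tables.h: shared macros and helpers behind a guard like the one added above.
#ifndef DEMO_THRD_TABLES_H
#define DEMO_THRD_TABLES_H
#ifndef DEMO_WITHOUT_PRAGMA_ONCE   // classic guard stays authoritative; pragma once is an extra
# pragma once
#endif

#include <vector>

// Shared helper that every including translation unit sees identically.
inline std::vector<int>& demoCalls() { static std::vector<int> s; return s; }
#define DEMO_EMIT_CALL(n) demoCalls().push_back(n)

#endif /* !DEMO_THRD_TABLES_H */

// demo_thrd_tables.cpp: the original translation unit, reduced to the include plus its
// local table-building code (shown as comments since this listing is a single file):
//     #include "demo_thrd_tables.h"
//     void demoBuildTables() { DEMO_EMIT_CALL(1); DEMO_EMIT_CALL(2); }
```

The practical benefit of this shape is that any additional translation unit can include the same header and get an identical macro environment.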