Changeset 72493 in vbox
Timestamp: Jun 10, 2018, 4:08:44 PM
Location:  trunk
Files:     16 edited
trunk/include/VBox/vmm/iem.h
r72484 → r72493 (removed lines are prefixed with '-'):

  /** @} */

 -#if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
 -VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue);
 -VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue);
 -VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue);
 -VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue);
 -VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue);
 -VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue);
 -#endif

  /** @defgroup grp_iem_r3    The IEM Host Context Ring-3 API.
trunk/src/VBox/VMM/Makefile.kmk
r72451 → r72493 (removed lines are prefixed with '-'):

  # VMM_COMMON_DEFS += VBOX_WITH_NS_ACCOUNTING_STATS

 -# Special IEM debug mode which compares the result with HM/REM
 -ifdef IEM_VERIFICATION_MODE
 - VMM_COMMON_DEFS += IEM_VERIFICATION_MODE IEM_VERIFICATION_MODE_FULL IEM_VERIFICATION_MODE_FULL_HM
 -endif
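The bulk of this changeset is the IEMAll.cpp diff below, which deletes the IEM_VERIFICATION_MODE machinery: IEM logged its RAM and I/O port accesses into one list of event records, the IEMNotify* hooks whose declarations were removed from iem.h above fed the reference engine's accesses into a second list, and iemExecVerificationModeCheck then compared the two lists (and the register states) after each instruction. For orientation only, a minimal, self-contained C sketch of that record-and-compare idea follows; the names and the simplified event layout are illustrative, not the original IEMVERIFYEVTREC API.

/* verify_sketch.c - illustrative record-and-compare demo, not VirtualBox code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef enum { EVT_IOPORT_READ, EVT_IOPORT_WRITE } EVTTYPE;

/* Simplified stand-in for the removed IEMVERIFYEVTREC record. */
typedef struct EVTREC
{
    EVTTYPE        enmEvent;
    uint16_t       uPort;
    uint8_t        cbValue;
    uint32_t       u32Value;     /* only meaningful for writes */
    struct EVTREC *pNext;
} EVTREC;

/* Append an event record to the tail of a singly linked list. */
static void recordEvent(EVTREC **ppHead, EVTTYPE enmEvent, uint16_t uPort, uint8_t cbValue, uint32_t u32Value)
{
    EVTREC *pRec = (EVTREC *)calloc(1, sizeof(*pRec));
    if (!pRec)
        exit(1);
    pRec->enmEvent = enmEvent;
    pRec->uPort    = uPort;
    pRec->cbValue  = cbValue;
    pRec->u32Value = u32Value;
    while (*ppHead)
        ppHead = &(*ppHead)->pNext;
    *ppHead = pRec;
}

/* Walk both lists in lockstep; every I/O access must match 1:1, with no extras. */
static bool compareEvents(const EVTREC *pIem, const EVTREC *pOther)
{
    while (pIem && pOther)
    {
        if (   pIem->enmEvent != pOther->enmEvent
            || pIem->uPort    != pOther->uPort
            || pIem->cbValue  != pOther->cbValue
            || (pIem->enmEvent == EVT_IOPORT_WRITE && pIem->u32Value != pOther->u32Value))
        {
            printf("Mismatch at port %#x\n", (unsigned)pIem->uPort);
            return false;
        }
        pIem   = pIem->pNext;
        pOther = pOther->pNext;
    }
    if (pIem || pOther)
    {
        printf("Extra %s record!\n", pIem ? "interpreter" : "reference");
        return false;
    }
    return true;
}

int main(void)
{
    EVTREC *pIemHead   = NULL;  /* what the interpreter says it did         */
    EVTREC *pOtherHead = NULL;  /* what the reference engine was seen doing */

    recordEvent(&pIemHead,   EVT_IOPORT_WRITE, 0x80, 1, 0x42);
    recordEvent(&pOtherHead, EVT_IOPORT_WRITE, 0x80, 1, 0x42);

    printf("Verification %s\n", compareEvents(pIemHead, pOtherHead) ? "passed" : "FAILED");
    return 0;
}

Running the sketch prints "Verification passed"; altering either recorded access makes the two lists disagree, which is the condition the removed iemVerifyAssertRecords() code asserted on.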
trunk/src/VBox/VMM/VMMAll/IEMAll.cpp
r72488 r72493 74 74 */ 75 75 76 /** @def IEM_VERIFICATION_MODE_MINIMAL77 * Use for pitting IEM against EM or something else in ring-0 or raw-mode78 * context. */79 #if defined(DOXYGEN_RUNNING)80 # define IEM_VERIFICATION_MODE_MINIMAL81 #endif82 76 //#define IEM_LOG_MEMORY_WRITES 83 77 #define IEM_IMPLEMENTS_TASKSWITCH … … 118 112 #endif 119 113 #include "IEMInternal.h" 120 #ifdef IEM_VERIFICATION_MODE_FULL121 # include <VBox/vmm/rem.h>122 # include <VBox/vmm/mm.h>123 #endif124 114 #include <VBox/vmm/vm.h> 125 115 #include <VBox/log.h> … … 244 234 # define IEM_WITH_SETJMP 245 235 #endif 246 247 /** Temporary hack to disable the double execution. Will be removed in favor248 * of a dedicated execution mode in EM. */249 //#define IEM_VERIFICATION_MODE_NO_REM250 236 251 237 /** Used to shut up GCC warnings about variables that 'may be used uninitialized' … … 853 839 854 840 855 #if defined(IEM_ VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES)841 #if defined(IEM_LOG_MEMORY_WRITES) 856 842 /** What IEM just wrote. */ 857 843 uint8_t g_abIemWrote[256]; … … 907 893 IEM_STATIC uint64_t iemSRegBaseFetchU64(PVMCPU pVCpu, uint8_t iSegReg); 908 894 909 #if defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE_MINIMAL)910 IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu);911 #endif912 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue);913 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue);914 915 895 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 916 896 IEM_STATIC VBOXSTRICTRC iemSvmVmexit(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t uExitCode, uint64_t uExitInfo1, … … 996 976 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)); 997 977 998 #if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))978 #if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0) 999 979 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs)); 1000 980 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss)); … … 1052 1032 CPUMRawLeave(pVCpu, VINF_SUCCESS); 1053 1033 #endif 1054 1055 #ifdef IEM_VERIFICATION_MODE_FULL1056 pVCpu->iem.s.fNoRemSavedByExec = pVCpu->iem.s.fNoRem;1057 pVCpu->iem.s.fNoRem = true;1058 #endif1059 1034 } 1060 1035 … … 1108 1083 { 1109 1084 /* Note! do not touch fInPatchCode here! 
(see iemUninitExecAndFiddleStatusAndMaybeReenter) */ 1110 #ifdef IEM_VERIFICATION_MODE_FULL1111 pVCpu->iem.s.fNoRem = pVCpu->iem.s.fNoRemSavedByExec;1112 #endif1113 1085 #ifdef VBOX_STRICT 1114 1086 # ifdef IEM_WITH_CODE_TLB … … 1139 1111 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)); 1140 1112 1141 #if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))1113 #if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0) 1142 1114 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs)); 1143 1115 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss)); … … 1154 1126 #endif 1155 1127 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); 1156 #ifdef IEM_VERIFICATION_MODE_FULL1157 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)1158 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;1159 #endif1160 1128 IEMMODE enmMode = iemCalcCpuMode(pCtx); 1161 1129 pVCpu->iem.s.enmCpuMode = enmMode; … … 1237 1205 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM)); 1238 1206 1239 #if defined(VBOX_STRICT) && (defined(IEM_VERIFICATION_MODE_FULL) || !defined(VBOX_WITH_RAW_MODE_NOT_R0))1207 #if defined(VBOX_STRICT) && !defined(VBOX_WITH_RAW_MODE_NOT_R0) 1240 1208 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->cs)); 1241 1209 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ss)); … … 1249 1217 1250 1218 pVCpu->iem.s.uCpl = CPUMGetGuestCPL(pVCpu); /** @todo this should be updated during execution! */ 1251 #ifdef IEM_VERIFICATION_MODE_FULL1252 if (pVCpu->iem.s.uInjectCpl != UINT8_MAX)1253 pVCpu->iem.s.uCpl = pVCpu->iem.s.uInjectCpl;1254 #endif1255 1219 IEMMODE enmMode = iemCalcCpuMode(pCtx); 1256 1220 pVCpu->iem.s.enmCpuMode = enmMode; /** @todo this should be updated during execution! */ … … 1356 1320 IEM_STATIC VBOXSTRICTRC iemInitDecoderAndPrefetchOpcodes(PVMCPU pVCpu, bool fBypassHandlers) 1357 1321 { 1358 #ifdef IEM_VERIFICATION_MODE_FULL1359 uint8_t const cbOldOpcodes = pVCpu->iem.s.cbOpcode;1360 #endif1361 1322 iemInitDecoder(pVCpu, fBypassHandlers); 1362 1323 … … 1439 1400 * that, so do it when implementing the guest virtual address 1440 1401 * TLB... */ 1441 1442 # ifdef IEM_VERIFICATION_MODE_FULL1443 /*1444 * Optimistic optimization: Use unconsumed opcode bytes from the previous1445 * instruction.1446 */1447 /** @todo optimize this differently by not using PGMPhysRead. */1448 RTGCPHYS const offPrevOpcodes = GCPhys - pVCpu->iem.s.GCPhysOpcodes;1449 pVCpu->iem.s.GCPhysOpcodes = GCPhys;1450 if ( offPrevOpcodes < cbOldOpcodes1451 && PAGE_SIZE - (GCPhys & PAGE_OFFSET_MASK) > sizeof(pVCpu->iem.s.abOpcode))1452 {1453 uint8_t cbNew = cbOldOpcodes - (uint8_t)offPrevOpcodes;1454 Assert(cbNew <= RT_ELEMENTS(pVCpu->iem.s.abOpcode));1455 memmove(&pVCpu->iem.s.abOpcode[0], &pVCpu->iem.s.abOpcode[offPrevOpcodes], cbNew);1456 pVCpu->iem.s.cbOpcode = cbNew;1457 return VINF_SUCCESS;1458 }1459 # endif1460 1402 1461 1403 /* … … 3463 3405 */ 3464 3406 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 3465 # define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \ 3466 ( IEM_VERIFICATION_ENABLED(a_pVCpu) \ 3467 ? 
(a_pCtx)->eflags.u \ 3468 : CPUMRawGetEFlags(a_pVCpu) ) 3407 # define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) ( CPUMRawGetEFlags(a_pVCpu) ) 3469 3408 #else 3470 # define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) \ 3471 ( (a_pCtx)->eflags.u ) 3409 # define IEMMISC_GET_EFL(a_pVCpu, a_pCtx) ( (a_pCtx)->eflags.u ) 3472 3410 #endif 3473 3411 … … 3480 3418 */ 3481 3419 #ifdef VBOX_WITH_RAW_MODE_NOT_R0 3482 # define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \ 3483 do { \ 3484 if (IEM_VERIFICATION_ENABLED(a_pVCpu)) \ 3485 (a_pCtx)->eflags.u = (a_fEfl); \ 3486 else \ 3487 CPUMRawSetEFlags((a_pVCpu), a_fEfl); \ 3488 } while (0) 3420 # define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) CPUMRawSetEFlags((a_pVCpu), a_fEfl) 3489 3421 #else 3490 # define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) \ 3491 do { \ 3492 (a_pCtx)->eflags.u = (a_fEfl); \ 3493 } while (0) 3422 # define IEMMISC_SET_EFL(a_pVCpu, a_pCtx, a_fEfl) do { (a_pCtx)->eflags.u = (a_fEfl); } while (0) 3494 3423 #endif 3495 3424 … … 3735 3664 pSReg->Sel = 0; 3736 3665 pSReg->ValidSel = 0; 3737 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))3666 if (IEM_IS_GUEST_CPU_INTEL(pVCpu)) 3738 3667 { 3739 3668 /* VT-x (Intel 3960x) doesn't change the base and limit, clears and sets the following attributes */ … … 3784 3713 pSReg->ValidSel = uRpl; 3785 3714 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 3786 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))3715 if (IEM_IS_GUEST_CPU_INTEL(pVCpu)) 3787 3716 { 3788 3717 /* VT-x (Intel 3960x) observed doing something like this. */ … … 3890 3819 pSReg->ValidSel = uSel; 3891 3820 pSReg->fFlags = CPUMSELREG_FLAGS_VALID; 3892 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))3821 if (IEM_IS_GUEST_CPU_INTEL(pVCpu)) 3893 3822 pSReg->Attr.u &= ~X86DESCATTR_UNUSABLE; 3894 3823 … … 4338 4267 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_LDTR); 4339 4268 4340 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))4269 if (IEM_IS_GUEST_CPU_INTEL(pVCpu)) 4341 4270 { 4342 4271 pCtx->es.Attr.u |= X86DESCATTR_UNUSABLE; … … 4356 4285 { 4357 4286 /** @todo Should we update and flush TLBs only if CR3 value actually changes? */ 4358 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4359 { 4360 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3); 4361 AssertRCSuccessReturn(rc, rc); 4362 } 4363 else 4364 pCtx->cr3 = uNewCr3; 4287 int rc = CPUMSetGuestCR3(pVCpu, uNewCr3); 4288 AssertRCSuccessReturn(rc, rc); 4365 4289 4366 4290 /* Inform PGM. 
*/ 4367 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4368 { 4369 int rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE)); 4370 AssertRCReturn(rc, rc); 4371 /* ignore informational status codes */ 4372 } 4291 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE)); 4292 AssertRCReturn(rc, rc); 4293 /* ignore informational status codes */ 4294 4373 4295 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_CR3); 4374 4296 } … … 4405 4327 pCtx->ldtr.u32Limit = X86DESC_LIMIT_G(&DescNewLdt.Legacy); 4406 4328 pCtx->ldtr.Attr.u = X86DESC_GET_HID_ATTR(&DescNewLdt.Legacy); 4407 if (IEM_IS_GUEST_CPU_INTEL(pVCpu) && !IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))4329 if (IEM_IS_GUEST_CPU_INTEL(pVCpu)) 4408 4330 pCtx->ldtr.Attr.u &= ~X86DESCATTR_UNUSABLE; 4409 4331 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &pCtx->ldtr)); … … 4896 4818 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT)) 4897 4819 fEfl &= ~X86_EFL_RF; 4898 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))4820 else 4899 4821 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */ 4900 4822 … … 5308 5230 if (fFlags & (IEM_XCPT_FLAGS_DRx_INSTR_BP | IEM_XCPT_FLAGS_T_SOFT_INT)) 5309 5231 fEfl &= ~X86_EFL_RF; 5310 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))5232 else 5311 5233 fEfl |= X86_EFL_RF; /* Vagueness is all I've found on this so far... */ /** @todo Automatically pushing EFLAGS.RF. */ 5312 5234 … … 5872 5794 if (fAccess & IEM_ACCESS_TYPE_WRITE) 5873 5795 { 5874 if (! IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu) || !(fAccess & IEM_ACCESS_TYPE_READ))5796 if (!(fAccess & IEM_ACCESS_TYPE_READ)) 5875 5797 uErr |= X86_TRAP_PF_RW; 5876 5798 } … … 8242 8164 IEM_STATIC int iemMemPageMap(PVMCPU pVCpu, RTGCPHYS GCPhysMem, uint32_t fAccess, void **ppvMem, PPGMPAGEMAPLOCK pLock) 8243 8165 { 8244 #ifdef IEM_VERIFICATION_MODE_FULL8245 /* Force the alternative path so we can ignore writes. */8246 if ((fAccess & IEM_ACCESS_TYPE_WRITE) && !pVCpu->iem.s.fNoRem)8247 {8248 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))8249 {8250 int rc2 = PGMPhysIemQueryAccess(pVCpu->CTX_SUFF(pVM), GCPhysMem,8251 RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);8252 if (RT_FAILURE(rc2))8253 pVCpu->iem.s.fProblematicMemory = true;8254 }8255 return VERR_PGM_PHYS_TLB_CATCH_ALL;8256 }8257 #endif8258 8166 #ifdef IEM_LOG_MEMORY_WRITES 8259 8167 if (fAccess & IEM_ACCESS_TYPE_WRITE) 8260 8168 return VERR_PGM_PHYS_TLB_CATCH_ALL; 8261 #endif8262 #ifdef IEM_VERIFICATION_MODE_MINIMAL8263 return VERR_PGM_PHYS_TLB_CATCH_ALL;8264 8169 #endif 8265 8170 … … 8277 8182 AssertMsg(rc == VINF_SUCCESS || RT_FAILURE_NP(rc), ("%Rrc\n", rc)); 8278 8183 8279 #ifdef IEM_VERIFICATION_MODE_FULL8280 if (RT_FAILURE(rc) && IEM_FULL_VERIFICATION_ENABLED(pVCpu))8281 pVCpu->iem.s.fProblematicMemory = true;8282 #endif8283 8184 return rc; 8284 8185 } … … 8378 8279 * Do the writing. 
8379 8280 */ 8380 #ifndef IEM_VERIFICATION_MODE_MINIMAL8381 8281 PVM pVM = pVCpu->CTX_SUFF(pVM); 8382 if ( !pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned 8383 && !IEM_VERIFICATION_ENABLED(pVCpu)) 8282 if (!pVCpu->iem.s.aMemBbMappings[iMemMap].fUnassigned) 8384 8283 { 8385 8284 uint16_t const cbFirst = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst; … … 8415 8314 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 8416 8315 } 8417 # 8316 #ifndef IN_RING3 8418 8317 else if (fPostponeFail) 8419 8318 { … … 8425 8324 return iemSetPassUpStatus(pVCpu, rcStrict); 8426 8325 } 8427 # 8326 #endif 8428 8327 else 8429 8328 { … … 8465 8364 rcStrict = iemSetPassUpStatus(pVCpu, rcStrict); 8466 8365 } 8467 # 8366 #ifndef IN_RING3 8468 8367 else if (fPostponeFail) 8469 8368 { … … 8475 8374 return iemSetPassUpStatus(pVCpu, rcStrict); 8476 8375 } 8477 # 8376 #endif 8478 8377 else 8479 8378 { … … 8485 8384 } 8486 8385 } 8487 # 8386 #ifndef IN_RING3 8488 8387 else if (fPostponeFail) 8489 8388 { … … 8498 8397 return iemSetPassUpStatus(pVCpu, rcStrict); 8499 8398 } 8500 # 8399 #endif 8501 8400 else 8502 8401 { … … 8538 8437 } 8539 8438 } 8540 #endif 8541 8542 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3) 8543 /* 8544 * Record the write(s). 8545 */ 8546 if (!pVCpu->iem.s.fNoRem) 8547 { 8548 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu); 8549 if (pEvtRec) 8550 { 8551 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE; 8552 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst; 8553 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst; 8554 memcpy(pEvtRec->u.RamWrite.ab, &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0], pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst); 8555 AssertCompile(sizeof(pEvtRec->u.RamWrite.ab) == sizeof(pVCpu->iem.s.aBounceBuffers[0].ab)); 8556 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 8557 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 8558 } 8559 if (pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond) 8560 { 8561 pEvtRec = iemVerifyAllocRecord(pVCpu); 8562 if (pEvtRec) 8563 { 8564 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE; 8565 pEvtRec->u.RamWrite.GCPhys = pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysSecond; 8566 pEvtRec->u.RamWrite.cb = pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond; 8567 memcpy(pEvtRec->u.RamWrite.ab, 8568 &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst], 8569 pVCpu->iem.s.aMemBbMappings[iMemMap].cbSecond); 8570 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext; 8571 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec; 8572 } 8573 } 8574 } 8575 #endif 8576 #if defined(IEM_VERIFICATION_MODE_MINIMAL) || defined(IEM_LOG_MEMORY_WRITES) 8439 8440 #if defined(IEM_LOG_MEMORY_WRITES) 8577 8441 Log(("IEM Wrote %RGp: %.*Rhxs\n", pVCpu->iem.s.aMemBbMappings[iMemMap].GCPhysFirst, 8578 8442 RT_MAX(RT_MIN(pVCpu->iem.s.aMemBbMappings[iMemMap].cbFirst, 64), 1), &pVCpu->iem.s.aBounceBuffers[iMemMap].ab[0])); … … 8619 8483 8620 8484 PVM pVM = pVCpu->CTX_SUFF(pVM); 8621 #ifdef IEM_VERIFICATION_MODE_FULL8622 /*8623 * Detect problematic memory when verifying so we can select8624 * the right execution engine. 
(TLB: Redo this.)8625 */8626 if (IEM_FULL_VERIFICATION_ENABLED(pVCpu))8627 {8628 int rc2 = PGMPhysIemQueryAccess(pVM, GCPhysFirst, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);8629 if (RT_SUCCESS(rc2))8630 rc2 = PGMPhysIemQueryAccess(pVM, GCPhysSecond, RT_BOOL(fAccess & IEM_ACCESS_TYPE_WRITE), pVCpu->iem.s.fBypassHandlers);8631 if (RT_FAILURE(rc2))8632 pVCpu->iem.s.fProblematicMemory = true;8633 }8634 #endif8635 8636 8485 8637 8486 /* … … 8712 8561 } 8713 8562 } 8714 8715 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)8716 if ( !pVCpu->iem.s.fNoRem8717 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )8718 {8719 /*8720 * Record the reads.8721 */8722 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);8723 if (pEvtRec)8724 {8725 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;8726 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;8727 pEvtRec->u.RamRead.cb = cbFirstPage;8728 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;8729 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;8730 }8731 pEvtRec = iemVerifyAllocRecord(pVCpu);8732 if (pEvtRec)8733 {8734 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;8735 pEvtRec->u.RamRead.GCPhys = GCPhysSecond;8736 pEvtRec->u.RamRead.cb = cbSecondPage;8737 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;8738 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;8739 }8740 }8741 #endif8742 8563 } 8743 8564 #ifdef VBOX_STRICT … … 8824 8645 } 8825 8646 } 8826 8827 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)8828 if ( !pVCpu->iem.s.fNoRem8829 && (fAccess & (IEM_ACCESS_TYPE_READ | IEM_ACCESS_TYPE_EXEC)) )8830 {8831 /*8832 * Record the read.8833 */8834 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);8835 if (pEvtRec)8836 {8837 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;8838 pEvtRec->u.RamRead.GCPhys = GCPhysFirst;8839 pEvtRec->u.RamRead.cb = (uint32_t)cbMem;8840 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;8841 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;8842 }8843 }8844 #endif8845 8647 } 8846 8648 #ifdef VBOX_STRICT … … 10462 10264 RTGCPTR GCPtrTop = iemRegGetRspForPush(pVCpu, pCtx, 4, &uNewRsp); 10463 10265 10464 VBOXSTRICTRC rc; 10465 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 10466 { 10467 /* The recompiler writes a full dword. */ 10468 uint32_t *pu32Dst; 10469 rc = iemMemMap(pVCpu, (void **)&pu32Dst, sizeof(*pu32Dst), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_W); 10470 if (rc == VINF_SUCCESS) 10471 { 10472 *pu32Dst = u32Value; 10473 rc = iemMemCommitAndUnmap(pVCpu, pu32Dst, IEM_ACCESS_STACK_W); 10474 } 10475 } 10476 else 10477 { 10478 /* The intel docs talks about zero extending the selector register 10479 value. My actual intel CPU here might be zero extending the value 10480 but it still only writes the lower word... */ 10481 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what 10482 * happens when crossing an electric page boundrary, is the high word checked 10483 * for write accessibility or not? Probably it is. What about segment limits? 10484 * It appears this behavior is also shared with trap error codes. 10485 * 10486 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check 10487 * ancient hardware when it actually did change. 
*/ 10488 uint16_t *pu16Dst; 10489 rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW); 10490 if (rc == VINF_SUCCESS) 10491 { 10492 *pu16Dst = (uint16_t)u32Value; 10493 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW); 10494 } 10266 /* The intel docs talks about zero extending the selector register 10267 value. My actual intel CPU here might be zero extending the value 10268 but it still only writes the lower word... */ 10269 /** @todo Test this on new HW and on AMD and in 64-bit mode. Also test what 10270 * happens when crossing an electric page boundrary, is the high word checked 10271 * for write accessibility or not? Probably it is. What about segment limits? 10272 * It appears this behavior is also shared with trap error codes. 10273 * 10274 * Docs indicate the behavior changed maybe in Pentium or Pentium Pro. Check 10275 * ancient hardware when it actually did change. */ 10276 uint16_t *pu16Dst; 10277 VBOXSTRICTRC rc = iemMemMap(pVCpu, (void **)&pu16Dst, sizeof(uint32_t), X86_SREG_SS, GCPtrTop, IEM_ACCESS_STACK_RW); 10278 if (rc == VINF_SUCCESS) 10279 { 10280 *pu16Dst = (uint16_t)u32Value; 10281 rc = iemMemCommitAndUnmap(pVCpu, pu16Dst, IEM_ACCESS_STACK_RW); 10495 10282 } 10496 10283 … … 13905 13692 13906 13693 13907 13908 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)13909 13910 /**13911 * Sets up execution verification mode.13912 */13913 IEM_STATIC void iemExecVerificationModeSetup(PVMCPU pVCpu)13914 {13915 PVMCPU pVCpu = pVCpu;13916 PCPUMCTX pOrgCtx = IEM_GET_CTX(pVCpu);13917 13918 /*13919 * Always note down the address of the current instruction.13920 */13921 pVCpu->iem.s.uOldCs = pOrgCtx->cs.Sel;13922 pVCpu->iem.s.uOldRip = pOrgCtx->rip;13923 13924 /*13925 * Enable verification and/or logging.13926 */13927 bool fNewNoRem = !LogIs6Enabled(); /* logging triggers the no-rem/rem verification stuff */;13928 if ( fNewNoRem13929 && ( 013930 #if 0 /* auto enable on first paged protected mode interrupt */13931 || ( pOrgCtx->eflags.Bits.u1IF13932 && (pOrgCtx->cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)13933 && TRPMHasTrap(pVCpu)13934 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip) )13935 #endif13936 #if 013937 || ( pOrgCtx->cs == 0x1013938 && ( pOrgCtx->rip == 0x90119e3e13939 || pOrgCtx->rip == 0x901d9810)13940 #endif13941 #if 0 /* Auto enable DSL - FPU stuff. */13942 || ( pOrgCtx->cs == 0x1013943 && (// pOrgCtx->rip == 0xc02ec07f13944 //|| pOrgCtx->rip == 0xc02ec08213945 //|| pOrgCtx->rip == 0xc02ec0c913946 013947 || pOrgCtx->rip == 0x0c010e7c4 /* fxsave */ ) )13948 #endif13949 #if 0 /* Auto enable DSL - fstp st0 stuff. */13950 || (pOrgCtx->cs.Sel == 0x23 pOrgCtx->rip == 0x804aff7)13951 #endif13952 #if 013953 || pOrgCtx->rip == 0x9022bb3a13954 #endif13955 #if 013956 || (pOrgCtx->cs.Sel == 0x58 && pOrgCtx->rip == 0x3be) /* NT4SP1 sidt/sgdt in early loader code */13957 #endif13958 #if 013959 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013ec28) /* NT4SP1 first str (early boot) */13960 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x80119e3f) /* NT4SP1 second str (early boot) */13961 #endif13962 #if 0 /* NT4SP1 - later on the blue screen, things goes wrong... */13963 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8010a5df)13964 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7c4)13965 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013a7d2)13966 #endif13967 #if 0 /* NT4SP1 - xadd early boot. 
*/13968 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8019cf0f)13969 #endif13970 #if 0 /* NT4SP1 - wrmsr (intel MSR). */13971 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8011a6d4)13972 #endif13973 #if 0 /* NT4SP1 - cmpxchg (AMD). */13974 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801684c1)13975 #endif13976 #if 0 /* NT4SP1 - fnstsw + 2 (AMD). */13977 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x801c6b88+2)13978 #endif13979 #if 0 /* NT4SP1 - iret to v8086 -- too generic a place? (N/A with GAs installed) */13980 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013bd5d)13981 13982 #endif13983 #if 0 /* NT4SP1 - iret to v8086 (executing edlin) */13984 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013b609)13985 13986 #endif13987 #if 0 /* NT4SP1 - frstor [ecx] */13988 || (pOrgCtx->cs.Sel == 8 && pOrgCtx->rip == 0x8013d11f)13989 #endif13990 #if 0 /* xxxxxx - All long mode code. */13991 || (pOrgCtx->msrEFER & MSR_K6_EFER_LMA)13992 #endif13993 #if 0 /* rep movsq linux 3.7 64-bit boot. */13994 || (pOrgCtx->rip == 0x0000000000100241)13995 #endif13996 #if 0 /* linux 3.7 64-bit boot - '000000000215e240'. */13997 || (pOrgCtx->rip == 0x000000000215e240)13998 #endif13999 #if 0 /* DOS's size-overridden iret to v8086. */14000 || (pOrgCtx->rip == 0x427 && pOrgCtx->cs.Sel == 0xb8)14001 #endif14002 )14003 )14004 {14005 RTLogGroupSettings(NULL, "iem.eo.l6.l2");14006 RTLogFlags(NULL, "enabled");14007 fNewNoRem = false;14008 }14009 if (fNewNoRem != pVCpu->iem.s.fNoRem)14010 {14011 pVCpu->iem.s.fNoRem = fNewNoRem;14012 if (!fNewNoRem)14013 {14014 LogAlways(("Enabling verification mode!\n"));14015 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);14016 }14017 else14018 LogAlways(("Disabling verification mode!\n"));14019 }14020 14021 /*14022 * Switch state.14023 */14024 if (IEM_VERIFICATION_ENABLED(pVCpu))14025 {14026 static CPUMCTX s_DebugCtx; /* Ugly! */14027 14028 s_DebugCtx = *pOrgCtx;14029 IEM_GET_CTX(pVCpu) = &s_DebugCtx;14030 }14031 14032 /*14033 * See if there is an interrupt pending in TRPM and inject it if we can.14034 */14035 pVCpu->iem.s.uInjectCpl = UINT8_MAX;14036 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? 
*/14037 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM14038 bool fIntrEnabled = pOrgCtx->hwvirt.Gif;14039 if (fIntrEnabled)14040 {14041 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))14042 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);14043 else14044 fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;14045 }14046 #else14047 bool fIntrEnabled = pOrgCtx->eflags.Bits.u1IF;14048 #endif14049 if ( fIntrEnabled14050 && TRPMHasTrap(pVCpu)14051 && EMGetInhibitInterruptsPC(pVCpu) != pOrgCtx->rip)14052 {14053 uint8_t u8TrapNo;14054 TRPMEVENT enmType;14055 RTGCUINT uErrCode;14056 RTGCPTR uCr2;14057 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);14058 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);14059 if (!IEM_VERIFICATION_ENABLED(pVCpu))14060 TRPMResetTrap(pVCpu);14061 pVCpu->iem.s.uInjectCpl = pVCpu->iem.s.uCpl;14062 }14063 14064 /*14065 * Reset the counters.14066 */14067 pVCpu->iem.s.cIOReads = 0;14068 pVCpu->iem.s.cIOWrites = 0;14069 pVCpu->iem.s.fIgnoreRaxRdx = false;14070 pVCpu->iem.s.fOverlappingMovs = false;14071 pVCpu->iem.s.fProblematicMemory = false;14072 pVCpu->iem.s.fUndefinedEFlags = 0;14073 14074 if (IEM_VERIFICATION_ENABLED(pVCpu))14075 {14076 /*14077 * Free all verification records.14078 */14079 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pIemEvtRecHead;14080 pVCpu->iem.s.pIemEvtRecHead = NULL;14081 pVCpu->iem.s.ppIemEvtRecNext = &pVCpu->iem.s.pIemEvtRecHead;14082 do14083 {14084 while (pEvtRec)14085 {14086 PIEMVERIFYEVTREC pNext = pEvtRec->pNext;14087 pEvtRec->pNext = pVCpu->iem.s.pFreeEvtRec;14088 pVCpu->iem.s.pFreeEvtRec = pEvtRec;14089 pEvtRec = pNext;14090 }14091 pEvtRec = pVCpu->iem.s.pOtherEvtRecHead;14092 pVCpu->iem.s.pOtherEvtRecHead = NULL;14093 pVCpu->iem.s.ppOtherEvtRecNext = &pVCpu->iem.s.pOtherEvtRecHead;14094 } while (pEvtRec);14095 }14096 }14097 14098 14099 /**14100 * Allocate an event record.14101 * @returns Pointer to a record.14102 */14103 IEM_STATIC PIEMVERIFYEVTREC iemVerifyAllocRecord(PVMCPU pVCpu)14104 {14105 if (!IEM_VERIFICATION_ENABLED(pVCpu))14106 return NULL;14107 14108 PIEMVERIFYEVTREC pEvtRec = pVCpu->iem.s.pFreeEvtRec;14109 if (pEvtRec)14110 pVCpu->iem.s.pFreeEvtRec = pEvtRec->pNext;14111 else14112 {14113 if (!pVCpu->iem.s.ppIemEvtRecNext)14114 return NULL; /* Too early (fake PCIBIOS), ignore notification. 
*/14115 14116 pEvtRec = (PIEMVERIFYEVTREC)MMR3HeapAlloc(pVCpu->CTX_SUFF(pVM), MM_TAG_EM /* lazy bird*/, sizeof(*pEvtRec));14117 if (!pEvtRec)14118 return NULL;14119 }14120 pEvtRec->enmEvent = IEMVERIFYEVENT_INVALID;14121 pEvtRec->pNext = NULL;14122 return pEvtRec;14123 }14124 14125 14126 /**14127 * IOMMMIORead notification.14128 */14129 VMM_INT_DECL(void) IEMNotifyMMIORead(PVM pVM, RTGCPHYS GCPhys, size_t cbValue)14130 {14131 PVMCPU pVCpu = VMMGetCpu(pVM);14132 if (!pVCpu)14133 return;14134 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14135 if (!pEvtRec)14136 return;14137 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_READ;14138 pEvtRec->u.RamRead.GCPhys = GCPhys;14139 pEvtRec->u.RamRead.cb = (uint32_t)cbValue;14140 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;14141 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;14142 }14143 14144 14145 /**14146 * IOMMMIOWrite notification.14147 */14148 VMM_INT_DECL(void) IEMNotifyMMIOWrite(PVM pVM, RTGCPHYS GCPhys, uint32_t u32Value, size_t cbValue)14149 {14150 PVMCPU pVCpu = VMMGetCpu(pVM);14151 if (!pVCpu)14152 return;14153 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14154 if (!pEvtRec)14155 return;14156 pEvtRec->enmEvent = IEMVERIFYEVENT_RAM_WRITE;14157 pEvtRec->u.RamWrite.GCPhys = GCPhys;14158 pEvtRec->u.RamWrite.cb = (uint32_t)cbValue;14159 pEvtRec->u.RamWrite.ab[0] = RT_BYTE1(u32Value);14160 pEvtRec->u.RamWrite.ab[1] = RT_BYTE2(u32Value);14161 pEvtRec->u.RamWrite.ab[2] = RT_BYTE3(u32Value);14162 pEvtRec->u.RamWrite.ab[3] = RT_BYTE4(u32Value);14163 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;14164 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;14165 }14166 14167 14168 /**14169 * IOMIOPortRead notification.14170 */14171 VMM_INT_DECL(void) IEMNotifyIOPortRead(PVM pVM, RTIOPORT Port, size_t cbValue)14172 {14173 PVMCPU pVCpu = VMMGetCpu(pVM);14174 if (!pVCpu)14175 return;14176 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14177 if (!pEvtRec)14178 return;14179 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;14180 pEvtRec->u.IOPortRead.Port = Port;14181 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;14182 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;14183 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;14184 }14185 14186 /**14187 * IOMIOPortWrite notification.14188 */14189 VMM_INT_DECL(void) IEMNotifyIOPortWrite(PVM pVM, RTIOPORT Port, uint32_t u32Value, size_t cbValue)14190 {14191 PVMCPU pVCpu = VMMGetCpu(pVM);14192 if (!pVCpu)14193 return;14194 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14195 if (!pEvtRec)14196 return;14197 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;14198 pEvtRec->u.IOPortWrite.Port = Port;14199 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;14200 pEvtRec->u.IOPortWrite.u32Value = u32Value;14201 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;14202 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;14203 }14204 14205 14206 VMM_INT_DECL(void) IEMNotifyIOPortReadString(PVM pVM, RTIOPORT Port, void *pvDst, RTGCUINTREG cTransfers, size_t cbValue)14207 {14208 PVMCPU pVCpu = VMMGetCpu(pVM);14209 if (!pVCpu)14210 return;14211 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14212 if (!pEvtRec)14213 return;14214 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_READ;14215 pEvtRec->u.IOPortStrRead.Port = Port;14216 pEvtRec->u.IOPortStrRead.cbValue = (uint8_t)cbValue;14217 pEvtRec->u.IOPortStrRead.cTransfers = cTransfers;14218 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;14219 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;14220 }14221 14222 14223 VMM_INT_DECL(void) IEMNotifyIOPortWriteString(PVM pVM, RTIOPORT 
Port, void const *pvSrc, RTGCUINTREG cTransfers, size_t cbValue)14224 {14225 PVMCPU pVCpu = VMMGetCpu(pVM);14226 if (!pVCpu)14227 return;14228 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14229 if (!pEvtRec)14230 return;14231 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_STR_WRITE;14232 pEvtRec->u.IOPortStrWrite.Port = Port;14233 pEvtRec->u.IOPortStrWrite.cbValue = (uint8_t)cbValue;14234 pEvtRec->u.IOPortStrWrite.cTransfers = cTransfers;14235 pEvtRec->pNext = *pVCpu->iem.s.ppOtherEvtRecNext;14236 *pVCpu->iem.s.ppOtherEvtRecNext = pEvtRec;14237 }14238 14239 14240 /**14241 * Fakes and records an I/O port read.14242 *14243 * @returns VINF_SUCCESS.14244 * @param pVCpu The cross context virtual CPU structure of the calling thread.14245 * @param Port The I/O port.14246 * @param pu32Value Where to store the fake value.14247 * @param cbValue The size of the access.14248 */14249 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)14250 {14251 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14252 if (pEvtRec)14253 {14254 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_READ;14255 pEvtRec->u.IOPortRead.Port = Port;14256 pEvtRec->u.IOPortRead.cbValue = (uint8_t)cbValue;14257 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;14258 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;14259 }14260 pVCpu->iem.s.cIOReads++;14261 *pu32Value = 0xcccccccc;14262 return VINF_SUCCESS;14263 }14264 14265 14266 /**14267 * Fakes and records an I/O port write.14268 *14269 * @returns VINF_SUCCESS.14270 * @param pVCpu The cross context virtual CPU structure of the calling thread.14271 * @param Port The I/O port.14272 * @param u32Value The value being written.14273 * @param cbValue The size of the access.14274 */14275 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)14276 {14277 PIEMVERIFYEVTREC pEvtRec = iemVerifyAllocRecord(pVCpu);14278 if (pEvtRec)14279 {14280 pEvtRec->enmEvent = IEMVERIFYEVENT_IOPORT_WRITE;14281 pEvtRec->u.IOPortWrite.Port = Port;14282 pEvtRec->u.IOPortWrite.cbValue = (uint8_t)cbValue;14283 pEvtRec->u.IOPortWrite.u32Value = u32Value;14284 pEvtRec->pNext = *pVCpu->iem.s.ppIemEvtRecNext;14285 *pVCpu->iem.s.ppIemEvtRecNext = pEvtRec;14286 }14287 pVCpu->iem.s.cIOWrites++;14288 return VINF_SUCCESS;14289 }14290 14291 14292 /**14293 * Used to add extra details about a stub case.14294 * @param pVCpu The cross context virtual CPU structure of the calling thread.14295 */14296 IEM_STATIC void iemVerifyAssertMsg2(PVMCPU pVCpu)14297 {14298 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);14299 PVM pVM = pVCpu->CTX_SUFF(pVM);14300 PVMCPU pVCpu = pVCpu;14301 char szRegs[4096];14302 DBGFR3RegPrintf(pVM->pUVM, pVCpu->idCpu, &szRegs[0], sizeof(szRegs),14303 "rax=%016VR{rax} rbx=%016VR{rbx} rcx=%016VR{rcx} rdx=%016VR{rdx}\n"14304 "rsi=%016VR{rsi} rdi=%016VR{rdi} r8 =%016VR{r8} r9 =%016VR{r9}\n"14305 "r10=%016VR{r10} r11=%016VR{r11} r12=%016VR{r12} r13=%016VR{r13}\n"14306 "r14=%016VR{r14} r15=%016VR{r15} %VRF{rflags}\n"14307 "rip=%016VR{rip} rsp=%016VR{rsp} rbp=%016VR{rbp}\n"14308 "cs={%04VR{cs} base=%016VR{cs_base} limit=%08VR{cs_lim} flags=%04VR{cs_attr}} cr0=%016VR{cr0}\n"14309 "ds={%04VR{ds} base=%016VR{ds_base} limit=%08VR{ds_lim} flags=%04VR{ds_attr}} cr2=%016VR{cr2}\n"14310 "es={%04VR{es} base=%016VR{es_base} limit=%08VR{es_lim} flags=%04VR{es_attr}} cr3=%016VR{cr3}\n"14311 "fs={%04VR{fs} base=%016VR{fs_base} limit=%08VR{fs_lim} flags=%04VR{fs_attr}} cr4=%016VR{cr4}\n"14312 "gs={%04VR{gs} base=%016VR{gs_base} 
limit=%08VR{gs_lim} flags=%04VR{gs_attr}} cr8=%016VR{cr8}\n"14313 "ss={%04VR{ss} base=%016VR{ss_base} limit=%08VR{ss_lim} flags=%04VR{ss_attr}}\n"14314 "dr0=%016VR{dr0} dr1=%016VR{dr1} dr2=%016VR{dr2} dr3=%016VR{dr3}\n"14315 "dr6=%016VR{dr6} dr7=%016VR{dr7}\n"14316 "gdtr=%016VR{gdtr_base}:%04VR{gdtr_lim} idtr=%016VR{idtr_base}:%04VR{idtr_lim} rflags=%08VR{rflags}\n"14317 "ldtr={%04VR{ldtr} base=%016VR{ldtr_base} limit=%08VR{ldtr_lim} flags=%08VR{ldtr_attr}}\n"14318 "tr ={%04VR{tr} base=%016VR{tr_base} limit=%08VR{tr_lim} flags=%08VR{tr_attr}}\n"14319 " sysenter={cs=%04VR{sysenter_cs} eip=%08VR{sysenter_eip} esp=%08VR{sysenter_esp}}\n"14320 " efer=%016VR{efer}\n"14321 " pat=%016VR{pat}\n"14322 " sf_mask=%016VR{sf_mask}\n"14323 "krnl_gs_base=%016VR{krnl_gs_base}\n"14324 " lstar=%016VR{lstar}\n"14325 " star=%016VR{star} cstar=%016VR{cstar}\n"14326 "fcw=%04VR{fcw} fsw=%04VR{fsw} ftw=%04VR{ftw} mxcsr=%04VR{mxcsr} mxcsr_mask=%04VR{mxcsr_mask}\n"14327 );14328 14329 char szInstr1[256];14330 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, pVCpu->iem.s.uOldCs, pVCpu->iem.s.uOldRip,14331 DBGF_DISAS_FLAGS_DEFAULT_MODE,14332 szInstr1, sizeof(szInstr1), NULL);14333 char szInstr2[256];14334 DBGFR3DisasInstrEx(pVM->pUVM, pVCpu->idCpu, 0, 0,14335 DBGF_DISAS_FLAGS_CURRENT_GUEST | DBGF_DISAS_FLAGS_DEFAULT_MODE,14336 szInstr2, sizeof(szInstr2), NULL);14337 14338 RTAssertMsg2Weak("%s%s\n%s\n", szRegs, szInstr1, szInstr2);14339 }14340 14341 14342 /**14343 * Used by iemVerifyAssertRecord and iemVerifyAssertRecords to add a record14344 * dump to the assertion info.14345 *14346 * @param pEvtRec The record to dump.14347 */14348 IEM_STATIC void iemVerifyAssertAddRecordDump(PIEMVERIFYEVTREC pEvtRec)14349 {14350 switch (pEvtRec->enmEvent)14351 {14352 case IEMVERIFYEVENT_IOPORT_READ:14353 RTAssertMsg2Add("I/O PORT READ from %#6x, %d bytes\n",14354 pEvtRec->u.IOPortWrite.Port,14355 pEvtRec->u.IOPortWrite.cbValue);14356 break;14357 case IEMVERIFYEVENT_IOPORT_WRITE:14358 RTAssertMsg2Add("I/O PORT WRITE to %#6x, %d bytes, value %#x\n",14359 pEvtRec->u.IOPortWrite.Port,14360 pEvtRec->u.IOPortWrite.cbValue,14361 pEvtRec->u.IOPortWrite.u32Value);14362 break;14363 case IEMVERIFYEVENT_IOPORT_STR_READ:14364 RTAssertMsg2Add("I/O PORT STRING READ from %#6x, %d bytes, %#x times\n",14365 pEvtRec->u.IOPortStrWrite.Port,14366 pEvtRec->u.IOPortStrWrite.cbValue,14367 pEvtRec->u.IOPortStrWrite.cTransfers);14368 break;14369 case IEMVERIFYEVENT_IOPORT_STR_WRITE:14370 RTAssertMsg2Add("I/O PORT STRING WRITE to %#6x, %d bytes, %#x times\n",14371 pEvtRec->u.IOPortStrWrite.Port,14372 pEvtRec->u.IOPortStrWrite.cbValue,14373 pEvtRec->u.IOPortStrWrite.cTransfers);14374 break;14375 case IEMVERIFYEVENT_RAM_READ:14376 RTAssertMsg2Add("RAM READ at %RGp, %#4zx bytes\n",14377 pEvtRec->u.RamRead.GCPhys,14378 pEvtRec->u.RamRead.cb);14379 break;14380 case IEMVERIFYEVENT_RAM_WRITE:14381 RTAssertMsg2Add("RAM WRITE at %RGp, %#4zx bytes: %.*Rhxs\n",14382 pEvtRec->u.RamWrite.GCPhys,14383 pEvtRec->u.RamWrite.cb,14384 (int)pEvtRec->u.RamWrite.cb,14385 pEvtRec->u.RamWrite.ab);14386 break;14387 default:14388 AssertMsgFailed(("Invalid event type %d\n", pEvtRec->enmEvent));14389 break;14390 }14391 }14392 14393 14394 /**14395 * Raises an assertion on the specified record, showing the given message with14396 * a record dump attached.14397 *14398 * @param pVCpu The cross context virtual CPU structure of the calling thread.14399 * @param pEvtRec1 The first record.14400 * @param pEvtRec2 The second record.14401 * @param pszMsg The message explaining why we're asserting.14402 
*/14403 IEM_STATIC void iemVerifyAssertRecords(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec1, PIEMVERIFYEVTREC pEvtRec2, const char *pszMsg)14404 {14405 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);14406 iemVerifyAssertAddRecordDump(pEvtRec1);14407 iemVerifyAssertAddRecordDump(pEvtRec2);14408 iemVerifyAssertMsg2(pVCpu);14409 RTAssertPanic();14410 }14411 14412 14413 /**14414 * Raises an assertion on the specified record, showing the given message with14415 * a record dump attached.14416 *14417 * @param pVCpu The cross context virtual CPU structure of the calling thread.14418 * @param pEvtRec1 The first record.14419 * @param pszMsg The message explaining why we're asserting.14420 */14421 IEM_STATIC void iemVerifyAssertRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, const char *pszMsg)14422 {14423 RTAssertMsg1(pszMsg, __LINE__, __FILE__, __PRETTY_FUNCTION__);14424 iemVerifyAssertAddRecordDump(pEvtRec);14425 iemVerifyAssertMsg2(pVCpu);14426 RTAssertPanic();14427 }14428 14429 14430 /**14431 * Verifies a write record.14432 *14433 * @param pVCpu The cross context virtual CPU structure of the calling thread.14434 * @param pEvtRec The write record.14435 * @param fRem Set if REM was doing the other executing. If clear14436 * it was HM.14437 */14438 IEM_STATIC void iemVerifyWriteRecord(PVMCPU pVCpu, PIEMVERIFYEVTREC pEvtRec, bool fRem)14439 {14440 uint8_t abBuf[sizeof(pEvtRec->u.RamWrite.ab)]; RT_ZERO(abBuf);14441 Assert(sizeof(abBuf) >= pEvtRec->u.RamWrite.cb);14442 int rc = PGMPhysSimpleReadGCPhys(pVCpu->CTX_SUFF(pVM), abBuf, pEvtRec->u.RamWrite.GCPhys, pEvtRec->u.RamWrite.cb);14443 if ( RT_FAILURE(rc)14444 || memcmp(abBuf, pEvtRec->u.RamWrite.ab, pEvtRec->u.RamWrite.cb) )14445 {14446 /* fend off ins */14447 if ( !pVCpu->iem.s.cIOReads14448 || pEvtRec->u.RamWrite.ab[0] != 0xcc14449 || ( pEvtRec->u.RamWrite.cb != 114450 && pEvtRec->u.RamWrite.cb != 214451 && pEvtRec->u.RamWrite.cb != 4) )14452 {14453 /* fend off ROMs and MMIO */14454 if ( pEvtRec->u.RamWrite.GCPhys - UINT32_C(0x000a0000) > UINT32_C(0x60000)14455 && pEvtRec->u.RamWrite.GCPhys - UINT32_C(0xfffc0000) > UINT32_C(0x40000) )14456 {14457 /* fend off fxsave */14458 if (pEvtRec->u.RamWrite.cb != 512)14459 {14460 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVCpu->CTX_SUFF(pVM)->pUVM) ? 
"vmx" : "svm";14461 RTAssertMsg1(NULL, __LINE__, __FILE__, __PRETTY_FUNCTION__);14462 RTAssertMsg2Weak("Memory at %RGv differs\n", pEvtRec->u.RamWrite.GCPhys);14463 RTAssertMsg2Add("%s: %.*Rhxs\n"14464 "iem: %.*Rhxs\n",14465 pszWho, pEvtRec->u.RamWrite.cb, abBuf,14466 pEvtRec->u.RamWrite.cb, pEvtRec->u.RamWrite.ab);14467 iemVerifyAssertAddRecordDump(pEvtRec);14468 iemVerifyAssertMsg2(pVCpu);14469 RTAssertPanic();14470 }14471 }14472 }14473 }14474 14475 }14476 14477 /**14478 * Performs the post-execution verfication checks.14479 */14480 IEM_STATIC VBOXSTRICTRC iemExecVerificationModeCheck(PVMCPU pVCpu, VBOXSTRICTRC rcStrictIem)14481 {14482 if (!IEM_VERIFICATION_ENABLED(pVCpu))14483 return rcStrictIem;14484 14485 /*14486 * Switch back the state.14487 */14488 PCPUMCTX pOrgCtx = CPUMQueryGuestCtxPtr(pVCpu);14489 PCPUMCTX pDebugCtx = IEM_GET_CTX(pVCpu);14490 Assert(pOrgCtx != pDebugCtx);14491 IEM_GET_CTX(pVCpu) = pOrgCtx;14492 14493 /*14494 * Execute the instruction in REM.14495 */14496 bool fRem = false;14497 PVM pVM = pVCpu->CTX_SUFF(pVM);14498 PVMCPU pVCpu = pVCpu;14499 VBOXSTRICTRC rc = VERR_EM_CANNOT_EXEC_GUEST;14500 # ifdef IEM_VERIFICATION_MODE_FULL_HM14501 if ( HMIsEnabled(pVM)14502 && pVCpu->iem.s.cIOReads == 014503 && pVCpu->iem.s.cIOWrites == 014504 && !pVCpu->iem.s.fProblematicMemory)14505 {14506 uint64_t uStartRip = pOrgCtx->rip;14507 unsigned iLoops = 0;14508 do14509 {14510 rc = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);14511 iLoops++;14512 } while ( rc == VINF_SUCCESS14513 || ( rc == VINF_EM_DBG_STEPPED14514 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)14515 && EMGetInhibitInterruptsPC(pVCpu) == pOrgCtx->rip)14516 || ( pOrgCtx->rip != pDebugCtx->rip14517 && pVCpu->iem.s.uInjectCpl != UINT8_MAX14518 && iLoops < 8) );14519 if (rc == VINF_EM_RESCHEDULE && pOrgCtx->rip != uStartRip)14520 rc = VINF_SUCCESS;14521 }14522 # endif14523 if ( rc == VERR_EM_CANNOT_EXEC_GUEST14524 || rc == VINF_IOM_R3_IOPORT_READ14525 || rc == VINF_IOM_R3_IOPORT_WRITE14526 || rc == VINF_IOM_R3_MMIO_READ14527 || rc == VINF_IOM_R3_MMIO_READ_WRITE14528 || rc == VINF_IOM_R3_MMIO_WRITE14529 || rc == VINF_CPUM_R3_MSR_READ14530 || rc == VINF_CPUM_R3_MSR_WRITE14531 || rc == VINF_EM_RESCHEDULE14532 )14533 {14534 EMRemLock(pVM);14535 rc = REMR3EmulateInstruction(pVM, pVCpu);14536 AssertRC(rc);14537 EMRemUnlock(pVM);14538 fRem = true;14539 }14540 14541 # if 1 /* Skip unimplemented instructions for now. */14542 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)14543 {14544 IEM_GET_CTX(pVCpu) = pOrgCtx;14545 if (rc == VINF_EM_DBG_STEPPED)14546 return VINF_SUCCESS;14547 return rc;14548 }14549 # endif14550 14551 /*14552 * Compare the register states.14553 */14554 unsigned cDiffs = 0;14555 if (memcmp(pOrgCtx, pDebugCtx, sizeof(*pDebugCtx)))14556 {14557 //Log(("REM and IEM ends up with different registers!\n"));14558 const char *pszWho = fRem ? "rem" : HMR3IsVmxEnabled(pVM->pUVM) ? 
"vmx" : "svm";14559 14560 # define CHECK_FIELD(a_Field) \14561 do \14562 { \14563 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \14564 { \14565 switch (sizeof(pOrgCtx->a_Field)) \14566 { \14567 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \14568 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \14569 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \14570 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); break; \14571 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \14572 } \14573 cDiffs++; \14574 } \14575 } while (0)14576 # define CHECK_XSTATE_FIELD(a_Field) \14577 do \14578 { \14579 if (pOrgXState->a_Field != pDebugXState->a_Field) \14580 { \14581 switch (sizeof(pOrgXState->a_Field)) \14582 { \14583 case 1: RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \14584 case 2: RTAssertMsg2Weak(" %8s differs - iem=%04x - %s=%04x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \14585 case 4: RTAssertMsg2Weak(" %8s differs - iem=%08x - %s=%08x\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \14586 case 8: RTAssertMsg2Weak(" %8s differs - iem=%016llx - %s=%016llx\n", #a_Field, pDebugXState->a_Field, pszWho, pOrgXState->a_Field); break; \14587 default: RTAssertMsg2Weak(" %8s differs\n", #a_Field); break; \14588 } \14589 cDiffs++; \14590 } \14591 } while (0)14592 14593 # define CHECK_BIT_FIELD(a_Field) \14594 do \14595 { \14596 if (pOrgCtx->a_Field != pDebugCtx->a_Field) \14597 { \14598 RTAssertMsg2Weak(" %8s differs - iem=%02x - %s=%02x\n", #a_Field, pDebugCtx->a_Field, pszWho, pOrgCtx->a_Field); \14599 cDiffs++; \14600 } \14601 } while (0)14602 14603 # define CHECK_SEL(a_Sel) \14604 do \14605 { \14606 CHECK_FIELD(a_Sel.Sel); \14607 CHECK_FIELD(a_Sel.Attr.u); \14608 CHECK_FIELD(a_Sel.u64Base); \14609 CHECK_FIELD(a_Sel.u32Limit); \14610 CHECK_FIELD(a_Sel.fFlags); \14611 } while (0)14612 14613 PX86XSAVEAREA pOrgXState = pOrgCtx->CTX_SUFF(pXState);14614 PX86XSAVEAREA pDebugXState = pDebugCtx->CTX_SUFF(pXState);14615 14616 # if 1 /* The recompiler doesn't update these the intel way. 
*/14617 if (fRem)14618 {14619 pOrgXState->x87.FOP = pDebugXState->x87.FOP;14620 pOrgXState->x87.FPUIP = pDebugXState->x87.FPUIP;14621 pOrgXState->x87.CS = pDebugXState->x87.CS;14622 pOrgXState->x87.Rsrvd1 = pDebugXState->x87.Rsrvd1;14623 pOrgXState->x87.FPUDP = pDebugXState->x87.FPUDP;14624 pOrgXState->x87.DS = pDebugXState->x87.DS;14625 pOrgXState->x87.Rsrvd2 = pDebugXState->x87.Rsrvd2;14626 //pOrgXState->x87.MXCSR_MASK = pDebugXState->x87.MXCSR_MASK;14627 if ((pOrgXState->x87.FSW & X86_FSW_TOP_MASK) == (pDebugXState->x87.FSW & X86_FSW_TOP_MASK))14628 pOrgXState->x87.FSW = pDebugXState->x87.FSW;14629 }14630 # endif14631 if (memcmp(&pOrgXState->x87, &pDebugXState->x87, sizeof(pDebugXState->x87)))14632 {14633 RTAssertMsg2Weak(" the FPU state differs\n");14634 cDiffs++;14635 CHECK_XSTATE_FIELD(x87.FCW);14636 CHECK_XSTATE_FIELD(x87.FSW);14637 CHECK_XSTATE_FIELD(x87.FTW);14638 CHECK_XSTATE_FIELD(x87.FOP);14639 CHECK_XSTATE_FIELD(x87.FPUIP);14640 CHECK_XSTATE_FIELD(x87.CS);14641 CHECK_XSTATE_FIELD(x87.Rsrvd1);14642 CHECK_XSTATE_FIELD(x87.FPUDP);14643 CHECK_XSTATE_FIELD(x87.DS);14644 CHECK_XSTATE_FIELD(x87.Rsrvd2);14645 CHECK_XSTATE_FIELD(x87.MXCSR);14646 CHECK_XSTATE_FIELD(x87.MXCSR_MASK);14647 CHECK_XSTATE_FIELD(x87.aRegs[0].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[0].au64[1]);14648 CHECK_XSTATE_FIELD(x87.aRegs[1].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[1].au64[1]);14649 CHECK_XSTATE_FIELD(x87.aRegs[2].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[2].au64[1]);14650 CHECK_XSTATE_FIELD(x87.aRegs[3].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[3].au64[1]);14651 CHECK_XSTATE_FIELD(x87.aRegs[4].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[4].au64[1]);14652 CHECK_XSTATE_FIELD(x87.aRegs[5].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[5].au64[1]);14653 CHECK_XSTATE_FIELD(x87.aRegs[6].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[6].au64[1]);14654 CHECK_XSTATE_FIELD(x87.aRegs[7].au64[0]); CHECK_XSTATE_FIELD(x87.aRegs[7].au64[1]);14655 CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 0].au64[1]);14656 CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 1].au64[1]);14657 CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 2].au64[1]);14658 CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 3].au64[1]);14659 CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 4].au64[1]);14660 CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 5].au64[1]);14661 CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 6].au64[1]);14662 CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 7].au64[1]);14663 CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 8].au64[1]);14664 CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[ 9].au64[1]);14665 CHECK_XSTATE_FIELD(x87.aXMM[10].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[10].au64[1]);14666 CHECK_XSTATE_FIELD(x87.aXMM[11].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[11].au64[1]);14667 CHECK_XSTATE_FIELD(x87.aXMM[12].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[12].au64[1]);14668 CHECK_XSTATE_FIELD(x87.aXMM[13].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[13].au64[1]);14669 CHECK_XSTATE_FIELD(x87.aXMM[14].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[14].au64[1]);14670 CHECK_XSTATE_FIELD(x87.aXMM[15].au64[0]); CHECK_XSTATE_FIELD(x87.aXMM[15].au64[1]);14671 for (unsigned i = 0; i < RT_ELEMENTS(pOrgXState->x87.au32RsrvdRest); i++)14672 CHECK_XSTATE_FIELD(x87.au32RsrvdRest[i]);14673 }14674 CHECK_FIELD(rip);14675 uint32_t fFlagsMask = UINT32_MAX & 
~pVCpu->iem.s.fUndefinedEFlags;14676 if ((pOrgCtx->rflags.u & fFlagsMask) != (pDebugCtx->rflags.u & fFlagsMask))14677 {14678 RTAssertMsg2Weak(" rflags differs - iem=%08llx %s=%08llx\n", pDebugCtx->rflags.u, pszWho, pOrgCtx->rflags.u);14679 CHECK_BIT_FIELD(rflags.Bits.u1CF);14680 CHECK_BIT_FIELD(rflags.Bits.u1Reserved0);14681 CHECK_BIT_FIELD(rflags.Bits.u1PF);14682 CHECK_BIT_FIELD(rflags.Bits.u1Reserved1);14683 CHECK_BIT_FIELD(rflags.Bits.u1AF);14684 CHECK_BIT_FIELD(rflags.Bits.u1Reserved2);14685 CHECK_BIT_FIELD(rflags.Bits.u1ZF);14686 CHECK_BIT_FIELD(rflags.Bits.u1SF);14687 CHECK_BIT_FIELD(rflags.Bits.u1TF);14688 CHECK_BIT_FIELD(rflags.Bits.u1IF);14689 CHECK_BIT_FIELD(rflags.Bits.u1DF);14690 CHECK_BIT_FIELD(rflags.Bits.u1OF);14691 CHECK_BIT_FIELD(rflags.Bits.u2IOPL);14692 CHECK_BIT_FIELD(rflags.Bits.u1NT);14693 CHECK_BIT_FIELD(rflags.Bits.u1Reserved3);14694 if (0 && !fRem) /** @todo debug the occational clear RF flags when running against VT-x. */14695 CHECK_BIT_FIELD(rflags.Bits.u1RF);14696 CHECK_BIT_FIELD(rflags.Bits.u1VM);14697 CHECK_BIT_FIELD(rflags.Bits.u1AC);14698 CHECK_BIT_FIELD(rflags.Bits.u1VIF);14699 CHECK_BIT_FIELD(rflags.Bits.u1VIP);14700 CHECK_BIT_FIELD(rflags.Bits.u1ID);14701 }14702 14703 if (pVCpu->iem.s.cIOReads != 1 && !pVCpu->iem.s.fIgnoreRaxRdx)14704 CHECK_FIELD(rax);14705 CHECK_FIELD(rcx);14706 if (!pVCpu->iem.s.fIgnoreRaxRdx)14707 CHECK_FIELD(rdx);14708 CHECK_FIELD(rbx);14709 CHECK_FIELD(rsp);14710 CHECK_FIELD(rbp);14711 CHECK_FIELD(rsi);14712 CHECK_FIELD(rdi);14713 CHECK_FIELD(r8);14714 CHECK_FIELD(r9);14715 CHECK_FIELD(r10);14716 CHECK_FIELD(r11);14717 CHECK_FIELD(r12);14718 CHECK_FIELD(r13);14719 CHECK_SEL(cs);14720 CHECK_SEL(ss);14721 CHECK_SEL(ds);14722 CHECK_SEL(es);14723 CHECK_SEL(fs);14724 CHECK_SEL(gs);14725 CHECK_FIELD(cr0);14726 14727 /* Klugde #1: REM fetches code and across the page boundrary and faults on the next page, while we execute14728 the faulting instruction first: 001b:77f61ff3 66 8b 42 02 mov ax, word [edx+002h] (NT4SP1) */14729 /* Kludge #2: CR2 differs slightly on cross page boundrary faults, we report the last address of the access14730 while REM reports the address of the first byte on the page. Pending investigation as to which is correct. 
*/14731 if (pOrgCtx->cr2 != pDebugCtx->cr2)14732 {14733 if (pVCpu->iem.s.uOldCs == 0x1b && pVCpu->iem.s.uOldRip == 0x77f61ff3 && fRem)14734 { /* ignore */ }14735 else if ( (pOrgCtx->cr2 & ~(uint64_t)3) == (pDebugCtx->cr2 & ~(uint64_t)3)14736 && (pOrgCtx->cr2 & PAGE_OFFSET_MASK) == 014737 && fRem)14738 { /* ignore */ }14739 else14740 CHECK_FIELD(cr2);14741 }14742 CHECK_FIELD(cr3);14743 CHECK_FIELD(cr4);14744 CHECK_FIELD(dr[0]);14745 CHECK_FIELD(dr[1]);14746 CHECK_FIELD(dr[2]);14747 CHECK_FIELD(dr[3]);14748 CHECK_FIELD(dr[6]);14749 if (!fRem || (pOrgCtx->dr[7] & ~X86_DR7_RA1_MASK) != (pDebugCtx->dr[7] & ~X86_DR7_RA1_MASK)) /* REM 'mov drX,greg' bug.*/14750 CHECK_FIELD(dr[7]);14751 CHECK_FIELD(gdtr.cbGdt);14752 CHECK_FIELD(gdtr.pGdt);14753 CHECK_FIELD(idtr.cbIdt);14754 CHECK_FIELD(idtr.pIdt);14755 CHECK_SEL(ldtr);14756 CHECK_SEL(tr);14757 CHECK_FIELD(SysEnter.cs);14758 CHECK_FIELD(SysEnter.eip);14759 CHECK_FIELD(SysEnter.esp);14760 CHECK_FIELD(msrEFER);14761 CHECK_FIELD(msrSTAR);14762 CHECK_FIELD(msrPAT);14763 CHECK_FIELD(msrLSTAR);14764 CHECK_FIELD(msrCSTAR);14765 CHECK_FIELD(msrSFMASK);14766 CHECK_FIELD(msrKERNELGSBASE);14767 14768 if (cDiffs != 0)14769 {14770 DBGFR3InfoEx(pVM->pUVM, pVCpu->idCpu, "cpumguest", "verbose", NULL);14771 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);14772 RTAssertPanic();14773 static bool volatile s_fEnterDebugger = true;14774 if (s_fEnterDebugger)14775 DBGFSTOP(pVM);14776 14777 # if 1 /* Ignore unimplemented instructions for now. */14778 if (rcStrictIem == VERR_IEM_INSTR_NOT_IMPLEMENTED)14779 rcStrictIem = VINF_SUCCESS;14780 # endif14781 }14782 # undef CHECK_FIELD14783 # undef CHECK_BIT_FIELD14784 }14785 14786 /*14787 * If the register state compared fine, check the verification event14788 * records.14789 */14790 if (cDiffs == 0 && !pVCpu->iem.s.fOverlappingMovs)14791 {14792 /*14793 * Compare verficiation event records.14794 * - I/O port accesses should be a 1:1 match.14795 */14796 PIEMVERIFYEVTREC pIemRec = pVCpu->iem.s.pIemEvtRecHead;14797 PIEMVERIFYEVTREC pOtherRec = pVCpu->iem.s.pOtherEvtRecHead;14798 while (pIemRec && pOtherRec)14799 {14800 /* Since we might miss RAM writes and reads, ignore reads and check14801 that any written memory is the same extra ones. */14802 while ( IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent)14803 && !IEMVERIFYEVENT_IS_RAM(pOtherRec->enmEvent)14804 && pIemRec->pNext)14805 {14806 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)14807 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);14808 pIemRec = pIemRec->pNext;14809 }14810 14811 /* Do the compare. 
*/14812 if (pIemRec->enmEvent != pOtherRec->enmEvent)14813 {14814 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Type mismatches");14815 break;14816 }14817 bool fEquals;14818 switch (pIemRec->enmEvent)14819 {14820 case IEMVERIFYEVENT_IOPORT_READ:14821 fEquals = pIemRec->u.IOPortRead.Port == pOtherRec->u.IOPortRead.Port14822 && pIemRec->u.IOPortRead.cbValue == pOtherRec->u.IOPortRead.cbValue;14823 break;14824 case IEMVERIFYEVENT_IOPORT_WRITE:14825 fEquals = pIemRec->u.IOPortWrite.Port == pOtherRec->u.IOPortWrite.Port14826 && pIemRec->u.IOPortWrite.cbValue == pOtherRec->u.IOPortWrite.cbValue14827 && pIemRec->u.IOPortWrite.u32Value == pOtherRec->u.IOPortWrite.u32Value;14828 break;14829 case IEMVERIFYEVENT_IOPORT_STR_READ:14830 fEquals = pIemRec->u.IOPortStrRead.Port == pOtherRec->u.IOPortStrRead.Port14831 && pIemRec->u.IOPortStrRead.cbValue == pOtherRec->u.IOPortStrRead.cbValue14832 && pIemRec->u.IOPortStrRead.cTransfers == pOtherRec->u.IOPortStrRead.cTransfers;14833 break;14834 case IEMVERIFYEVENT_IOPORT_STR_WRITE:14835 fEquals = pIemRec->u.IOPortStrWrite.Port == pOtherRec->u.IOPortStrWrite.Port14836 && pIemRec->u.IOPortStrWrite.cbValue == pOtherRec->u.IOPortStrWrite.cbValue14837 && pIemRec->u.IOPortStrWrite.cTransfers == pOtherRec->u.IOPortStrWrite.cTransfers;14838 break;14839 case IEMVERIFYEVENT_RAM_READ:14840 fEquals = pIemRec->u.RamRead.GCPhys == pOtherRec->u.RamRead.GCPhys14841 && pIemRec->u.RamRead.cb == pOtherRec->u.RamRead.cb;14842 break;14843 case IEMVERIFYEVENT_RAM_WRITE:14844 fEquals = pIemRec->u.RamWrite.GCPhys == pOtherRec->u.RamWrite.GCPhys14845 && pIemRec->u.RamWrite.cb == pOtherRec->u.RamWrite.cb14846 && !memcmp(pIemRec->u.RamWrite.ab, pOtherRec->u.RamWrite.ab, pIemRec->u.RamWrite.cb);14847 break;14848 default:14849 fEquals = false;14850 break;14851 }14852 if (!fEquals)14853 {14854 iemVerifyAssertRecords(pVCpu, pIemRec, pOtherRec, "Mismatch");14855 break;14856 }14857 14858 /* advance */14859 pIemRec = pIemRec->pNext;14860 pOtherRec = pOtherRec->pNext;14861 }14862 14863 /* Ignore extra writes and reads. 
*/14864 while (pIemRec && IEMVERIFYEVENT_IS_RAM(pIemRec->enmEvent))14865 {14866 if (pIemRec->enmEvent == IEMVERIFYEVENT_RAM_WRITE)14867 iemVerifyWriteRecord(pVCpu, pIemRec, fRem);14868 pIemRec = pIemRec->pNext;14869 }14870 if (pIemRec != NULL)14871 iemVerifyAssertRecord(pVCpu, pIemRec, "Extra IEM record!");14872 else if (pOtherRec != NULL)14873 iemVerifyAssertRecord(pVCpu, pOtherRec, "Extra Other record!");14874 }14875 IEM_GET_CTX(pVCpu) = pOrgCtx;14876 14877 return rcStrictIem;14878 }14879 14880 #else /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */14881 14882 /* stubs */14883 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortRead(PVMCPU pVCpu, RTIOPORT Port, uint32_t *pu32Value, size_t cbValue)14884 {14885 NOREF(pVCpu); NOREF(Port); NOREF(pu32Value); NOREF(cbValue);14886 return VERR_INTERNAL_ERROR;14887 }14888 14889 IEM_STATIC VBOXSTRICTRC iemVerifyFakeIOPortWrite(PVMCPU pVCpu, RTIOPORT Port, uint32_t u32Value, size_t cbValue)14890 {14891 NOREF(pVCpu); NOREF(Port); NOREF(u32Value); NOREF(cbValue);14892 return VERR_INTERNAL_ERROR;14893 }14894 14895 #endif /* !IEM_VERIFICATION_MODE_FULL || !IN_RING3 */14896 14897 14898 13694 #ifdef LOG_ENABLED 14899 13695 /** … … 15029 13825 else if (rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED) 15030 13826 pVCpu->iem.s.cRetInstrNotImplemented++; 15031 #ifdef IEM_VERIFICATION_MODE_FULL15032 else if (rcStrict == VERR_IEM_RESTART_INSTRUCTION)15033 rcStrict = VINF_SUCCESS;15034 #endif15035 13827 else 15036 13828 pVCpu->iem.s.cRetErrStatuses++; … … 15146 13938 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs)); 15147 13939 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss)); 15148 #if defined(IEM_VERIFICATION_MODE_FULL)15149 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));15150 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));15151 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));15152 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));15153 #endif15154 13940 return rcStrict; 15155 13941 } … … 15193 13979 VMMDECL(VBOXSTRICTRC) IEMExecOne(PVMCPU pVCpu) 15194 13980 { 15195 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)15196 if (++pVCpu->iem.s.cVerifyDepth == 1)15197 iemExecVerificationModeSetup(pVCpu);15198 #endif15199 13981 #ifdef LOG_ENABLED 15200 13982 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); … … 15211 13993 iemMemRollback(pVCpu); 15212 13994 15213 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)15214 /*15215 * Assert some sanity.15216 */15217 if (pVCpu->iem.s.cVerifyDepth == 1)15218 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict);15219 pVCpu->iem.s.cVerifyDepth--;15220 #endif15221 13995 #ifdef IN_RC 15222 13996 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict); … … 15409 14183 uint32_t const cInstructionsAtStart = pVCpu->iem.s.cInstructions; 15410 14184 15411 #if defined(IEM_VERIFICATION_MODE_FULL) && defined(IN_RING3)15412 14185 /* 15413 14186 * See if there is an interrupt pending in TRPM, inject it if we can. 15414 14187 */ 15415 14188 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 15416 # ifdef IEM_VERIFICATION_MODE_FULL 15417 pVCpu->iem.s.uInjectCpl = UINT8_MAX; 15418 # endif 15419 15420 /** @todo Maybe someday we can centralize this under CPUMCanInjectInterrupt()? */ 15421 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) 15422 bool fIntrEnabled = pCtx->hwvirt.Gif; 14189 14190 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? 
*/ 14191 #if defined(VBOX_WITH_NESTED_HWVIRT_SVM) 14192 bool fIntrEnabled = pCtx->hwvirt.fGif; 15423 14193 if (fIntrEnabled) 15424 14194 { … … 15428 14198 fIntrEnabled = pCtx->eflags.Bits.u1IF; 15429 14199 } 15430 # 14200 #else 15431 14201 bool fIntrEnabled = pCtx->eflags.Bits.u1IF; 15432 # 14202 #endif 15433 14203 if ( fIntrEnabled 15434 14204 && TRPMHasTrap(pVCpu) … … 15441 14211 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); 15442 14212 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */); 15443 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 15444 TRPMResetTrap(pVCpu); 15445 } 15446 15447 /* 15448 * Log the state. 15449 */ 15450 # ifdef LOG_ENABLED 15451 iemLogCurInstr(pVCpu, pCtx, true); 15452 # endif 15453 15454 /* 15455 * Do the decoding and emulation. 15456 */ 15457 VBOXSTRICTRC rcStrict = iemInitDecoderAndPrefetchOpcodes(pVCpu, false); 15458 if (rcStrict == VINF_SUCCESS) 15459 rcStrict = iemExecOneInner(pVCpu, true); 15460 else if (pVCpu->iem.s.cActiveMappings > 0) 15461 iemMemRollback(pVCpu); 15462 15463 /* 15464 * Assert some sanity. 15465 */ 15466 rcStrict = iemExecVerificationModeCheck(pVCpu, rcStrict); 15467 15468 /* 15469 * Log and return. 15470 */ 15471 if (rcStrict != VINF_SUCCESS) 15472 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", 15473 pCtx->cs.Sel, pCtx->rip, pCtx->ss.Sel, pCtx->rsp, pCtx->eflags.u, VBOXSTRICTRC_VAL(rcStrict))); 15474 if (pcInstructions) 15475 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart; 15476 return rcStrict; 15477 15478 #else /* Not verification mode */ 15479 15480 /* 15481 * See if there is an interrupt pending in TRPM, inject it if we can. 15482 */ 15483 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 15484 # ifdef IEM_VERIFICATION_MODE_FULL 15485 pVCpu->iem.s.uInjectCpl = UINT8_MAX; 15486 # endif 15487 15488 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */ 15489 # if defined(VBOX_WITH_NESTED_HWVIRT_SVM) 15490 bool fIntrEnabled = pCtx->hwvirt.fGif; 15491 if (fIntrEnabled) 15492 { 15493 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 15494 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx); 15495 else 15496 fIntrEnabled = pCtx->eflags.Bits.u1IF; 15497 } 15498 # else 15499 bool fIntrEnabled = pCtx->eflags.Bits.u1IF; 15500 # endif 15501 if ( fIntrEnabled 15502 && TRPMHasTrap(pVCpu) 15503 && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip) 15504 { 15505 uint8_t u8TrapNo; 15506 TRPMEVENT enmType; 15507 RTGCUINT uErrCode; 15508 RTGCPTR uCr2; 15509 int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2); 15510 IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */); 15511 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 15512 TRPMResetTrap(pVCpu); 14213 TRPMResetTrap(pVCpu); 15513 14214 } 15514 14215 … … 15519 14220 if (rcStrict == VINF_SUCCESS) 15520 14221 { 15521 # 14222 #ifdef IEM_WITH_SETJMP 15522 14223 jmp_buf JmpBuf; 15523 14224 jmp_buf *pSavedJmpBuf = pVCpu->iem.s.CTX_SUFF(pJmpBuf); … … 15525 14226 pVCpu->iem.s.cActiveMappings = 0; 15526 14227 if ((rcStrict = setjmp(JmpBuf)) == 0) 15527 # 14228 #endif 15528 14229 { 15529 14230 /* … … 15537 14238 * Log the state. 
15538 14239 */ 15539 # 14240 #ifdef LOG_ENABLED 15540 14241 iemLogCurInstr(pVCpu, pCtx, true); 15541 # 14242 #endif 15542 14243 15543 14244 /* … … 15556 14257 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL 15557 14258 | VMCPU_FF_TLB_FLUSH 15558 # 14259 #ifdef VBOX_WITH_RAW_MODE 15559 14260 | VMCPU_FF_TRPM_SYNC_IDT 15560 14261 | VMCPU_FF_SELM_SYNC_TSS 15561 14262 | VMCPU_FF_SELM_SYNC_GDT 15562 14263 | VMCPU_FF_SELM_SYNC_LDT 15563 # 14264 #endif 15564 14265 | VMCPU_FF_INHIBIT_INTERRUPTS 15565 14266 | VMCPU_FF_BLOCK_NMIS … … 15587 14288 } 15588 14289 } 15589 # 14290 #ifdef IEM_WITH_SETJMP 15590 14291 else 15591 14292 { … … 15595 14296 } 15596 14297 pVCpu->iem.s.CTX_SUFF(pJmpBuf) = pSavedJmpBuf; 15597 # 14298 #endif 15598 14299 15599 14300 /* … … 15602 14303 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->cs)); 15603 14304 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ss)); 15604 # if defined(IEM_VERIFICATION_MODE_FULL)15605 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->es));15606 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->ds));15607 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->fs));15608 Assert(CPUMSELREG_ARE_HIDDEN_PARTS_VALID(pVCpu, &IEM_GET_CTX(pVCpu)->gs));15609 # endif15610 14305 } 15611 14306 else … … 15614 14309 iemMemRollback(pVCpu); 15615 14310 15616 # 14311 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 15617 14312 /* 15618 14313 * When a nested-guest causes an exception intercept (e.g. #PF) when fetching … … 15620 14315 */ 15621 14316 rcStrict = iemExecStatusCodeFiddling(pVCpu, rcStrict); 15622 # 14317 #endif 15623 14318 } 15624 14319 … … 15626 14321 * Maybe re-enter raw-mode and log. 15627 14322 */ 15628 # 14323 #ifdef IN_RC 15629 14324 rcStrict = iemRCRawMaybeReenter(pVCpu, IEM_GET_CTX(pVCpu), rcStrict); 15630 # 14325 #endif 15631 14326 if (rcStrict != VINF_SUCCESS) 15632 14327 LogFlow(("IEMExecLots: cs:rip=%04x:%08RX64 ss:rsp=%04x:%08RX64 EFL=%06x - rcStrict=%Rrc\n", … … 15635 14330 *pcInstructions = pVCpu->iem.s.cInstructions - cInstructionsAtStart; 15636 14331 return rcStrict; 15637 #endif /* Not verification mode */15638 14332 } 15639 14333 -
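Net effect of the IEMExecLots() hunks above, condensed for reference: the TRPM event-injection gate survives unchanged, only the verification-mode plumbing around it is gone. The sketch below is assembled from the surviving lines of the diff and is not a drop-in copy; statistics, logging and the setjmp plumbing are omitted.

    #if defined(VBOX_WITH_NESTED_HWVIRT_SVM)
        bool fIntrEnabled = pCtx->hwvirt.fGif;                 /* global interrupt flag gates everything */
        if (fIntrEnabled)
        {
            if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
                fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
            else
                fIntrEnabled = pCtx->eflags.Bits.u1IF;
        }
    #else
        bool fIntrEnabled = pCtx->eflags.Bits.u1IF;
    #endif
        if (   fIntrEnabled
            && TRPMHasTrap(pVCpu)
            && EMGetInhibitInterruptsPC(pVCpu) != pCtx->rip)
        {
            uint8_t   u8TrapNo;
            TRPMEVENT enmType;
            RTGCUINT  uErrCode;
            RTGCPTR   uCr2;
            int rc2 = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrCode, &uCr2, NULL /* pu8InstLen */); AssertRC(rc2);
            IEMInjectTrap(pVCpu, u8TrapNo, enmType, (uint16_t)uErrCode, uCr2, 0 /* cbInstr */);
            TRPMResetTrap(pVCpu);  /* now unconditional; the IEM_VERIFICATION_ENABLED() guard is gone */
        }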
trunk/src/VBox/VMM/VMMAll/IEMAllCImpl.cpp.h
r72484 r72493 188 188 pCtx->eflags.u &= ~(fToUpdate | fUndefined); 189 189 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags; 190 #ifdef IEM_VERIFICATION_MODE_FULL191 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;192 #endif193 190 } 194 191 … … 210 207 pCtx->eflags.u &= ~(fToUpdate | fUndefined); 211 208 pCtx->eflags.u |= (fToUpdate | fUndefined) & fEFlags; 212 #ifdef IEM_VERIFICATION_MODE_FULL213 pVCpu->iem.s.fUndefinedEFlags |= fUndefined;214 #endif215 209 } 216 210 … … 4703 4697 || X86_IS_CANONICAL(GCPtrBase)) 4704 4698 { 4705 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4706 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit); 4707 else 4708 { 4709 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 4710 pCtx->gdtr.cbGdt = cbLimit; 4711 pCtx->gdtr.pGdt = GCPtrBase; 4712 pCtx->fExtrn &= ~CPUMCTX_EXTRN_GDTR; 4713 } 4699 rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit); 4714 4700 if (rcStrict == VINF_SUCCESS) 4715 4701 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 4785 4771 || X86_IS_CANONICAL(GCPtrBase)) 4786 4772 { 4787 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4788 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit); 4789 else 4790 { 4791 PCPUMCTX pCtx = IEM_GET_CTX(pVCpu); 4792 pCtx->idtr.cbIdt = cbLimit; 4793 pCtx->idtr.pIdt = GCPtrBase; 4794 pCtx->fExtrn &= ~CPUMCTX_EXTRN_IDTR; 4795 } 4773 CPUMSetGuestIDTR(pVCpu, GCPtrBase, cbLimit); 4796 4774 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 4797 4775 } … … 4878 4856 Log(("lldt %04x: Loading NULL selector.\n", uNewLdt)); 4879 4857 pCtx->fExtrn &= ~CPUMCTX_EXTRN_LDTR; 4880 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4881 CPUMSetGuestLDTR(pVCpu, uNewLdt); 4882 else 4883 pCtx->ldtr.Sel = uNewLdt; 4858 CPUMSetGuestLDTR(pVCpu, uNewLdt); 4884 4859 pCtx->ldtr.ValidSel = uNewLdt; 4885 4860 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID; 4886 if (IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu)) 4887 { 4888 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE; 4889 pCtx->ldtr.u64Base = pCtx->ldtr.u32Limit = 0; /* For verfication against REM. */ 4890 } 4891 else if (IEM_IS_GUEST_CPU_AMD(pVCpu)) 4861 if (IEM_IS_GUEST_CPU_AMD(pVCpu)) 4892 4862 { 4893 4863 /* AMD-V seems to leave the base and limit alone. */ 4894 4864 pCtx->ldtr.Attr.u = X86DESCATTR_UNUSABLE; 4895 4865 } 4896 else if (!IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))4866 else 4897 4867 { 4898 4868 /* VT-x (Intel 3960x) seems to be doing the following. */ … … 4964 4934 */ 4965 4935 /** @todo check if the actual value is loaded or if the RPL is dropped */ 4966 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 4967 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4968 else 4969 pCtx->ldtr.Sel = uNewLdt & X86_SEL_MASK_OFF_RPL; 4936 CPUMSetGuestLDTR(pVCpu, uNewLdt & X86_SEL_MASK_OFF_RPL); 4970 4937 pCtx->ldtr.ValidSel = uNewLdt & X86_SEL_MASK_OFF_RPL; 4971 4938 pCtx->ldtr.fFlags = CPUMSELREG_FLAGS_VALID; … … 5092 5059 */ 5093 5060 /** @todo check if the actual value is loaded or if the RPL is dropped */ 5094 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5095 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 5096 else 5097 pCtx->tr.Sel = uNewTr & X86_SEL_MASK_OFF_RPL; 5061 CPUMSetGuestTR(pVCpu, uNewTr & X86_SEL_MASK_OFF_RPL); 5098 5062 pCtx->tr.ValidSel = uNewTr & X86_SEL_MASK_OFF_RPL; 5099 5063 pCtx->tr.fFlags = CPUMSELREG_FLAGS_VALID; … … 5307 5271 * Change CR0. 
5308 5272 */ 5309 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 5310 CPUMSetGuestCR0(pVCpu, uNewCrX); 5311 else 5312 pCtx->cr0 = uNewCrX; 5273 CPUMSetGuestCR0(pVCpu, uNewCrX); 5313 5274 Assert(pCtx->cr0 == uNewCrX); 5314 5275 … … 5325 5286 NewEFER &= ~MSR_K6_EFER_LMA; 5326 5287 5327 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5328 CPUMSetGuestEFER(pVCpu, NewEFER); 5329 else 5330 pCtx->msrEFER = NewEFER; 5288 CPUMSetGuestEFER(pVCpu, NewEFER); 5331 5289 Assert(pCtx->msrEFER == NewEFER); 5332 5290 } … … 5335 5293 * Inform PGM. 5336 5294 */ 5337 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5338 { 5339 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) 5340 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) ) 5341 { 5342 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5343 AssertRCReturn(rc, rc); 5344 /* ignore informational status codes */ 5345 } 5346 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5347 } 5348 else 5349 rcStrict = VINF_SUCCESS; 5295 if ( (uNewCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) 5296 != (uOldCrX & (X86_CR0_PG | X86_CR0_WP | X86_CR0_PE)) ) 5297 { 5298 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5299 AssertRCReturn(rc, rc); 5300 /* ignore informational status codes */ 5301 } 5302 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5350 5303 5351 5304 #ifdef IN_RC … … 5431 5384 5432 5385 /* Make the change. */ 5433 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5434 { 5435 rc = CPUMSetGuestCR3(pVCpu, uNewCrX); 5436 AssertRCSuccessReturn(rc, rc); 5437 } 5438 else 5439 pCtx->cr3 = uNewCrX; 5386 rc = CPUMSetGuestCR3(pVCpu, uNewCrX); 5387 AssertRCSuccessReturn(rc, rc); 5440 5388 5441 5389 /* Inform PGM. */ 5442 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5443 { 5444 if (pCtx->cr0 & X86_CR0_PG) 5445 { 5446 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE)); 5447 AssertRCReturn(rc, rc); 5448 /* ignore informational status codes */ 5449 } 5390 if (pCtx->cr0 & X86_CR0_PG) 5391 { 5392 rc = PGMFlushTLB(pVCpu, pCtx->cr3, !(pCtx->cr4 & X86_CR4_PGE)); 5393 AssertRCReturn(rc, rc); 5394 /* ignore informational status codes */ 5450 5395 } 5451 5396 rcStrict = VINF_SUCCESS; … … 5517 5462 * Change it. 5518 5463 */ 5519 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5520 { 5521 rc = CPUMSetGuestCR4(pVCpu, uNewCrX); 5522 AssertRCSuccessReturn(rc, rc); 5523 } 5524 else 5525 pCtx->cr4 = uNewCrX; 5464 rc = CPUMSetGuestCR4(pVCpu, uNewCrX); 5465 AssertRCSuccessReturn(rc, rc); 5526 5466 Assert(pCtx->cr4 == uNewCrX); 5527 5467 … … 5529 5469 * Notify SELM and PGM. 5530 5470 */ 5531 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5532 { 5533 /* SELM - VME may change things wrt to the TSS shadowing. */ 5534 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME) 5535 { 5536 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n", 5537 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) )); 5471 /* SELM - VME may change things wrt to the TSS shadowing. */ 5472 if ((uNewCrX ^ uOldCrX) & X86_CR4_VME) 5473 { 5474 Log(("iemCImpl_load_CrX: VME %d -> %d => Setting VMCPU_FF_SELM_SYNC_TSS\n", 5475 RT_BOOL(uOldCrX & X86_CR4_VME), RT_BOOL(uNewCrX & X86_CR4_VME) )); 5538 5476 #ifdef VBOX_WITH_RAW_MODE 5539 5540 5477 if (VM_IS_RAW_MODE_ENABLED(pVCpu->CTX_SUFF(pVM))) 5478 VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS); 5541 5479 #endif 5542 } 5543 5544 /* PGM - flushing and mode. 
*/ 5545 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */)) 5546 { 5547 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5548 AssertRCReturn(rc, rc); 5549 /* ignore informational status codes */ 5550 } 5551 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5552 } 5553 else 5554 rcStrict = VINF_SUCCESS; 5480 } 5481 5482 /* PGM - flushing and mode. */ 5483 if ((uNewCrX ^ uOldCrX) & (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_PCIDE /* | X86_CR4_SMEP */)) 5484 { 5485 rc = PGMFlushTLB(pVCpu, pCtx->cr3, true /* global */); 5486 AssertRCReturn(rc, rc); 5487 /* ignore informational status codes */ 5488 } 5489 rcStrict = PGMChangeMode(pVCpu, pCtx->cr0, pCtx->cr4, pCtx->msrEFER); 5555 5490 break; 5556 5491 } … … 5587 5522 } 5588 5523 #endif 5589 if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu)) 5590 { 5591 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4; 5592 APICSetTpr(pVCpu, u8Tpr); 5593 } 5524 uint8_t const u8Tpr = (uint8_t)uNewCrX << 4; 5525 APICSetTpr(pVCpu, u8Tpr); 5594 5526 rcStrict = VINF_SUCCESS; 5595 5527 break; … … 5871 5803 else if (iDrReg == 6) 5872 5804 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_DR6); 5873 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 5874 { 5875 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX); 5876 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc); 5877 } 5878 else 5879 pCtx->dr[iDrReg] = uNewDrX; 5805 5806 int rc = CPUMSetGuestDRx(pVCpu, iDrReg, uNewDrX); 5807 AssertRCSuccessReturn(rc, RT_SUCCESS_NP(rc) ? VERR_IEM_IPE_1 : rc); 5880 5808 5881 5809 iemRegAddToRipAndClearRF(pVCpu, cbInstr); … … 6061 5989 pCtx->rax = RT_LO_U32(uTicks); 6062 5990 pCtx->rdx = RT_HI_U32(uTicks); 6063 #ifdef IEM_VERIFICATION_MODE_FULL6064 pVCpu->iem.s.fIgnoreRaxRdx = true;6065 #endif6066 6067 5991 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6068 5992 return VINF_SUCCESS; … … 6115 6039 pCtx->rax = RT_LO_U32(uTicks); 6116 6040 pCtx->rdx = RT_HI_U32(uTicks); 6117 #ifdef IEM_VERIFICATION_MODE_FULL6118 pVCpu->iem.s.fIgnoreRaxRdx = true;6119 #endif6120 6041 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6121 6042 } … … 6255 6176 IEM_CTX_IMPORT_RET(pVCpu, pCtx, CPUMCTX_EXTRN_ALL_MSRS); 6256 6177 6257 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 6258 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u); 6259 else 6260 { 6261 #ifdef IN_RING3 6262 CPUMCTX CtxTmp = *pCtx; 6263 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u); 6264 PCPUMCTX pCtx2 = CPUMQueryGuestCtxPtr(pVCpu); 6265 *pCtx = *pCtx2; 6266 *pCtx2 = CtxTmp; 6267 #else 6268 AssertReleaseFailedReturn(VERR_IEM_IPE_2); 6269 #endif 6270 } 6178 rcStrict = CPUMSetGuestMsr(pVCpu, pCtx->ecx, uValue.u); 6271 6179 if (rcStrict == VINF_SUCCESS) 6272 6180 { … … 6342 6250 * Perform the I/O. 
6343 6251 */ 6344 uint32_t u32Value; 6345 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 6346 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg); 6347 else 6348 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, cbReg); 6252 uint32_t u32Value = 0; 6253 rcStrict = IOMIOPortRead(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, &u32Value, cbReg); 6349 6254 if (IOM_SUCCESS(rcStrict)) 6350 6255 { … … 6448 6353 default: AssertFailedReturn(VERR_IEM_IPE_4); 6449 6354 } 6450 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 6451 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg); 6452 else 6453 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, u32Value, cbReg); 6355 rcStrict = IOMIOPortWrite(pVCpu->CTX_SUFF(pVM), pVCpu, u16Port, u32Value, cbReg); 6454 6356 if (IOM_SUCCESS(rcStrict)) 6455 6357 { … … 6575 6477 IEMMISC_SET_EFL(pVCpu, pCtx, fEfl); 6576 6478 iemRegAddToRipAndClearRF(pVCpu, cbInstr); 6577 if ( (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) || IEM_FULL_VERIFICATION_REM_ENABLED(pVCpu))6479 if (!(fEflOld & X86_EFL_IF) && (fEfl & X86_EFL_IF)) 6578 6480 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 6579 6481 Log2(("STI: %#x -> %#x\n", fEflOld, fEfl)); … … 6920 6822 pCtx->eflags.Bits.u1AF = 1; 6921 6823 pCtx->eflags.Bits.u1CF = 1; 6922 #ifdef IEM_VERIFICATION_MODE_FULL6923 pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF;6924 #endif6925 6824 } 6926 6825 else … … 6970 6869 pCtx->eflags.Bits.u1AF = 1; 6971 6870 pCtx->eflags.Bits.u1CF = 1; 6972 #ifdef IEM_VERIFICATION_MODE_FULL6973 pVCpu->iem.s.fUndefinedEFlags |= X86_EFL_SF | X86_EFL_ZF | X86_EFL_PF, X86_EFL_OF;6974 #endif6975 6871 } 6976 6872 else -
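The hunks in this file all follow the same shape: the IEM_VERIFICATION_ENABLED()/IEM_FULL_VERIFICATION_ENABLED() fork that kept a private copy of the guest register state is dropped and the CPUM setter is called unconditionally. Taking the lgdt case from the hunk above as a representative before/after (lidt, lldt, ltr, CRx, DRx, MSR and I/O port writes follow the same pattern):

    /* Before: the verifier branch wrote the context directly. */
    if (!IEM_FULL_VERIFICATION_ENABLED(pVCpu))
        rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);
    else
    {
        PCPUMCTX pCtx = IEM_GET_CTX(pVCpu);
        pCtx->gdtr.cbGdt = cbLimit;
        pCtx->gdtr.pGdt  = GCPtrBase;
        pCtx->fExtrn    &= ~CPUMCTX_EXTRN_GDTR;
    }

    /* After r72493: one code path. */
    rcStrict = CPUMSetGuestGDTR(pVCpu, GCPtrBase, cbLimit);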
trunk/src/VBox/VMM/VMMAll/IEMAllCImplStrInstr.cpp.h
r72484 r72493 71 71 # define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \ 72 72 do { \ 73 if (RT_LIKELY( (!VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \74 75 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK)) \76 || IEM_VERIFICATION_ENABLED(a_pVCpu))) \73 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \ 74 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \ 75 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) \ 76 )) \ 77 77 { \ 78 78 RTCCUINTREG fSavedFlags = ASMGetFlags(); \ … … 94 94 # define IEM_CHECK_FF_YIELD_REPSTR_MAYBE_RETURN(a_pVM, a_pVCpu, a_fEflags) \ 95 95 do { \ 96 if (RT_LIKELY( (!VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \97 98 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK)) \99 || IEM_VERIFICATION_ENABLED(a_pVCpu))) \96 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, (a_fEflags) & X86_EFL_IF ? VMCPU_FF_YIELD_REPSTR_MASK \ 97 : VMCPU_FF_YIELD_REPSTR_NOINT_MASK) \ 98 && !VM_FF_IS_PENDING(a_pVM, VM_FF_YIELD_REPSTR_MASK) \ 99 )) \ 100 100 { /* probable */ } \ 101 101 else \ … … 117 117 if (RT_LIKELY( ( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \ 118 118 && !VM_FF_IS_PENDING(a_pVM, VM_FF_HIGH_PRIORITY_POST_REPSTR_MASK)) \ 119 || (a_fExitExpr) \ 120 || IEM_VERIFICATION_ENABLED(a_pVCpu) )) \ 119 || (a_fExitExpr) )) \ 121 120 { /* very likely */ } \ 122 121 else \ … … 138 137 do { \ 139 138 if (RT_LIKELY( !VMCPU_FF_IS_PENDING(a_pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_REPSTR_MASK) \ 140 || (a_fExitExpr) \ 141 || IEM_VERIFICATION_ENABLED(a_pVCpu) )) \ 139 || (a_fExitExpr) )) \ 142 140 { /* very likely */ } \ 143 141 else \ … … 804 802 805 803 /* 806 * If we're reading back what we write, we have to let the verfication code807 * to prevent a false positive.808 * Note! This doesn't take aliasing or wrapping into account - lazy bird.809 */810 #ifdef IEM_VERIFICATION_MODE_FULL811 if ( IEM_VERIFICATION_ENABLED(pVCpu)812 && (cbIncr > 0813 ? uSrcAddrReg <= uDstAddrReg814 && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg815 : uDstAddrReg <= uSrcAddrReg816 && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))817 pVCpu->iem.s.fOverlappingMovs = true;818 #endif819 820 /*821 804 * The loop. 
822 805 */ … … 1254 1237 1255 1238 uint32_t u32Value = 0; 1256 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1257 rcStrict = IOMIOPortRead(pVM, pVCpu, pCtx->dx, &u32Value, OP_SIZE / 8); 1258 else 1259 rcStrict = iemVerifyFakeIOPortRead(pVCpu, pCtx->dx, &u32Value, OP_SIZE / 8); 1239 rcStrict = IOMIOPortRead(pVM, pVCpu, pCtx->dx, &u32Value, OP_SIZE / 8); 1260 1240 if (IOM_SUCCESS(rcStrict)) 1261 1241 { … … 1365 1345 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit) 1366 1346 ) 1367 && !IEM_VERIFICATION_ENABLED(pVCpu)1368 1347 ) 1369 1348 { … … 1435 1414 1436 1415 uint32_t u32Value = 0; 1437 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1438 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8); 1439 else 1440 rcStrict = iemVerifyFakeIOPortRead(pVCpu, u16Port, &u32Value, OP_SIZE / 8); 1416 rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8); 1441 1417 if (!IOM_SUCCESS(rcStrict)) 1442 1418 return rcStrict; … … 1530 1506 if (rcStrict == VINF_SUCCESS) 1531 1507 { 1532 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1533 rcStrict = IOMIOPortWrite(pVM, pVCpu, pCtx->dx, uValue, OP_SIZE / 8); 1534 else 1535 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, pCtx->dx, uValue, OP_SIZE / 8); 1508 rcStrict = IOMIOPortWrite(pVM, pVCpu, pCtx->dx, uValue, OP_SIZE / 8); 1536 1509 if (IOM_SUCCESS(rcStrict)) 1537 1510 { … … 1623 1596 && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit) 1624 1597 ) 1625 && !IEM_VERIFICATION_ENABLED(pVCpu)1626 1598 ) 1627 1599 { … … 1693 1665 return rcStrict; 1694 1666 1695 if (!IEM_VERIFICATION_ENABLED(pVCpu)) 1696 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8); 1697 else 1698 rcStrict = iemVerifyFakeIOPortWrite(pVCpu, u16Port, uValue, OP_SIZE / 8); 1667 rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8); 1699 1668 if (IOM_SUCCESS(rcStrict)) 1700 1669 { -
trunk/src/VBox/VMM/VMMAll/IOMAll.cpp
r69111 r72493
 23  23   #include <VBox/vmm/iom.h>
 24  24   #include <VBox/vmm/mm.h>
 25        #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
 26        # include <VBox/vmm/iem.h>
 27        #endif
 28  25   #include <VBox/param.h>
 29  26   #include "IOMInternal.h"
…
 90  87   #endif
 91  88   AssertRC(rc2);
 92        #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
 93        IEMNotifyIOPortRead(pVM, Port, cbValue);
 94        #endif
 95  89
 96  90   #ifdef VBOX_WITH_STATISTICS
…
259 253   #endif
260 254   AssertRC(rc2);
261        #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
262        IEMNotifyIOPortReadString(pVM, uPort, pvDst, *pcTransfers, cb);
263        #endif
264 255
265 256   const uint32_t cRequestedTransfers = *pcTransfers;
…
479 470   #endif
480 471   AssertRC(rc2);
481        #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
482        IEMNotifyIOPortWrite(pVM, Port, u32Value, cbValue);
483        #endif
484 472
485 473   /** @todo bird: When I get time, I'll remove the RC/R0 trees and link the RC/R0
…
635 623   #endif
636 624   AssertRC(rc2);
637        #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
638        IEMNotifyIOPortWriteString(pVM, uPort, pvSrc, *pcTransfers, cb);
639        #endif
640 625
641 626   const uint32_t cRequestedTransfers = *pcTransfers;
-
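For reference, this is the notification pattern that disappears from all four I/O port paths (read, write and the two string variants); port accesses now go straight to the registered handler with no verifier side channel. Illustrative copy of the first such hunk above:

    /* Removed from IOMIOPortRead() and friends: */
    #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)
        IEMNotifyIOPortRead(pVM, Port, cbValue);   /* told the IEM verifier what the device model saw */
    #endif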
trunk/src/VBox/VMM/VMMAll/IOMAllMMIO.cpp
r72248 r72493 873 873 #endif 874 874 AssertRC(VBOXSTRICTRC_VAL(rc)); 875 #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)876 IEMNotifyMMIORead(pVM, GCPhys, cbValue);877 #endif878 875 879 876 /* … … 1006 1003 #endif 1007 1004 AssertRC(VBOXSTRICTRC_VAL(rc)); 1008 #if defined(IEM_VERIFICATION_MODE) && defined(IN_RING3)1009 IEMNotifyMMIOWrite(pVM, GCPhys, u32Value, cbValue);1010 #endif1011 1005 1012 1006 /* … … 1111 1105 VMMDECL(int) IOMMMIOMapMMIO2Page(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysRemapped, uint64_t fPageFlags) 1112 1106 { 1113 # ifndef IEM_VERIFICATION_MODE_FULL1114 1107 /* Currently only called from the VGA device during MMIO. */ 1115 1108 Log(("IOMMMIOMapMMIO2Page %RGp -> %RGp flags=%RX64\n", GCPhys, GCPhysRemapped, fPageFlags)); … … 1155 1148 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page. 1156 1149 */ 1157 # 1158 # 1150 # if 0 /* The assertion is wrong for the PGM_SYNC_CLEAR_PGM_POOL and VINF_PGM_HANDLER_ALREADY_ALIASED cases. */ 1151 # ifdef VBOX_STRICT 1159 1152 uint64_t fFlags; 1160 1153 RTHCPHYS HCPhys; 1161 1154 rc = PGMShwGetPage(pVCpu, (RTGCPTR)GCPhys, &fFlags, &HCPhys); 1162 1155 Assert(rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT); 1163 # endif1164 1156 # endif 1157 # endif 1165 1158 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys); 1166 1159 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT); 1167 # else1168 RT_NOREF_PV(pVM); RT_NOREF(GCPhys); RT_NOREF(GCPhysRemapped); RT_NOREF(fPageFlags);1169 # endif /* !IEM_VERIFICATION_MODE_FULL */1170 1160 return VINF_SUCCESS; 1171 1161 } 1172 1162 1173 1163 1174 # ifndef IEM_VERIFICATION_MODE_FULL1175 1164 /** 1176 1165 * Mapping a HC page in place of an MMIO page for direct access. … … 1195 1184 /** @todo NEM: MMIO page aliasing. */ 1196 1185 Assert(HMIsEnabled(pVM)); 1197 1198 /*1199 * Lookup the context range node the page belongs to.1200 */1201 # ifdef VBOX_STRICT1202 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */1203 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys);1204 AssertMsgReturn(pRange,1205 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND);1206 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0);1207 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK);1208 # endif1209 1210 /*1211 * Do the aliasing; page align the addresses since PGM is picky.1212 */1213 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK;1214 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK;1215 1216 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys);1217 AssertRCReturn(rc, rc);1218 1219 /*1220 * Modify the shadow page table. 
Since it's an MMIO page it won't be present and we1221 * can simply prefetch it.1222 *1223 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page.1224 */1225 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys);1226 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT);1227 return VINF_SUCCESS;1228 }1229 # endif /* !IEM_VERIFICATION_MODE_FULL */1230 1231 1232 /**1233 * Reset a previously modified MMIO region; restore the access flags.1234 *1235 * @returns VBox status code.1236 *1237 * @param pVM The cross context VM structure.1238 * @param GCPhys Physical address that's part of the MMIO region to be reset.1239 */1240 VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys)1241 {1242 Log(("IOMMMIOResetRegion %RGp\n", GCPhys));1243 1244 PVMCPU pVCpu = VMMGetCpu(pVM);1245 1246 /* This currently only works in real mode, protected mode without paging or with nested paging. */1247 /** @todo NEM: MMIO page aliasing. */1248 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */1249 || ( CPUMIsGuestInPagedProtectedMode(pVCpu)1250 && !HMIsNestedPagingActive(pVM)))1251 return VINF_SUCCESS; /* ignore */1252 1186 1253 1187 /* … … 1264 1198 1265 1199 /* 1200 * Do the aliasing; page align the addresses since PGM is picky. 1201 */ 1202 GCPhys &= ~(RTGCPHYS)PAGE_OFFSET_MASK; 1203 HCPhys &= ~(RTHCPHYS)PAGE_OFFSET_MASK; 1204 1205 int rc = PGMHandlerPhysicalPageAliasHC(pVM, GCPhys, GCPhys, HCPhys); 1206 AssertRCReturn(rc, rc); 1207 1208 /* 1209 * Modify the shadow page table. Since it's an MMIO page it won't be present and we 1210 * can simply prefetch it. 1211 * 1212 * Note: This is a NOP in the EPT case; we'll just let it fault again to resync the page. 1213 */ 1214 rc = PGMPrefetchPage(pVCpu, (RTGCPTR)GCPhys); 1215 Assert(rc == VINF_SUCCESS || rc == VERR_PAGE_NOT_PRESENT || rc == VERR_PAGE_TABLE_NOT_PRESENT); 1216 return VINF_SUCCESS; 1217 } 1218 1219 1220 /** 1221 * Reset a previously modified MMIO region; restore the access flags. 1222 * 1223 * @returns VBox status code. 1224 * 1225 * @param pVM The cross context VM structure. 1226 * @param GCPhys Physical address that's part of the MMIO region to be reset. 1227 */ 1228 VMMDECL(int) IOMMMIOResetRegion(PVM pVM, RTGCPHYS GCPhys) 1229 { 1230 Log(("IOMMMIOResetRegion %RGp\n", GCPhys)); 1231 1232 PVMCPU pVCpu = VMMGetCpu(pVM); 1233 1234 /* This currently only works in real mode, protected mode without paging or with nested paging. */ 1235 /** @todo NEM: MMIO page aliasing. */ 1236 if ( !HMIsEnabled(pVM) /* useless without VT-x/AMD-V */ 1237 || ( CPUMIsGuestInPagedProtectedMode(pVCpu) 1238 && !HMIsNestedPagingActive(pVM))) 1239 return VINF_SUCCESS; /* ignore */ 1240 1241 /* 1242 * Lookup the context range node the page belongs to. 1243 */ 1244 # ifdef VBOX_STRICT 1245 /* Can't lock IOM here due to potential deadlocks in the VGA device; not safe to access. */ 1246 PIOMMMIORANGE pRange = iomMMIOGetRangeUnsafe(pVM, pVCpu, GCPhys); 1247 AssertMsgReturn(pRange, 1248 ("Handlers and page tables are out of sync or something! GCPhys=%RGp\n", GCPhys), VERR_IOM_MMIO_RANGE_NOT_FOUND); 1249 Assert((pRange->GCPhys & PAGE_OFFSET_MASK) == 0); 1250 Assert((pRange->Core.KeyLast & PAGE_OFFSET_MASK) == PAGE_OFFSET_MASK); 1251 # endif 1252 1253 /* 1266 1254 * Call PGM to do the job work. 1267 1255 * -
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
r71043 r72493
1347 1347   }
1348 1348
1349        #ifndef IEM_VERIFICATION_MODE_FULL
1350 1349
1351 1350   /**
…
1598 1597   }
1599 1598
1600        #endif /* !IEM_VERIFICATION_MODE_FULL */
1601 1599
1602 1600   /**
-
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72484 r72493
8936 8936   return rcStrict;
8937 8937
8938        #ifndef IEM_VERIFICATION_MODE_FULL
8939 8938   /*
8940 8939   * Setup the virtualized-APIC accesses.
…
8967 8966   pVCpu->hm.s.vmx.u64MsrApicBase = u64MsrApicBase;
8968 8967   }
8969        #endif /* !IEM_VERIFICATION_MODE_FULL */
8970 8968
8971 8969   if (TRPMHasTrap(pVCpu))
-
trunk/src/VBox/VMM/VMMR3/DBGF.cpp
r70948 r72493
360 360   #ifndef RT_OS_L4
361 361
362       # if !defined(DEBUG) || defined(DEBUG_sandervl) || defined(DEBUG_frank) || defined(IEM_VERIFICATION_MODE)
    362   # if !defined(DEBUG) || defined(DEBUG_sandervl) || defined(DEBUG_frank)
363 363   int cWait = 10;
364 364   # else
-
trunk/src/VBox/VMM/VMMR3/TRPM.cpp
r72266 r72493
1507 1507   Assert(enmEvent == TRPM_HARDWARE_INT);
1508 1508
1509         #if defined(TRPM_FORWARD_TRAPS_IN_GC) && !defined(IEM_VERIFICATION_MODE)
     1509    #if defined(TRPM_FORWARD_TRAPS_IN_GC)
1510 1510
1511 1511   # ifdef LOG_ENABLED
…
1581 1581   return VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
1582 1582
1583         #else /* !TRPM_FORWARD_TRAPS_IN_GC || IEM_VERIFICATION_MODE*/
     1583    #else /* !TRPM_FORWARD_TRAPS_IN_GC */
1584 1584   RT_NOREF(pVM, enmEvent);
1585 1585   uint8_t u8Interrupt = 0;
…
1609 1609   : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1610 1610   : VINF_EM_RESCHEDULE_REM; /* (Heed the halted state if this is changed!) */
1611         #endif /* !TRPM_FORWARD_TRAPS_IN_GC || IEM_VERIFICATION_MODE*/
     1611    #endif /* !TRPM_FORWARD_TRAPS_IN_GC */
1612 1612   }
1613 1613
-
trunk/src/VBox/VMM/include/IEMInternal.h
r72488 r72493 57 57 * Includes the VEX decoding. */ 58 58 #define IEM_WITH_VEX 59 60 61 /** @def IEM_VERIFICATION_MODE_FULL62 * Shorthand for:63 * defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_MINIMAL)64 */65 #if (defined(IEM_VERIFICATION_MODE) && !defined(IEM_VERIFICATION_MODE_MINIMAL) && !defined(IEM_VERIFICATION_MODE_FULL)) \66 || defined(DOXYGEN_RUNNING)67 # define IEM_VERIFICATION_MODE_FULL68 #endif69 70 59 71 60 /** @def IEM_CFG_TARGET_CPU … … 191 180 /** Pointer to a const FPU result consisting of two output values and FSW. */ 192 181 typedef IEMFPURESULTTWO const *PCIEMFPURESULTTWO; 193 194 195 196 #ifdef IEM_VERIFICATION_MODE_FULL197 198 /**199 * Verification event type.200 */201 typedef enum IEMVERIFYEVENT202 {203 IEMVERIFYEVENT_INVALID = 0,204 IEMVERIFYEVENT_IOPORT_READ,205 IEMVERIFYEVENT_IOPORT_WRITE,206 IEMVERIFYEVENT_IOPORT_STR_READ,207 IEMVERIFYEVENT_IOPORT_STR_WRITE,208 IEMVERIFYEVENT_RAM_WRITE,209 IEMVERIFYEVENT_RAM_READ210 } IEMVERIFYEVENT;211 212 /** Checks if the event type is a RAM read or write. */213 # define IEMVERIFYEVENT_IS_RAM(a_enmType) ((a_enmType) == IEMVERIFYEVENT_RAM_WRITE || (a_enmType) == IEMVERIFYEVENT_RAM_READ)214 215 /**216 * Verification event record.217 */218 typedef struct IEMVERIFYEVTREC219 {220 /** Pointer to the next record in the list. */221 struct IEMVERIFYEVTREC *pNext;222 /** The event type. */223 IEMVERIFYEVENT enmEvent;224 /** The event data. */225 union226 {227 /** IEMVERIFYEVENT_IOPORT_READ */228 struct229 {230 RTIOPORT Port;231 uint8_t cbValue;232 } IOPortRead;233 234 /** IEMVERIFYEVENT_IOPORT_WRITE */235 struct236 {237 RTIOPORT Port;238 uint8_t cbValue;239 uint32_t u32Value;240 } IOPortWrite;241 242 /** IEMVERIFYEVENT_IOPORT_STR_READ */243 struct244 {245 RTIOPORT Port;246 uint8_t cbValue;247 RTGCUINTREG cTransfers;248 } IOPortStrRead;249 250 /** IEMVERIFYEVENT_IOPORT_STR_WRITE */251 struct252 {253 RTIOPORT Port;254 uint8_t cbValue;255 RTGCUINTREG cTransfers;256 } IOPortStrWrite;257 258 /** IEMVERIFYEVENT_RAM_READ */259 struct260 {261 RTGCPHYS GCPhys;262 uint32_t cb;263 } RamRead;264 265 /** IEMVERIFYEVENT_RAM_WRITE */266 struct267 {268 RTGCPHYS GCPhys;269 uint32_t cb;270 uint8_t ab[512];271 } RamWrite;272 } u;273 } IEMVERIFYEVTREC;274 /** Pointer to an IEM event verification records. */275 typedef IEMVERIFYEVTREC *PIEMVERIFYEVTREC;276 277 #endif /* IEM_VERIFICATION_MODE_FULL */278 182 279 183 … … 674 578 uint32_t cLongJumps; 675 579 uint32_t uAlignment6; /**< Alignment padding. */ 676 #ifdef IEM_VERIFICATION_MODE_FULL677 /** The Number of I/O port reads that has been performed. */678 uint32_t cIOReads;679 /** The Number of I/O port writes that has been performed. */680 uint32_t cIOWrites;681 /** Set if no comparison to REM is currently performed.682 * This is used to skip past really slow bits. */683 bool fNoRem;684 /** Saved fNoRem flag used by #iemInitExec and #iemUninitExec. */685 bool fNoRemSavedByExec;686 /** Indicates that RAX and RDX differences should be ignored since RDTSC687 * and RDTSCP are timing sensitive. */688 bool fIgnoreRaxRdx;689 /** Indicates that a MOVS instruction with overlapping source and destination690 * was executed, causing the memory write records to be incorrrect. */691 bool fOverlappingMovs;692 /** Set if there are problematic memory accesses (MMIO, write monitored, ++). */693 bool fProblematicMemory;694 /** This is used to communicate a CPL changed caused by IEMInjectTrap that695 * CPUM doesn't yet reflect. 
*/696 uint8_t uInjectCpl;697 /** To prevent EMR3HmSingleInstruction from triggering endless recursion via698 * emR3ExecuteInstruction and iemExecVerificationModeCheck. */699 uint8_t cVerifyDepth;700 bool afAlignment7[2];701 /** Mask of undefined eflags.702 * The verifier will any difference in these flags. */703 uint32_t fUndefinedEFlags;704 /** The CS of the instruction being interpreted. */705 RTSEL uOldCs;706 /** The RIP of the instruction being interpreted. */707 uint64_t uOldRip;708 /** The physical address corresponding to abOpcodes[0]. */709 RTGCPHYS GCPhysOpcodes;710 #endif711 580 /** @} */ 712 581 … … 754 623 /** Pointer to instruction statistics for ring-3 context. */ 755 624 R3PTRTYPE(PIEMINSTRSTATS) pStatsR3; 756 757 #ifdef IEM_VERIFICATION_MODE_FULL758 /** The event verification records for what IEM did (LIFO). */759 R3PTRTYPE(PIEMVERIFYEVTREC) pIemEvtRecHead;760 /** Insertion point for pIemEvtRecHead. */761 R3PTRTYPE(PIEMVERIFYEVTREC *) ppIemEvtRecNext;762 /** The event verification records for what the other party did (FIFO). */763 R3PTRTYPE(PIEMVERIFYEVTREC) pOtherEvtRecHead;764 /** Insertion point for pOtherEvtRecHead. */765 R3PTRTYPE(PIEMVERIFYEVTREC *) ppOtherEvtRecNext;766 /** List of free event records. */767 R3PTRTYPE(PIEMVERIFYEVTREC) pFreeEvtRec;768 #endif769 625 } IEMCPU; 770 626 AssertCompileMemberOffset(IEMCPU, fCurXcpt, 0x48); … … 782 638 * @param a_pVCpu The cross context virtual CPU structure of the calling thread. 783 639 */ 784 #if !defined(IEM_VERIFICATION_MODE_FULL) && !defined(IEM_VERIFICATION_MODE) \ 785 && !defined(IEM_VERIFICATION_MODE_MINIMAL) && defined(VMCPU_INCL_CPUM_GST_CTX) 786 # define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx) 787 #else 788 # define IEM_GET_CTX(a_pVCpu) ((a_pVCpu)->iem.s.CTX_SUFF(pCtx)) 789 #endif 640 #define IEM_GET_CTX(a_pVCpu) (&(a_pVCpu)->cpum.GstCtx) 790 641 791 642 /** @def IEM_CTX_ASSERT … … 1074 925 } IEMACCESSCRX; 1075 926 1076 /**1077 * Tests if verification mode is enabled.1078 *1079 * This expands to @c false when IEM_VERIFICATION_MODE is not defined and1080 * should therefore cause the compiler to eliminate the verification branch1081 * of an if statement. */1082 #ifdef IEM_VERIFICATION_MODE_FULL1083 # define IEM_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)1084 #elif defined(IEM_VERIFICATION_MODE_MINIMAL)1085 # define IEM_VERIFICATION_ENABLED(a_pVCpu) (true)1086 #else1087 # define IEM_VERIFICATION_ENABLED(a_pVCpu) (false)1088 #endif1089 1090 /**1091 * Tests if full verification mode is enabled.1092 *1093 * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and1094 * should therefore cause the compiler to eliminate the verification branch1095 * of an if statement. */1096 #ifdef IEM_VERIFICATION_MODE_FULL1097 # define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)1098 #else1099 # define IEM_FULL_VERIFICATION_ENABLED(a_pVCpu) (false)1100 #endif1101 1102 /**1103 * Tests if full verification mode is enabled again REM.1104 *1105 * This expands to @c false when IEM_VERIFICATION_MODE_FULL is not defined and1106 * should therefore cause the compiler to eliminate the verification branch1107 * of an if statement. 
*/1108 #ifdef IEM_VERIFICATION_MODE_FULL1109 # ifdef IEM_VERIFICATION_MODE_FULL_HM1110 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem && !HMIsEnabled((a_pVCpu)->CTX_SUFF(pVM)))1111 # else1112 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (!(a_pVCpu)->iem.s.fNoRem)1113 # endif1114 #else1115 # define IEM_FULL_VERIFICATION_REM_ENABLED(a_pVCpu) (false)1116 #endif1117 1118 /** @def IEM_VERIFICATION_MODE1119 * Indicates that one of the verfication modes are enabled.1120 */1121 #if (defined(IEM_VERIFICATION_MODE_FULL) || defined(IEM_VERIFICATION_MODE_MINIMAL)) && !defined(IEM_VERIFICATION_MODE) \1122 || defined(DOXYGEN_RUNNING)1123 # define IEM_VERIFICATION_MODE1124 #endif1125 927 1126 928 /** … … 1130 932 * 1131 933 * This is a NOOP if the verifier isn't compiled in. 1132 */ 1133 #ifdef IEM_VERIFICATION_MODE_FULL 1134 # define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { pVCpu->iem.s.fUndefinedEFlags |= (a_fEfl); } while (0) 1135 #else 1136 # define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0) 1137 #endif 934 * 935 * @note We're temporarily keeping this until code is converted to new 936 * disassembler style opcode handling. 937 */ 938 #define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0) 1138 939 1139 940 -
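Two small but wide-reaching definitions fall out of the IEMInternal.h hunks above. IEM_GET_CTX() no longer switches on the verification defines, and the undefined-EFLAGS annotation macro is kept only as a stub (per the comment in the diff, until the opcode handlers are converted to the new disassembler-style handling). Sketch of the resulting definitions, copied from the hunks:

    /* Guest context now always comes straight from CPUM: */
    #define IEM_GET_CTX(a_pVCpu)    (&(a_pVCpu)->cpum.GstCtx)

    /* Verifier annotation kept as a no-op for the time being: */
    #define IEMOP_VERIFICATION_UNDEFINED_EFLAGS(a_fEfl) do { } while (0)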
trunk/src/VBox/VMM/include/PDMInternal.h
r69474 r72493
57 57   /** @def PDMCRITSECT_STRICT
58 58   * Enables/disables PDM critsect strictness like deadlock detection. */
59      #if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(IEM_VERIFICATION_MODE) && !defined(PDMCRITSECT_STRICT)) \
59      #if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(PDMCRITSECT_STRICT)) \
60 60   || defined(DOXYGEN_RUNNING)
61 61   # define PDMCRITSECT_STRICT
…
65 65   * Enables/disables PDM read/write critsect strictness like deadlock
66 66   * detection. */
67      #if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(IEM_VERIFICATION_MODE) && !defined(PDMCRITSECTRW_STRICT)) \
67      #if (defined(RT_LOCK_STRICT) && defined(IN_RING3) && !defined(PDMCRITSECTRW_STRICT)) \
68 68   || defined(DOXYGEN_RUNNING)
69 69   # define PDMCRITSECTRW_STRICT
-
trunk/src/recompiler/Makefile.kmk
r69111 r72493
68 68   #VBoxRemPrimary_DEFS += DEBUG_ALL_LOGGING DEBUG_DISAS DEBUG_PCALL CONFIG_DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
69 69   #VBoxRemPrimary_DEFS += DEBUG_DISAS DEBUG_PCALL CONFIG_DEBUG_EXEC DEBUG_FLUSH DEBUG_IOPORT DEBUG_SIGNAL DEBUG_TLB_CHECK DEBUG_TB_INVALIDATE DEBUG_TLB # Enables huge amounts of debug logging.
70      ifdef IEM_VERIFICATION_MODE
71      VBoxRemPrimary_DEFS += IEM_VERIFICATION_MODE
72      endif
73 70   ifdef VBOX_WITH_RAW_MODE
74 71   VBoxRemPrimary_DEFS += VBOX_WITH_RAW_MODE
-
trunk/src/recompiler/VBoxRecompiler.c
r70948 r72493 1412 1412 uint32_t u32CR0; 1413 1413 1414 #ifdef IEM_VERIFICATION_MODE1415 return false;1416 #endif1417 1418 1414 /* Update counter. */ 1419 1415 env->pVM->rem.s.cCanExecuteRaw++; … … 4263 4259 REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu) 4264 4260 { 4265 #ifndef IEM_VERIFICATION_MODE4266 4261 LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM, 4267 4262 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK) ? "enabled" : "disabled")); 4268 4263 if (pVM->rem.s.fInREM) 4269 { 4270 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, 4271 CPU_INTERRUPT_EXTERNAL_HARD); 4272 } 4273 #endif 4264 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_HARD); 4274 4265 } 4275 4266 … … 4302 4293 REMR3DECL(void) REMR3NotifyTimerPending(PVM pVM, PVMCPU pVCpuDst) 4303 4294 { 4304 #ifndef IEM_VERIFICATION_MODE4305 4295 #ifndef DEBUG_bird 4306 4296 LogFlow(("REMR3NotifyTimerPending: fInRem=%d\n", pVM->rem.s.fInREM)); … … 4319 4309 else 4320 4310 LogIt(RTLOGGRPFLAGS_LEVEL_5, LOG_GROUP_TM, ("REMR3NotifyTimerPending: !fInREM; cpu state=%d\n", VMCPU_GET_STATE(pVCpuDst))); 4321 #endif4322 4311 } 4323 4312 … … 4331 4320 REMR3DECL(void) REMR3NotifyDmaPending(PVM pVM) 4332 4321 { 4333 #ifndef IEM_VERIFICATION_MODE4334 4322 LogFlow(("REMR3NotifyDmaPending: fInRem=%d\n", pVM->rem.s.fInREM)); 4335 4323 if (pVM->rem.s.fInREM) 4336 { 4337 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, 4338 CPU_INTERRUPT_EXTERNAL_DMA); 4339 } 4340 #endif 4324 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_DMA); 4341 4325 } 4342 4326 … … 4350 4334 REMR3DECL(void) REMR3NotifyQueuePending(PVM pVM) 4351 4335 { 4352 #ifndef IEM_VERIFICATION_MODE4353 4336 LogFlow(("REMR3NotifyQueuePending: fInRem=%d\n", pVM->rem.s.fInREM)); 4354 4337 if (pVM->rem.s.fInREM) 4355 { 4356 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, 4357 CPU_INTERRUPT_EXTERNAL_EXIT); 4358 } 4359 #endif 4338 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT); 4360 4339 } 4361 4340 … … 4369 4348 REMR3DECL(void) REMR3NotifyFF(PVM pVM) 4370 4349 { 4371 #ifndef IEM_VERIFICATION_MODE4372 4350 LogFlow(("REMR3NotifyFF: fInRem=%d\n", pVM->rem.s.fInREM)); 4373 4351 if (pVM->rem.s.fInREM) 4374 { 4375 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, 4376 CPU_INTERRUPT_EXTERNAL_EXIT); 4377 } 4378 #endif 4352 ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_EXIT); 4379 4353 } 4380 4354 -
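Each of the REMR3Notify* helpers loses its IEM_VERIFICATION_MODE early-out and collapses to a single atomic OR of the interrupt_request flags. Copied, lightly trimmed, from the REMR3NotifyInterruptSet hunk above; the timer, DMA, queue and FF variants follow the same pattern with their respective CPU_INTERRUPT_EXTERNAL_* bits:

    REMR3DECL(void) REMR3NotifyInterruptSet(PVM pVM, PVMCPU pVCpu)
    {
        LogFlow(("REMR3NotifyInterruptSet: fInRem=%d interrupts %s\n", pVM->rem.s.fInREM,
                 (pVM->rem.s.Env.eflags & IF_MASK) && !(pVM->rem.s.Env.hflags & HF_INHIBIT_IRQ_MASK)
                 ? "enabled" : "disabled"));
        if (pVM->rem.s.fInREM)
            ASMAtomicOrS32((int32_t volatile *)&cpu_single_env->interrupt_request, CPU_INTERRUPT_EXTERNAL_HARD);
    }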
trunk/src/recompiler/cpu-exec.c
r69465 r72493
350 350   env->exception_next_eip,
351 351   env->exception_is_int == EXCEPTION_IS_INT_VALUE_HARDWARE_IRQ);
352       # ifdef IEM_VERIFICATION_MODE /* Ugly hacks */
353       cpu_loop_exit();
354       # endif
355 352   /* successfully delivered */
356 353   env->old_exception = -1;
…
450 447   /* Clear CPU_INTERRUPT_SINGLE_INSTR and leave CPU_INTERRUPT_SINGLE_INSTR_IN_FLIGHT set. */
451 448   ASMAtomicAndS32((int32_t volatile *)&env->interrupt_request, ~CPU_INTERRUPT_SINGLE_INSTR);
452       # ifdef IEM_VERIFICATION_MODE
453       env->exception_index = ret = EXCP_SINGLE_INSTR;
454       cpu_loop_exit();
455       # endif
456 449   }
457 450   # endif /* VBOX */