Changeset 72744 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Jun 29, 2018 7:36:19 AM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 5 edited
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r72643 r72744 89 89 DECLR0CALLBACKMEMBER(int, pfnEnterSession, (PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)); 90 90 DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)); 91 DECLR0CALLBACKMEMBER(int, pfn SaveHostState, (PVM pVM,PVMCPU pVCpu));91 DECLR0CALLBACKMEMBER(int, pfnExportHostState, (PVMCPU pVCpu)); 92 92 DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 93 93 DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, … … 116 116 /** Host CR4 value (set by ring-0 VMX init) */ 117 117 uint64_t u64HostCr4; 118 119 118 /** Host EFER value (set by ring-0 VMX init) */ 120 119 uint64_t u64HostEfer; 121 122 120 /** Host SMM monitor control (used for logging/diagnostics) */ 123 121 uint64_t u64HostSmmMonitorCtl; … … 149 147 bool fSupported; 150 148 } svm; 149 151 150 /** Saved error from detection */ 152 151 int32_t lLastError; … … 176 175 177 176 178 179 177 /** 180 178 * Initializes a first return code structure. … … 237 235 static DECLCALLBACK(int) hmR0DummyEnter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu) 238 236 { 239 NOREF(pVM); NOREF(pVCpu); NOREF(pCpu);237 RT_NOREF3(pVM, pVCpu, pCpu); 240 238 return VINF_SUCCESS; 241 239 } … … 243 241 static DECLCALLBACK(void) hmR0DummyThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit) 244 242 { 245 NOREF(enmEvent); NOREF(pVCpu); NOREF(fGlobalInit);243 RT_NOREF3(enmEvent, pVCpu, fGlobalInit); 246 244 } 247 245 … … 249 247 bool fEnabledBySystem, void *pvArg) 250 248 { 251 NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem); NOREF(pvArg);249 RT_NOREF6(pCpu, pVM, pvCpuPage, HCPhysCpuPage, fEnabledBySystem, pvArg); 252 250 return VINF_SUCCESS; 253 251 } … … 255 253 static DECLCALLBACK(int) hmR0DummyDisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage) 256 254 { 257 NOREF(pCpu); NOREF(pvCpuPage); NOREF(HCPhysCpuPage);255 RT_NOREF3(pCpu, pvCpuPage, HCPhysCpuPage); 258 256 return VINF_SUCCESS; 259 257 } … … 261 259 static DECLCALLBACK(int) hmR0DummyInitVM(PVM pVM) 262 260 { 263 NOREF(pVM);261 RT_NOREF1(pVM); 264 262 return VINF_SUCCESS; 265 263 } … … 267 265 static DECLCALLBACK(int) hmR0DummyTermVM(PVM pVM) 268 266 { 269 NOREF(pVM);267 RT_NOREF1(pVM); 270 268 return VINF_SUCCESS; 271 269 } … … 273 271 static DECLCALLBACK(int) hmR0DummySetupVM(PVM pVM) 274 272 { 275 NOREF(pVM);273 RT_NOREF1(pVM); 276 274 return VINF_SUCCESS; 277 275 } … … 279 277 static DECLCALLBACK(VBOXSTRICTRC) hmR0DummyRunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 280 278 { 281 NOREF(pVM); NOREF(pVCpu); NOREF(pCtx);279 RT_NOREF3(pVM, pVCpu, pCtx); 282 280 return VINF_SUCCESS; 283 281 } 284 282 285 static DECLCALLBACK(int) hmR0Dummy SaveHostState(PVM pVM,PVMCPU pVCpu)286 { 287 NOREF(pVM); NOREF(pVCpu);283 static DECLCALLBACK(int) hmR0DummyExportHostState(PVMCPU pVCpu) 284 { 285 RT_NOREF1(pVCpu); 288 286 return VINF_SUCCESS; 289 287 } … … 440 438 441 439 /* Make sure we don't get rescheduled to another cpu during this probe. */ 442 RTCCUINTREG fFlags = ASMIntDisableFlags();440 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 443 441 444 442 /* 445 * Check CR4.VMXE 443 * Check CR4.VMXE. 446 444 */ 447 445 g_HmR0.vmx.u64HostCr4 = ASMGetCR4(); 448 446 if (!(g_HmR0.vmx.u64HostCr4 & X86_CR4_VMXE)) 449 447 { 450 /* In theory this bit could be cleared behind our back. Which would cause451 #UDfaults when we try to execute the VMX instructions... 
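Note on the hunk above: the ring-0 method table entry pfnSaveHostState is renamed to pfnExportHostState (and loses the PVM parameter), and the dummy stubs switch from chained NOREF() calls to the RT_NOREFn() macros. Below is a minimal standalone sketch of the dispatch-table-with-dummy-fallback pattern in play here; the types and names are simplified stand-ins, not the real VirtualBox structures or the real HM method table.

```c
/* Minimal sketch of a dispatch table with a dummy fallback; all names are
 * illustrative stand-ins, not the real VirtualBox structures. */
#include <stdint.h>
#include <stdio.h>

typedef struct VCPUSTUB { uint64_t fCtxChanged; } VCPUSTUB;

typedef int (*PFNEXPORTHOSTSTATE)(VCPUSTUB *pVCpu);

/* Dummy used until VT-x or AMD-V support has been detected. */
static int dummyExportHostState(VCPUSTUB *pVCpu)
{
    (void)pVCpu;                 /* same effect as RT_NOREF1(pVCpu) */
    return 0;                    /* ~ VINF_SUCCESS */
}

static struct
{
    PFNEXPORTHOSTSTATE pfnExportHostState;
} g_HmStub = { dummyExportHostState };

int main(void)
{
    VCPUSTUB VCpu = { 0 };
    /* Later re-pointed at the VMX or SVM implementation once detected. */
    printf("rc=%d\n", g_HmStub.pfnExportHostState(&VCpu));
    return 0;
}
```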
*/448 /* In theory this bit could be cleared behind our back. Which would cause #UD 449 faults when we try to execute the VMX instructions... */ 452 450 ASMSetCR4(g_HmR0.vmx.u64HostCr4 | X86_CR4_VMXE); 453 451 } … … 481 479 } 482 480 483 /* Restore CR4 again; don't leave the X86_CR4_VMXE flag set 484 if it wasn't so before (some software could incorrectly 485 think it's in VMX mode). */ 481 /* 482 * Restore CR4 again; don't leave the X86_CR4_VMXE flag set if it was not 483 * set before (some software could incorrectly think it is in VMX mode). 484 */ 486 485 ASMSetCR4(g_HmR0.vmx.u64HostCr4); 487 ASMSetFlags(f Flags);486 ASMSetFlags(fEFlags); 488 487 489 488 RTR0MemObjFree(hScatchMemObj, false); … … 501 500 g_HmR0.pfnEnterSession = VMXR0Enter; 502 501 g_HmR0.pfnThreadCtxCallback = VMXR0ThreadCtxCallback; 503 g_HmR0.pfn SaveHostState = VMXR0SaveHostState;502 g_HmR0.pfnExportHostState = VMXR0ExportHostState; 504 503 g_HmR0.pfnRunGuestCode = VMXR0RunGuestCode; 505 504 g_HmR0.pfnEnableCpu = VMXR0EnableCpu; … … 565 564 g_HmR0.pfnEnterSession = SVMR0Enter; 566 565 g_HmR0.pfnThreadCtxCallback = SVMR0ThreadCtxCallback; 567 g_HmR0.pfn SaveHostState = SVMR0SaveHostState;566 g_HmR0.pfnExportHostState = SVMR0ExportHostState; 568 567 g_HmR0.pfnRunGuestCode = SVMR0RunGuestCode; 569 568 g_HmR0.pfnEnableCpu = SVMR0EnableCpu; … … 642 641 g_HmR0.pfnEnterSession = hmR0DummyEnter; 643 642 g_HmR0.pfnThreadCtxCallback = hmR0DummyThreadCtxCallback; 644 g_HmR0.pfn SaveHostState = hmR0DummySaveHostState;643 g_HmR0.pfnExportHostState = hmR0DummyExportHostState; 645 644 g_HmR0.pfnRunGuestCode = hmR0DummyRunGuestCode; 646 645 g_HmR0.pfnEnableCpu = hmR0DummyEnableCpu; … … 1338 1337 /* On first entry we'll sync everything. */ 1339 1338 for (VMCPUID i = 0; i < pVM->cCpus; i++) 1340 HMCPU_CF_RESET_TO(&pVM->aCpus[i], HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST); 1339 { 1340 PVMCPU pVCpu = &pVM->aCpus[i]; 1341 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST; 1342 } 1341 1343 1342 1344 /* … … 1346 1348 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER; 1347 1349 RTThreadPreemptDisable(&PreemptState); 1348 RTCPUID idCpu= RTMpCpuId();1350 RTCPUID idCpu = RTMpCpuId(); 1349 1351 1350 1352 /* Enable VT-x or AMD-V if local init is required. */ … … 1400 1402 1401 1403 /* Reload host-state (back from ring-3/migrated CPUs) and shared guest/host bits. 
*/ 1402 HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE); 1404 if (g_HmR0.vmx.fSupported) 1405 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE; 1406 else 1407 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE; 1403 1408 1404 1409 Assert(pCpu->idCpu == idCpu && pCpu->idCpu != NIL_RTCPUID); … … 1435 1440 PHMGLOBALCPUINFO pCpu = &g_HmR0.aCpuInfo[idCpu]; 1436 1441 Assert(pCpu); 1437 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 1442 if (g_HmR0.vmx.fSupported) 1443 { 1444 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 1445 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 1446 } 1447 else 1448 { 1449 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)) 1450 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)); 1451 } 1438 1452 1439 1453 rc = g_HmR0.pfnEnterSession(pVM, pVCpu, pCpu); 1440 AssertMsgRCReturn(rc, (" pfnEnterSession failed.rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);1441 1442 /* Loadthe host-state as we may be resuming code after a longjmp and quite1454 AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc); 1455 1456 /* Exports the host-state as we may be resuming code after a longjmp and quite 1443 1457 possibly now be scheduled on a different CPU. */ 1444 rc = g_HmR0.pfn SaveHostState(pVM,pVCpu);1445 AssertMsgRCReturn(rc, (" pfnSaveHostState failed.rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);1458 rc = g_HmR0.pfnExportHostState(pVCpu); 1459 AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc); 1446 1460 1447 1461 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE … … 1556 1570 VMMR0_INT_DECL(void) HMR0NotifyCpumUnloadedGuestFpuState(PVMCPU pVCpu) 1557 1571 { 1558 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);1572 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 1559 1573 } 1560 1574 … … 1567 1581 VMMR0_INT_DECL(void) HMR0NotifyCpumModifiedHostCr0(PVMCPU pVCpu) 1568 1582 { 1569 HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);1583 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_HOST_CONTEXT); 1570 1584 } 1571 1585 … … 1585 1599 STAM_COUNTER_INC(&pVCpu->hm.s.StatFpu64SwitchBack); 1586 1600 if (pVM->hm.s.vmx.fSupported) 1587 return VMXR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL);1588 return SVMR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL);1601 return VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL); 1602 return SVMR0Execute64BitsHandler(pVCpu, HM64ON32OP_HMRCSaveGuestFPU64, 0, NULL); 1589 1603 } 1590 1604 … … 1602 1616 STAM_COUNTER_INC(&pVCpu->hm.s.StatDebug64SwitchBack); 1603 1617 if (pVM->hm.s.vmx.fSupported) 1604 return VMXR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL);1605 return SVMR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL);1618 return VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL); 1619 return SVMR0Execute64BitsHandler(pVCpu, HM64ON32OP_HMRCSaveGuestDebug64, 0, NULL); 1606 1620 } 1607 1621 … … 1622 1636 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 1623 1637 if (pVM->hm.s.vmx.fSupported) 1624 rc = VMXR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, 
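The hunks above replace the HMCPU_CF_SET/HMCPU_CF_RESET_TO macros with direct manipulation of a 64-bit pVCpu->hm.s.fCtxChanged mask, and split the shared host/guest state flag into VMX- and SVM-specific variants. A toy version of the dirty-mask bookkeeping is shown below; the flag values are invented for the sketch and do not correspond to the real HM_CHANGED_* constants.

```c
/* Toy version of the fCtxChanged dirty-mask bookkeeping; flag values are
 * invented and VCPUSTUB stands in for the per-VCPU HM state. */
#include <stdint.h>

#define HM_CHANGED_HOST_CONTEXT  UINT64_C(0x0001)
#define HM_CHANGED_GUEST_CR0     UINT64_C(0x0002)
#define HM_CHANGED_ALL_GUEST     UINT64_C(0x00fe)

typedef struct VCPUSTUB { uint64_t fCtxChanged; } VCPUSTUB;

/* First entry: everything must be (re-)exported to the VMCB/VMCS. */
void markAllDirty(VCPUSTUB *pVCpu)
{
    pVCpu->fCtxChanged |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
}

/* After exporting a piece of state, clear just its bit. */
void clearCr0Dirty(VCPUSTUB *pVCpu)
{
    pVCpu->fCtxChanged &= ~HM_CHANGED_GUEST_CR0;
}
```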
&aParam[0]);1638 rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]); 1625 1639 else 1626 rc = SVMR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]);1640 rc = SVMR0Execute64BitsHandler(pVCpu, HM64ON32OP_HMRCTestSwitcher64, 5, &aParam[0]); 1627 1641 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 1628 1642 … … 1701 1715 * @returns VBox status code. 1702 1716 * @param pVCpu The cross context CPU structure. 1703 * @param pCtx The target CPU context.1704 1717 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 1705 1718 */ 1706 VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 1707 { 1708 /** @todo Intel. */ 1709 #if 0 1710 if (pVCpu->CTX_SUFF(pVM).hm.s.vmx.fSupported) 1711 return VMXR0ImportStateOnDemand(pVCpu, pCtx, fWhat); 1712 #endif 1713 return SVMR0ImportStateOnDemand(pVCpu, pCtx, fWhat); 1719 VMMR0_INT_DECL(int) HMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat) 1720 { 1721 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported) 1722 return VMXR0ImportStateOnDemand(pVCpu, fWhat); 1723 return SVMR0ImportStateOnDemand(pVCpu, fWhat); 1714 1724 } 1715 1725 … … 1959 1969 { "vip", NULL, X86_EFL_VIP }, 1960 1970 { "vif", NULL, X86_EFL_VIF }, 1961 { "ac", NULL, X86_EFL_AC },1962 { "vm", NULL, X86_EFL_VM },1963 { "rf", NULL, X86_EFL_RF },1964 { "nt", NULL, X86_EFL_NT },1965 { "ov", "nv", X86_EFL_OF },1966 { "dn", "up", X86_EFL_DF },1967 { "ei", "di", X86_EFL_IF },1968 { "tf", NULL, X86_EFL_TF },1969 { "nt", "pl", X86_EFL_SF },1970 { "nz", "zr", X86_EFL_ZF },1971 { "ac", "na", X86_EFL_AF },1972 { "po", "pe", X86_EFL_PF },1973 { "cy", "nc", X86_EFL_CF },1971 { "ac", NULL, X86_EFL_AC }, 1972 { "vm", NULL, X86_EFL_VM }, 1973 { "rf", NULL, X86_EFL_RF }, 1974 { "nt", NULL, X86_EFL_NT }, 1975 { "ov", "nv", X86_EFL_OF }, 1976 { "dn", "up", X86_EFL_DF }, 1977 { "ei", "di", X86_EFL_IF }, 1978 { "tf", NULL, X86_EFL_TF }, 1979 { "nt", "pl", X86_EFL_SF }, 1980 { "nz", "zr", X86_EFL_ZF }, 1981 { "ac", "na", X86_EFL_AF }, 1982 { "po", "pe", X86_EFL_PF }, 1983 { "cy", "nc", X86_EFL_CF }, 1974 1984 }; 1975 1985 char szEFlags[80]; … … 1987 1997 } 1988 1998 psz[-1] = '\0'; 1989 1990 1999 1991 2000 /* -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r72661 r72744 37 37 #include "dtrace/VBoxVMM.h" 38 38 39 #define HMSVM_USE_IEM_EVENT_REFLECTION40 39 #ifdef DEBUG_ramshankar 41 40 # define HMSVM_SYNC_FULL_GUEST_STATE … … 106 105 | CPUMCTX_EXTRN_HM_SVM_MASK) 107 106 107 /** 108 * Subset of the guest-CPU state that is shared between the guest and host. 109 */ 110 #define HMSVM_CPUMCTX_SHARED_STATE CPUMCTX_EXTRN_DR_MASK 111 108 112 /** Macro for importing guest state from the VMCB back into CPUMCTX. */ 109 #define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_ pCtx, a_fWhat) \113 #define HMSVM_CPUMCTX_IMPORT_STATE(a_pVCpu, a_fWhat) \ 110 114 do { \ 111 if ((a_p Ctx)->fExtrn & (a_fWhat)) \112 hmR0SvmImportGuestState((a_pVCpu), (a_ pCtx), (a_fWhat)); \115 if ((a_pVCpu)->cpum.GstCtx.fExtrn & (a_fWhat)) \ 116 hmR0SvmImportGuestState((a_pVCpu), (a_fWhat)); \ 113 117 } while (0) 114 118 … … 131 135 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_SHUTDOWN)) \ 132 136 { \ 133 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); \137 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); \ 134 138 return VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_SHUTDOWN, 0, 0)); \ 135 139 } \ … … 368 372 * Internal Functions * 369 373 *********************************************************************************************************************************/ 370 static void hmR0SvmSetMsrPermission(PCCPUMCTX pCtx, uint8_t *pbMsrBitmap, unsigned uMsr, SVMMSREXITREAD enmRead,371 SVMMSREXITWRITE enmWrite);372 374 static void hmR0SvmPendingEventToTrpmTrap(PVMCPU pVCpu); 373 375 static void hmR0SvmLeave(PVMCPU pVCpu, bool fImportState); 374 static void hmR0SvmImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat);375 376 376 377 … … 463 464 * @param pVCpu The cross context virtual CPU structure. 464 465 * @param pVmcb Pointer to the VM control block. 465 * @param pCtx Pointer to the guest-CPU context.466 466 * @param pszPrefix Log prefix. 467 467 * @param fFlags Log flags, see HMSVM_LOG_XXX. 468 468 * @param uVerbose The verbosity level, currently unused. 469 469 */ 470 static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, PCCPUMCTX pCtx, const char *pszPrefix, uint32_t fFlags, 471 uint8_t uVerbose) 470 static void hmR0SvmLogState(PVMCPU pVCpu, PCSVMVMCB pVmcb, const char *pszPrefix, uint32_t fFlags, uint8_t uVerbose) 472 471 { 473 472 RT_NOREF2(pVCpu, uVerbose); 473 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 474 474 475 475 HMSVM_CPUMCTX_ASSERT(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS); … … 549 549 550 550 /* Paranoid: Disable interrupt as, in theory, interrupt handlers might mess with EFER. */ 551 RTCCUINTREG fEFlags = ASMIntDisableFlags();551 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 552 552 553 553 /* … … 560 560 if ( pVM 561 561 && pVM->hm.s.svm.fIgnoreInUseError) 562 {563 562 pCpu->fIgnoreAMDVInUseError = true; 564 }565 563 566 564 if (!pCpu->fIgnoreAMDVInUseError) … … 581 579 582 580 /* 583 * Theoretically, other hypervisors may have used ASIDs, ideally we should flush all non-zero ASIDs584 * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done585 * upon VMRUN). Therefore, flag that we need to flush the TLB entirely with before executing any586 * guest code.581 * Theoretically, other hypervisors may have used ASIDs, ideally we should flush all 582 * non-zero ASIDs when enabling SVM. AMD doesn't have an SVM instruction to flush all 583 * ASIDs (flushing is done upon VMRUN). 
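HMSVM_CPUMCTX_IMPORT_STATE above now keys off pVCpu->cpum.GstCtx.fExtrn directly, so guest registers are only pulled out of the VMCB when they are still marked as external. A toy model of that lazy-import guard follows; the types and flag values are invented for the sketch.

```c
/* Toy model of the lazy-import guard: state is only copied out of the VMCB
 * if the corresponding fExtrn bits say it is still "external". */
#include <stdint.h>

#define CPUMCTX_EXTRN_CR0  UINT64_C(0x01)

typedef struct CTXSTUB { uint64_t fExtrn; uint64_t cr0; } CTXSTUB;

void importFromVmcb(CTXSTUB *pCtx, uint64_t fWhat)
{
    /* ...copy the requested registers out of the hardware VMCB here... */
    pCtx->fExtrn &= ~fWhat;      /* mark them as now held in the context */
}

void importOnDemand(CTXSTUB *pCtx, uint64_t fWhat)
{
    if (pCtx->fExtrn & fWhat)    /* skip the work if already imported */
        importFromVmcb(pCtx, fWhat);
}
```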
Therefore, flag that we need to flush the TLB 584 * entirely with before executing any guest code. 587 585 */ 588 586 pCpu->fFlushAsidBeforeUse = true; … … 614 612 615 613 /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */ 616 RTCCUINTREG fEFlags = ASMIntDisableFlags();614 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 617 615 618 616 /* Turn off AMD-V in the EFER MSR. */ … … 727 725 if (HMAmdIsSubjectToErratum170(&u32Family, &u32Model, &u32Stepping)) 728 726 { 729 Log4 (("SVMR0InitVM:AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping));727 Log4Func(("AMD cpu with erratum 170 family %#x model %#x stepping %#x\n", u32Family, u32Model, u32Stepping)); 730 728 pVM->hm.s.svm.fAlwaysFlushTLB = true; 731 729 } … … 989 987 /* If you add any exceptions here, make sure to update hmR0SvmHandleExit(). */ 990 988 pVmcbCtrl->u32InterceptXcpt |= 0 991 992 993 994 995 996 997 998 999 1000 989 | RT_BIT(X86_XCPT_BP) 990 | RT_BIT(X86_XCPT_DE) 991 | RT_BIT(X86_XCPT_NM) 992 | RT_BIT(X86_XCPT_UD) 993 | RT_BIT(X86_XCPT_NP) 994 | RT_BIT(X86_XCPT_SS) 995 | RT_BIT(X86_XCPT_GP) 996 | RT_BIT(X86_XCPT_PF) 997 | RT_BIT(X86_XCPT_MF) 998 ; 1001 999 #endif 1002 1000 … … 1185 1183 if (!fFlushPending) 1186 1184 { 1187 Log4 (("SVMR0InvalidatePage %RGv\n", GCVirt));1185 Log4Func(("%#RGv\n", GCVirt)); 1188 1186 1189 1187 PCPUMCTX pCtx = CPUMQueryGuestCtxPtr(pVCpu); … … 1222 1220 1223 1221 /* 1224 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last. 1225 * This can happen both for start & resume due to long jumps back to ring-3. 1222 * Force a TLB flush for the first world switch if the current CPU differs from the one 1223 * we ran on last. This can happen both for start & resume due to long jumps back to 1224 * ring-3. 1226 1225 * 1227 * We also force a TLB flush every time when executing a nested-guest VCPU as there is no correlation1228 * between it and the physical CPU.1226 * We also force a TLB flush every time when executing a nested-guest VCPU as there is no 1227 * correlation between it and the physical CPU. 1229 1228 * 1230 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,1231 * so we cannot reuse the ASIDs without flushing.1229 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while 1230 * flushing the TLB, so we cannot reuse the ASIDs without flushing. 1232 1231 */ 1233 1232 bool fNewAsid = false; … … 1376 1375 aParam[7] = 0; 1377 1376 1378 return SVMR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_SVMRCVMRun64, RT_ELEMENTS(aParam), &aParam[0]);1377 return SVMR0Execute64BitsHandler(pVCpu, HM64ON32OP_SVMRCVMRun64, RT_ELEMENTS(aParam), &aParam[0]); 1379 1378 } 1380 1379 … … 1384 1383 * 1385 1384 * @returns VBox status code. 1386 * @param pVM The cross context VM structure.1387 1385 * @param pVCpu The cross context virtual CPU structure. 1388 * @param pCtx Pointer to the guest-CPU context.1389 1386 * @param enmOp The operation to perform. 1390 1387 * @param cParams Number of parameters. 1391 1388 * @param paParam Array of 32-bit parameters. 
1392 1389 */ 1393 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,1394 uint32_t cParams, uint32_t *paParam) 1395 { 1390 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cParams, uint32_t *paParam) 1391 { 1392 PVM pVM = pVCpu->CTX_SUFF(pVM); 1396 1393 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 1397 1394 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END); 1398 1395 1399 NOREF(pCtx);1400 1401 1396 /* Disable interrupts. */ 1402 RTHCUINTREG uOldEFlags = ASMIntDisableFlags();1397 RTHCUINTREG const fEFlags = ASMIntDisableFlags(); 1403 1398 1404 1399 #ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI … … 1412 1407 CPUMPushHyper(pVCpu, paParam[i]); 1413 1408 1409 /* Call the switcher. */ 1414 1410 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z); 1415 /* Call the switcher. */1416 1411 int rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum)); 1417 1412 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z); 1418 1413 1419 1414 /* Restore interrupts. */ 1420 ASMSetFlags( uOldEFlags);1415 ASMSetFlags(fEFlags); 1421 1416 return rc; 1422 1417 } … … 1446 1441 * 1447 1442 * @param pVCpu The cross context virtual CPU structure. 1448 * @param pCtx Pointer to the guest-CPU context.1449 1443 * @param pVmcb Pointer to the VM control block. 1450 1444 * @param uXcpt The exception (X86_XCPT_*). … … 1454 1448 * are not intercepting it. 1455 1449 */ 1456 DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPU pVCpu, P CCPUMCTX pCtx, PSVMVMCB pVmcb, uint8_t uXcpt)1450 DECLINLINE(void) hmR0SvmClearXcptIntercept(PVMCPU pVCpu, PSVMVMCB pVmcb, uint8_t uXcpt) 1457 1451 { 1458 1452 Assert(uXcpt != X86_XCPT_DB); … … 1464 1458 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 1465 1459 /* Only remove the intercept if the nested-guest is also not intercepting it! */ 1460 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1466 1461 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 1467 1462 { … … 1469 1464 fRemove = !(pVmcbNstGstCache->u32InterceptXcpt & RT_BIT(uXcpt)); 1470 1465 } 1471 #else1472 RT_NOREF2(pVCpu, pCtx);1473 1466 #endif 1474 1467 if (fRemove) … … 1479 1472 } 1480 1473 #else 1481 RT_NOREF3(pVCpu, p Ctx, pVmcb);1474 RT_NOREF3(pVCpu, pVmcb, uXcpt); 1482 1475 #endif 1483 1476 } … … 1540 1533 1541 1534 /** 1542 * Loads the guest (or nested-guest) CR0 control register into the guest-state 1543 * area in the VMCB. 1544 * 1545 * Although the guest CR0 is a separate field in the VMCB we have to consider 1546 * the FPU state itself which is shared between the host and the guest. 1547 * 1548 * @returns VBox status code. 1535 * Exports the guest (or nested-guest) CR0 into the VMCB. 1536 * 1549 1537 * @param pVCpu The cross context virtual CPU structure. 1550 1538 * @param pVmcb Pointer to the VM control block. 1551 * @param pCtx Pointer to the guest-CPU context.1552 * 1539 * 1540 * @remarks This assumes we always pre-load the guest FPU. 1553 1541 * @remarks No-long-jump zone!!! 1554 1542 */ 1555 static void hmR0Svm LoadSharedCR0(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)1556 { 1557 /* The guest FPU is now always pre-loaded before executing guest code, see @bugref{7243#c101}. 
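SVMR0Execute64BitsHandler above drops the pVM and pCtx parameters, derives the VM pointer from the VCPU, and keeps interrupts disabled across the 32-to-64 switcher call. The following is a loose sketch of that calling pattern with placeholder types; the real switcher and CPUMPushHyper are only hinted at in comments.

```c
/* Loose sketch of the reworked 64-bit-handler call; all names and types are
 * placeholders, not the real VirtualBox API. */
#include <stdint.h>
#include <stddef.h>

typedef struct VMSTUB   { int dummy; } VMSTUB;
typedef struct VCPUSTUB { VMSTUB *pVM; uint32_t aStack[8]; size_t cPushed; } VCPUSTUB;

void pushHyper(VCPUSTUB *pVCpu, uint32_t u32)
{
    if (pVCpu->cPushed < 8)
        pVCpu->aStack[pVCpu->cPushed++] = u32;
}

int execute64BitsHandler(VCPUSTUB *pVCpu, uint32_t cParams, uint32_t const *paParam)
{
    VMSTUB *pVM = pVCpu->pVM;           /* derived, no longer a separate argument */
    (void)pVM;

    /* Interrupts would be disabled here and restored before returning. */
    for (uint32_t i = 0; i < cParams; i++)
        pushHyper(pVCpu, paParam[i]);   /* arguments for the 64-bit handler */

    int rc = 0;                         /* ...call the 32->64 switcher here... */
    return rc;
}
```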
*/1558 Assert(CPUMIsGuestFPUStateActive(pVCpu)); 1559 1543 static void hmR0SvmExportGuestCR0(PVMCPU pVCpu, PSVMVMCB pVmcb) 1544 { 1545 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1546 1547 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1560 1548 uint64_t const uGuestCr0 = pCtx->cr0; 1561 1549 uint64_t uShadowCr0 = uGuestCr0; … … 1564 1552 uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1565 1553 1566 /* When Nested Paging is not available use shadow page tables and intercept #PFs ( thelatter done in SVMR0SetupVM()). */1554 /* When Nested Paging is not available use shadow page tables and intercept #PFs (latter done in SVMR0SetupVM()). */ 1567 1555 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging) 1568 1556 { … … 1572 1560 1573 1561 /* 1574 * Use the #MF style of legacy-FPU error reporting for now. Although AMD-V has MSRs that lets us 1575 * isolate the host from it, IEM/REM still needs work to emulate it properly. see @bugref{7243#c103}. 1562 * Use the #MF style of legacy-FPU error reporting for now. Although AMD-V has MSRs that 1563 * lets us isolate the host from it, IEM/REM still needs work to emulate it properly, 1564 * see @bugref{7243#c103}. 1576 1565 */ 1577 1566 if (!(uGuestCr0 & X86_CR0_NE)) … … 1581 1570 } 1582 1571 else 1583 hmR0SvmClearXcptIntercept(pVCpu, p Ctx, pVmcb, X86_XCPT_MF);1572 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_MF); 1584 1573 1585 1574 /* 1586 1575 * If the shadow and guest CR0 are identical we can avoid intercepting CR0 reads. 1587 1576 * 1588 * CR0 writes still needs interception as PGM requires tracking paging mode changes, see @bugref{6944}. 1589 * We also don't ever want to honor weird things like cache disable from the guest. However, we can 1590 * avoid intercepting changes to the TS & MP bits by clearing the CR0 write intercept below and keeping 1591 * SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead. 1577 * CR0 writes still needs interception as PGM requires tracking paging mode changes, 1578 * see @bugref{6944}. 1579 * 1580 * We also don't ever want to honor weird things like cache disable from the guest. 1581 * However, we can avoid intercepting changes to the TS & MP bits by clearing the CR0 1582 * write intercept below and keeping SVM_CTRL_INTERCEPT_CR0_SEL_WRITE instead. 1592 1583 */ 1593 1584 if (uShadowCr0 == uGuestCr0) … … 1626 1617 1627 1618 /** 1628 * Loads the guest/nested-guest control registers (CR2, CR3, CR4) into the VMCB. 1619 * Exports the guest (or nested-guest) CR3 into the VMCB. 1620 * 1621 * @param pVCpu The cross context virtual CPU structure. 1622 * @param pVmcb Pointer to the VM control block. 1623 * 1624 * @remarks No-long-jump zone!!! 1625 */ 1626 static void hmR0SvmExportGuestCR3(PVMCPU pVCpu, PSVMVMCB pVmcb) 1627 { 1628 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1629 1630 PVM pVM = pVCpu->CTX_SUFF(pVM); 1631 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1632 if (pVM->hm.s.fNestedPaging) 1633 { 1634 PGMMODE enmShwPagingMode; 1635 #if HC_ARCH_BITS == 32 1636 if (CPUMIsGuestInLongModeEx(pCtx)) 1637 enmShwPagingMode = PGMMODE_AMD64_NX; 1638 else 1639 #endif 1640 enmShwPagingMode = PGMGetHostMode(pVM); 1641 1642 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode); 1643 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP; 1644 pVmcb->guest.u64CR3 = pCtx->cr3; 1645 Assert(pVmcb->ctrl.u64NestedPagingCR3); 1646 } 1647 else 1648 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu); 1649 1650 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1651 } 1652 1653 1654 /** 1655 * Exports the guest (or nested-guest) CR4 into the VMCB. 
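hmR0SvmExportGuestCR0() above computes a shadow CR0 for the VMCB: cache-disable requests are never honored, the legacy #MF style of FPU error reporting is intercepted when the guest clears CR0.NE, and CR0 read intercepts are dropped when the shadow and guest values match. A simplified model of that computation follows; the intercept handling is reduced to booleans and the shadow-paging adjustments are elided.

```c
/* Simplified model of the shadow-CR0 computation. */
#include <stdint.h>
#include <stdbool.h>

#define X86_CR0_NE (UINT64_C(1) << 5)
#define X86_CR0_NW (UINT64_C(1) << 29)
#define X86_CR0_CD (UINT64_C(1) << 30)

uint64_t computeShadowCr0(uint64_t uGuestCr0, bool *pfInterceptMF, bool *pfInterceptCr0Reads)
{
    uint64_t uShadowCr0 = uGuestCr0;

    /* Never honor cache-disable requests from the guest. */
    uShadowCr0 &= ~(X86_CR0_CD | X86_CR0_NW);

    /* Use legacy #MF-style FPU error reporting when the guest clears CR0.NE. */
    *pfInterceptMF = !(uGuestCr0 & X86_CR0_NE);

    /* CR0 reads only need intercepting when shadow and guest values differ. */
    *pfInterceptCr0Reads = (uShadowCr0 != uGuestCr0);
    return uShadowCr0;
}
```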
1656 * 1657 * @param pVCpu The cross context virtual CPU structure. 1658 * @param pVmcb Pointer to the VM control block. 1659 * 1660 * @remarks No-long-jump zone!!! 1661 */ 1662 static int hmR0SvmExportGuestCR4(PVMCPU pVCpu, PSVMVMCB pVmcb) 1663 { 1664 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1665 1666 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1667 uint64_t uShadowCr4 = pCtx->cr4; 1668 if (!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging) 1669 { 1670 switch (pVCpu->hm.s.enmShadowMode) 1671 { 1672 case PGMMODE_REAL: 1673 case PGMMODE_PROTECTED: /* Protected mode, no paging. */ 1674 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1675 1676 case PGMMODE_32_BIT: /* 32-bit paging. */ 1677 uShadowCr4 &= ~X86_CR4_PAE; 1678 break; 1679 1680 case PGMMODE_PAE: /* PAE paging. */ 1681 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */ 1682 /** Must use PAE paging as we could use physical memory > 4 GB */ 1683 uShadowCr4 |= X86_CR4_PAE; 1684 break; 1685 1686 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */ 1687 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */ 1688 #ifdef VBOX_ENABLE_64_BITS_GUESTS 1689 break; 1690 #else 1691 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1692 #endif 1693 1694 default: /* shut up gcc */ 1695 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1696 } 1697 } 1698 1699 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */ 1700 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 1701 1702 /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */ 1703 if (uShadowCr4 == pCtx->cr4) 1704 { 1705 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 1706 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4); 1707 else 1708 { 1709 /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */ 1710 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu); 1711 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4)) 1712 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4)); 1713 } 1714 } 1715 else 1716 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4); 1717 1718 /* CR4 writes are always intercepted (both guest, nested-guest) for tracking PGM mode changes. */ 1719 Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4)); 1720 1721 /* Update VMCB with the shadow CR4 the appropriate VMCB clean bits. */ 1722 Assert(RT_HI_U32(uShadowCr4) == 0); 1723 pVmcb->guest.u64CR4 = uShadowCr4; 1724 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS); 1725 1726 return VINF_SUCCESS; 1727 } 1728 1729 1730 /** 1731 * Exports the guest (or nested-guest) control registers into the VMCB. 1629 1732 * 1630 1733 * @returns VBox status code. 1631 1734 * @param pVCpu The cross context virtual CPU structure. 1632 1735 * @param pVmcb Pointer to the VM control block. 1633 * @param pCtx Pointer to the guest-CPU context.1634 1736 * 1635 1737 * @remarks No-long-jump zone!!! 1636 1738 */ 1637 static int hmR0SvmLoadGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1638 { 1639 PVM pVM = pVCpu->CTX_SUFF(pVM); 1640 1641 /* 1642 * Guest CR2. 1643 */ 1644 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR2)) 1645 { 1646 pVmcb->guest.u64CR2 = pCtx->cr2; 1647 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2; 1648 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2); 1649 } 1650 1651 /* 1652 * Guest CR3. 
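hmR0SvmExportGuestCR4() above adjusts the PAE bit of the shadow CR4 to match the shadow paging mode and rejects unsupported modes. A condensed sketch with placeholder enum and error values:

```c
/* Condensed model of the shadow-CR4 selection; enum and error values are
 * placeholders, not the real PGMMODE_* or VERR_* constants. */
#include <stdint.h>

#define X86_CR4_PAE (UINT64_C(1) << 5)

typedef enum { PGMODE_32BIT, PGMODE_PAE, PGMODE_AMD64, PGMODE_UNSUPPORTED } SHWMODE;

int computeShadowCr4(uint64_t uGuestCr4, SHWMODE enmShwMode, uint64_t *puShadowCr4)
{
    uint64_t uShadowCr4 = uGuestCr4;
    switch (enmShwMode)
    {
        case PGMODE_32BIT: uShadowCr4 &= ~X86_CR4_PAE; break; /* plain 32-bit paging */
        case PGMODE_PAE:   uShadowCr4 |=  X86_CR4_PAE; break; /* needed for >4GB phys */
        case PGMODE_AMD64: break;                             /* long mode: leave as-is */
        default:           return -1;  /* ~ VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE */
    }
    *puShadowCr4 = uShadowCr4;
    return 0;
}
```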
1653 */ 1654 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3)) 1655 { 1656 if (pVM->hm.s.fNestedPaging) 1657 { 1658 PGMMODE enmShwPagingMode; 1659 #if HC_ARCH_BITS == 32 1660 if (CPUMIsGuestInLongModeEx(pCtx)) 1661 enmShwPagingMode = PGMMODE_AMD64_NX; 1662 else 1663 #endif 1664 enmShwPagingMode = PGMGetHostMode(pVM); 1665 1666 pVmcb->ctrl.u64NestedPagingCR3 = PGMGetNestedCR3(pVCpu, enmShwPagingMode); 1667 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_NP; 1668 Assert(pVmcb->ctrl.u64NestedPagingCR3); 1669 pVmcb->guest.u64CR3 = pCtx->cr3; 1670 } 1671 else 1672 { 1673 pVmcb->guest.u64CR3 = PGMGetHyperCR3(pVCpu); 1674 Log4(("hmR0SvmLoadGuestControlRegs: CR3=%#RX64 (HyperCR3=%#RX64)\n", pCtx->cr3, pVmcb->guest.u64CR3)); 1675 } 1676 1677 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1678 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR3); 1679 } 1680 1681 /* 1682 * Guest CR4. 1683 * ASSUMES this is done everytime we get in from ring-3! (XCR0) 1684 */ 1685 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4)) 1686 { 1687 uint64_t uShadowCr4 = pCtx->cr4; 1688 if (!pVM->hm.s.fNestedPaging) 1689 { 1690 switch (pVCpu->hm.s.enmShadowMode) 1691 { 1692 case PGMMODE_REAL: 1693 case PGMMODE_PROTECTED: /* Protected mode, no paging. */ 1694 AssertFailed(); 1695 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1696 1697 case PGMMODE_32_BIT: /* 32-bit paging. */ 1698 uShadowCr4 &= ~X86_CR4_PAE; 1699 break; 1700 1701 case PGMMODE_PAE: /* PAE paging. */ 1702 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */ 1703 /** Must use PAE paging as we could use physical memory > 4 GB */ 1704 uShadowCr4 |= X86_CR4_PAE; 1705 break; 1706 1707 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */ 1708 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */ 1709 #ifdef VBOX_ENABLE_64_BITS_GUESTS 1710 break; 1711 #else 1712 AssertFailed(); 1713 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1714 #endif 1715 1716 default: /* shut up gcc */ 1717 AssertFailed(); 1718 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE; 1719 } 1720 } 1721 1722 /* Whether to save/load/restore XCR0 during world switch depends on CR4.OSXSAVE and host+guest XCR0. */ 1723 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 1724 1725 /* Avoid intercepting CR4 reads if the guest and shadow CR4 values are identical. */ 1726 if (uShadowCr4 == pCtx->cr4) 1727 { 1728 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 1729 pVmcb->ctrl.u16InterceptRdCRx &= ~RT_BIT(4); 1730 else 1731 { 1732 /* If the nested-hypervisor intercepts CR4 reads, we need to continue intercepting them. */ 1733 PCSVMNESTEDVMCBCACHE pVmcbNstGstCache = hmR0SvmGetNestedVmcbCache(pVCpu); 1734 pVmcb->ctrl.u16InterceptRdCRx = (pVmcb->ctrl.u16InterceptRdCRx & ~RT_BIT(4)) 1735 | (pVmcbNstGstCache->u16InterceptRdCRx & RT_BIT(4)); 1736 } 1737 } 1738 else 1739 pVmcb->ctrl.u16InterceptRdCRx |= RT_BIT(4); 1740 1741 /* CR4 writes are always intercepted (both guest, nested-guest) from tracking PGM mode changes. */ 1742 Assert(pVmcb->ctrl.u16InterceptWrCRx & RT_BIT(4)); 1743 1744 /* Update VMCB with the shadow CR4 the appropriate VMCB clean bits. 
*/ 1745 Assert(RT_HI_U32(uShadowCr4) == 0); 1746 pVmcb->guest.u64CR4 = uShadowCr4; 1747 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_CRX_EFER | HMSVM_VMCB_CLEAN_INTERCEPTS); 1748 1749 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR4); 1750 } 1751 1739 static int hmR0SvmExportGuestControlRegs(PVMCPU pVCpu, PSVMVMCB pVmcb) 1740 { 1741 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1742 1743 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR_MASK) 1744 { 1745 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR0) 1746 hmR0SvmExportGuestCR0(pVCpu, pVmcb); 1747 1748 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR2) 1749 { 1750 pVmcb->guest.u64CR2 = pVCpu->cpum.GstCtx.cr2; 1751 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CR2; 1752 } 1753 1754 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR3) 1755 hmR0SvmExportGuestCR3(pVCpu, pVmcb); 1756 1757 /* CR4 re-loading is ASSUMED to be done everytime we get in from ring-3! (XCR0) */ 1758 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CR4) 1759 { 1760 int rc = hmR0SvmExportGuestCR4(pVCpu, pVmcb); 1761 if (RT_FAILURE(rc)) 1762 return rc; 1763 } 1764 1765 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_CR_MASK; 1766 } 1752 1767 return VINF_SUCCESS; 1753 1768 } … … 1755 1770 1756 1771 /** 1757 * Loads the guest (or nested-guest) segment registers into the VMCB.1772 * Exports the guest (or nested-guest) segment registers into the VMCB. 1758 1773 * 1759 1774 * @returns VBox status code. 1760 1775 * @param pVCpu The cross context virtual CPU structure. 1761 1776 * @param pVmcb Pointer to the VM control block. 1762 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context.1763 1777 * 1764 1778 * @remarks No-long-jump zone!!! 1765 1779 */ 1766 static void hmR0SvmLoadGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1767 { 1768 /* Guest Segment registers: CS, SS, DS, ES, FS, GS. */ 1769 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS)) 1770 { 1771 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs); 1772 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss); 1773 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds); 1774 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es); 1775 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs); 1776 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs); 1777 1778 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl; 1780 static void hmR0SvmExportGuestSegmentRegs(PVMCPU pVCpu, PSVMVMCB pVmcb) 1781 { 1782 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1783 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1784 1785 /* Guest segment registers. 
*/ 1786 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SREG_MASK) 1787 { 1788 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_CS) 1789 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, CS, cs); 1790 1791 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SS) 1792 { 1793 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, SS, ss); 1794 pVmcb->guest.u8CPL = pCtx->ss.Attr.n.u2Dpl; 1795 } 1796 1797 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DS) 1798 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, DS, ds); 1799 1800 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_ES) 1801 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, ES, es); 1802 1803 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_FS) 1804 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, FS, fs); 1805 1806 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GS) 1807 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, GS, gs); 1808 1779 1809 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG; 1780 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);1781 1810 } 1782 1811 1783 1812 /* Guest TR. */ 1784 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR)) 1785 { 1813 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_TR) 1786 1814 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, TR, tr); 1787 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_TR);1788 }1789 1815 1790 1816 /* Guest LDTR. */ 1791 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR)) 1792 { 1817 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_LDTR) 1793 1818 HMSVM_SEG_REG_COPY_TO_VMCB(pCtx, &pVmcb->guest, LDTR, ldtr); 1794 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_LDTR);1795 }1796 1819 1797 1820 /* Guest GDTR. */ 1798 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))1821 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_GDTR) 1799 1822 { 1800 1823 pVmcb->guest.GDTR.u32Limit = pCtx->gdtr.cbGdt; 1801 1824 pVmcb->guest.GDTR.u64Base = pCtx->gdtr.pGdt; 1802 1825 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT; 1803 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_GDTR);1804 1826 } 1805 1827 1806 1828 /* Guest IDTR. */ 1807 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))1829 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_IDTR) 1808 1830 { 1809 1831 pVmcb->guest.IDTR.u32Limit = pCtx->idtr.cbIdt; 1810 1832 pVmcb->guest.IDTR.u64Base = pCtx->idtr.pIdt; 1811 1833 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DT; 1812 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_IDTR); 1813 } 1814 } 1815 1816 1817 /** 1818 * Loads the guest (or nested-guest) MSRs into the VMCB. 1834 } 1835 1836 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SREG_MASK 1837 | HM_CHANGED_GUEST_TABLE_MASK); 1838 } 1839 1840 1841 /** 1842 * Exports the guest (or nested-guest) MSRs into the VMCB. 1819 1843 * 1820 1844 * @param pVCpu The cross context virtual CPU structure. … … 1824 1848 * @remarks No-long-jump zone!!! 1825 1849 */ 1826 static void hmR0SvmLoadGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 1827 { 1850 static void hmR0SvmExportGuestMsrs(PVMCPU pVCpu, PSVMVMCB pVmcb) 1851 { 1852 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1853 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1854 1828 1855 /* Guest Sysenter MSRs. 
*/ 1829 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR)) 1830 { 1831 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs; 1832 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 1833 } 1834 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR)) 1835 { 1836 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip; 1837 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 1838 } 1839 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR)) 1840 { 1841 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp; 1842 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 1856 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_MSR_MASK) 1857 { 1858 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_CS_MSR) 1859 pVmcb->guest.u64SysEnterCS = pCtx->SysEnter.cs; 1860 1861 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_EIP_MSR) 1862 pVmcb->guest.u64SysEnterEIP = pCtx->SysEnter.eip; 1863 1864 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSENTER_ESP_MSR) 1865 pVmcb->guest.u64SysEnterESP = pCtx->SysEnter.esp; 1843 1866 } 1844 1867 … … 1848 1871 * See AMD spec. 15.5.1 "Basic Operation" | "Canonicalization and Consistency Checks". 1849 1872 */ 1850 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR))1873 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_EFER_MSR) 1851 1874 { 1852 1875 pVmcb->guest.u64EFER = pCtx->msrEFER | MSR_K6_EFER_SVME; 1853 1876 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1854 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_EFER_MSR); 1855 } 1856 1857 /* 64-bit MSRs. */ 1858 if (CPUMIsGuestInLongModeEx(pCtx)) 1859 { 1860 /* Load these always as the guest may modify FS/GS base using MSRs in 64-bit mode which we don't intercept. */ 1861 //pVmcb->guest.FS.u64Base = pCtx->fs.u64Base; 1862 //pVmcb->guest.GS.u64Base = pCtx->gs.u64Base; 1863 //pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_SEG; 1864 } 1865 else 1866 { 1867 /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit from guest EFER otherwise AMD-V expects amd64 shadow paging. */ 1868 if (pCtx->msrEFER & MSR_K6_EFER_LME) 1869 { 1870 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME; 1871 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1872 } 1873 } 1874 1875 /** @todo HM_CHANGED_GUEST_SYSCALL_MSRS, 1876 * HM_CHANGED_GUEST_KERNEL_GS_BASE */ 1877 pVmcb->guest.u64STAR = pCtx->msrSTAR; 1878 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR; 1879 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR; 1880 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK; 1881 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE; 1877 } 1878 1879 /* If the guest isn't in 64-bit mode, clear MSR_K6_LME bit, otherwise SVM expects amd64 shadow paging. 
*/ 1880 if ( !CPUMIsGuestInLongModeEx(pCtx) 1881 && (pCtx->msrEFER & MSR_K6_EFER_LME)) 1882 { 1883 pVmcb->guest.u64EFER &= ~MSR_K6_EFER_LME; 1884 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_CRX_EFER; 1885 } 1886 1887 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_SYSCALL_MSRS) 1888 { 1889 pVmcb->guest.u64STAR = pCtx->msrSTAR; 1890 pVmcb->guest.u64LSTAR = pCtx->msrLSTAR; 1891 pVmcb->guest.u64CSTAR = pCtx->msrCSTAR; 1892 pVmcb->guest.u64SFMASK = pCtx->msrSFMASK; 1893 } 1894 1895 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_KERNEL_GS_BASE) 1896 pVmcb->guest.u64KernelGSBase = pCtx->msrKERNELGSBASE; 1897 1898 pVCpu->hm.s.fCtxChanged &= ~( HM_CHANGED_GUEST_SYSENTER_MSR_MASK 1899 | HM_CHANGED_GUEST_EFER_MSR 1900 | HM_CHANGED_GUEST_SYSCALL_MSRS 1901 | HM_CHANGED_GUEST_KERNEL_GS_BASE); 1882 1902 1883 1903 /* 1884 1904 * Setup the PAT MSR (applicable for Nested Paging only). 1885 1905 * 1886 * While guests can modify and see the modified values throug the shadow values,1906 * While guests can modify and see the modified values through the shadow values, 1887 1907 * we shall not honor any guest modifications of this MSR to ensure caching is always 1888 * enabled similar to how we always run with CR0.CD and NW bits cleared.1908 * enabled similar to how we clear CR0.CD and NW bits. 1889 1909 * 1890 1910 * For nested-guests this needs to always be set as well, see @bugref{7243#c109}. … … 1899 1919 1900 1920 /** 1901 * Loads the guest (or nested-guest) debug state into the VMCB and programs the1902 * necessary intercepts accordingly.1921 * Exports the guest (or nested-guest) debug state into the VMCB and programs 1922 * the necessary intercepts accordingly. 1903 1923 * 1904 1924 * @param pVCpu The cross context virtual CPU structure. 1905 1925 * @param pVmcb Pointer to the VM control block. 1906 * @param pCtx Pointer to the guest-CPU context.1907 1926 * 1908 1927 * @remarks No-long-jump zone!!! 1909 1928 * @remarks Requires EFLAGS to be up-to-date in the VMCB! 1910 1929 */ 1911 static void hmR0Svm LoadSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)1912 { 1913 bool fInterceptMovDRx = false;1930 static void hmR0SvmExportSharedDebugState(PVMCPU pVCpu, PSVMVMCB pVmcb) 1931 { 1932 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 1914 1933 1915 1934 /* … … 1918 1937 * the VMM level like the VT-x implementations does. 1919 1938 */ 1939 bool fInterceptMovDRx = false; 1920 1940 bool const fStepping = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu); 1921 1941 if (fStepping) … … 1960 1980 pVmcb->guest.u64DR6 = X86_DR6_INIT_VAL; 1961 1981 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX; 1962 pVCpu->hm.s.fUsingHyperDR7 = true;1963 1982 } 1964 1983 … … 1966 1985 * with the same values. 
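The EFER handling above always forces SVME in the VMCB copy and strips LME when the guest is not actually in long mode, since AMD-V would otherwise expect amd64 shadow paging. A small model of that fix-up, using the architectural EFER bit positions:

```c
/* Small model of the EFER fix-up applied when exporting MSRs to the VMCB. */
#include <stdint.h>
#include <stdbool.h>

#define MSR_EFER_LME  (UINT64_C(1) << 8)
#define MSR_EFER_SVME (UINT64_C(1) << 12)

uint64_t computeVmcbEfer(uint64_t uGuestEfer, bool fGuestInLongMode)
{
    uint64_t uVmcbEfer = uGuestEfer | MSR_EFER_SVME;  /* required while SVM is enabled */
    if (!fGuestInLongMode && (uVmcbEfer & MSR_EFER_LME))
        uVmcbEfer &= ~MSR_EFER_LME;                   /* avoid amd64 shadow paging */
    return uVmcbEfer;
}
```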
*/ 1967 1986 fInterceptMovDRx = true; 1968 Log5(("hmR0SvmLoadSharedDebugState: Loaded hyper DRx\n")); 1987 pVCpu->hm.s.fUsingHyperDR7 = true; 1988 Log5(("hmR0SvmExportSharedDebugState: Loaded hyper DRx\n")); 1969 1989 } 1970 1990 else … … 1979 1999 pVmcb->guest.u64DR6 = pCtx->dr[6]; 1980 2000 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_DRX; 1981 pVCpu->hm.s.fUsingHyperDR7 = false;1982 }2001 } 2002 pVCpu->hm.s.fUsingHyperDR7 = false; 1983 2003 1984 2004 /* … … 2006 2026 Assert(CPUMIsGuestDebugStateActive(pVCpu)); 2007 2027 } 2008 Log5(("hmR0Svm LoadSharedDebugState: Loaded guest DRx\n"));2028 Log5(("hmR0SvmExportSharedDebugState: Loaded guest DRx\n")); 2009 2029 } 2010 2030 /* … … 2048 2068 } 2049 2069 } 2050 Log4(("hmR0SvmLoadSharedDebugState: DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7])); 2051 } 2052 2070 Log4Func(("DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7])); 2071 } 2053 2072 2054 2073 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2055 2074 /** 2056 * Loads the nested-guest APIC state (currently just the TPR). 2057 * 2058 * @param pVCpu The cross context virtual CPU structure. 2059 * @param pVmcbNstGst Pointer to the nested-guest VM control block. 2060 */ 2061 static void hmR0SvmLoadGuestApicStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst) 2062 { 2063 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE)) 2064 { 2065 Assert(pVmcbNstGst->ctrl.IntCtrl.n.u1VIntrMasking == 1); RT_NOREF(pVmcbNstGst); 2066 pVCpu->hm.s.svm.fSyncVTpr = false; 2067 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 2068 } 2069 } 2070 2071 2072 /** 2073 * Loads the nested-guest hardware virtualization state. 2075 * Exports the nested-guest hardware virtualization state into the nested-guest 2076 * VMCB. 2074 2077 * 2075 2078 * @param pVCpu The cross context virtual CPU structure. 2076 2079 * @param pVmcbNstGst Pointer to the nested-guest VM control block. 2077 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. 2078 */ 2079 static void hmR0SvmLoadGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst, PCCPUMCTX pCtx) 2080 { 2081 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_HWVIRT)) 2080 * 2081 * @remarks No-long-jump zone!!! 2082 */ 2083 static void hmR0SvmExportGuestHwvirtStateNested(PVMCPU pVCpu, PSVMVMCB pVmcbNstGst) 2084 { 2085 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 2086 2087 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_HWVIRT) 2082 2088 { 2083 2089 /* … … 2095 2101 if (HMIsGuestSvmCtrlInterceptSet(pVCpu, SVM_CTRL_INTERCEPT_PAUSE)) 2096 2102 { 2103 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2097 2104 pVmcbNstGstCtrl->u16PauseFilterCount = RT_MIN(pCtx->hwvirt.svm.cPauseFilter, uGuestPauseFilterCount); 2098 2105 pVmcbNstGstCtrl->u16PauseFilterThreshold = RT_MIN(pCtx->hwvirt.svm.cPauseFilterThreshold, uGuestPauseFilterThreshold); … … 2105 2112 } 2106 2113 2107 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_HWVIRT);2114 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_HWVIRT; 2108 2115 } 2109 2116 } … … 2111 2118 2112 2119 /** 2113 * Loads the guest APIC state (currently just the TPR).2120 * Exports the guest APIC TPR state into the VMCB. 2114 2121 * 2115 2122 * @returns VBox status code. 2116 2123 * @param pVCpu The cross context virtual CPU structure. 2117 2124 * @param pVmcb Pointer to the VM control block. 2118 * @param pCtx Pointer to the guest-CPU context. 
2119 */ 2120 static int hmR0SvmLoadGuestApicState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 2121 { 2122 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE)) 2123 return VINF_SUCCESS; 2124 2125 int rc = VINF_SUCCESS; 2126 PVM pVM = pVCpu->CTX_SUFF(pVM); 2127 if ( PDMHasApic(pVM) 2128 && APICIsEnabled(pVCpu)) 2129 { 2130 bool fPendingIntr; 2131 uint8_t u8Tpr; 2132 rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */); 2133 AssertRCReturn(rc, rc); 2134 2135 /* Assume that we need to trap all TPR accesses and thus need not check on 2136 every #VMEXIT if we should update the TPR. */ 2137 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking); 2138 pVCpu->hm.s.svm.fSyncVTpr = false; 2139 2140 if (!pVM->hm.s.fTPRPatchingActive) 2141 { 2142 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */ 2143 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4); 2144 2145 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we can deliver the interrupt to the guest. */ 2146 if (fPendingIntr) 2147 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8); 2125 */ 2126 static int hmR0SvmExportGuestApicTpr(PVMCPU pVCpu, PSVMVMCB pVmcb) 2127 { 2128 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR) 2129 { 2130 PVM pVM = pVCpu->CTX_SUFF(pVM); 2131 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2132 if ( PDMHasApic(pVM) 2133 && APICIsEnabled(pVCpu)) 2134 { 2135 bool fPendingIntr; 2136 uint8_t u8Tpr; 2137 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, NULL /* pu8PendingIrq */); 2138 AssertRCReturn(rc, rc); 2139 2140 /* Assume that we need to trap all TPR accesses and thus need not check on 2141 every #VMEXIT if we should update the TPR. */ 2142 Assert(pVmcb->ctrl.IntCtrl.n.u1VIntrMasking); 2143 pVCpu->hm.s.svm.fSyncVTpr = false; 2144 2145 if (!pVM->hm.s.fTPRPatchingActive) 2146 { 2147 /* Bits 3-0 of the VTPR field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */ 2148 pVmcb->ctrl.IntCtrl.n.u8VTPR = (u8Tpr >> 4); 2149 2150 /* If there are interrupts pending, intercept CR8 writes to evaluate ASAP if we 2151 can deliver the interrupt to the guest. */ 2152 if (fPendingIntr) 2153 pVmcb->ctrl.u16InterceptWrCRx |= RT_BIT(8); 2154 else 2155 { 2156 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8); 2157 pVCpu->hm.s.svm.fSyncVTpr = true; 2158 } 2159 2160 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL); 2161 } 2148 2162 else 2149 2163 { 2150 pVmcb->ctrl.u16InterceptWrCRx &= ~RT_BIT(8); 2151 pVCpu->hm.s.svm.fSyncVTpr = true; 2164 /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. */ 2165 pVmcb->guest.u64LSTAR = u8Tpr; 2166 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap; 2167 2168 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */ 2169 if (fPendingIntr) 2170 hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE); 2171 else 2172 { 2173 hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE); 2174 pVCpu->hm.s.svm.fSyncVTpr = true; 2175 } 2176 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM; 2152 2177 } 2153 2154 pVmcb->ctrl.u32VmcbCleanBits &= ~(HMSVM_VMCB_CLEAN_INTERCEPTS | HMSVM_VMCB_CLEAN_INT_CTRL); 2155 } 2156 else 2157 { 2158 /* 32-bit guests uses LSTAR MSR for patching guest code which touches the TPR. 
*/ 2159 pVmcb->guest.u64LSTAR = u8Tpr; 2160 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap; 2161 2162 /* If there are interrupts pending, intercept LSTAR writes, otherwise don't intercept reads or writes. */ 2163 if (fPendingIntr) 2164 hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_INTERCEPT_WRITE); 2165 else 2166 { 2167 hmR0SvmSetMsrPermission(pCtx, pbMsrBitmap, MSR_K8_LSTAR, SVMMSREXIT_PASSTHRU_READ, SVMMSREXIT_PASSTHRU_WRITE); 2168 pVCpu->hm.s.svm.fSyncVTpr = true; 2169 } 2170 pVmcb->ctrl.u32VmcbCleanBits &= ~HMSVM_VMCB_CLEAN_IOPM_MSRPM; 2171 } 2172 } 2173 2174 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 2175 return rc; 2176 } 2177 2178 2179 /** 2180 * Loads the exception interrupts required for guest (or nested-guest) execution in 2181 * the VMCB. 2178 } 2179 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR); 2180 } 2181 return VINF_SUCCESS; 2182 } 2183 2184 2185 /** 2186 * Sets up the exception interrupts required for guest (or nested-guest) 2187 * execution in the VMCB. 2182 2188 * 2183 2189 * @param pVCpu The cross context virtual CPU structure. 2184 2190 * @param pVmcb Pointer to the VM control block. 2185 * @param pCtx Pointer to the guest-CPU context. 2186 */ 2187 static void hmR0SvmLoadGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx) 2188 { 2189 /* If we modify intercepts from here, please check & adjust hmR0SvmLoadGuestXcptInterceptsNested() 2190 if required. */ 2191 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS)) 2191 * 2192 * @remarks No-long-jump zone!!! 2193 */ 2194 static void hmR0SvmExportGuestXcptIntercepts(PVMCPU pVCpu, PSVMVMCB pVmcb) 2195 { 2196 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 2197 2198 /* If we modify intercepts from here, please check & adjust hmR0SvmMergeVmcbCtrlsNested() if required. */ 2199 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS) 2192 2200 { 2193 2201 /* Trap #UD for GIM provider (e.g. for hypercalls). */ … … 2195 2203 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_UD); 2196 2204 else 2197 hmR0SvmClearXcptIntercept(pVCpu, p Ctx, pVmcb, X86_XCPT_UD);2205 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_UD); 2198 2206 2199 2207 /* Trap #BP for INT3 debug breakpoints set by the VM debugger. */ … … 2201 2209 hmR0SvmSetXcptIntercept(pVmcb, X86_XCPT_BP); 2202 2210 else 2203 hmR0SvmClearXcptIntercept(pVCpu, p Ctx, pVmcb, X86_XCPT_BP);2204 2205 /* The remaining intercepts are handled elsewhere, e.g. in hmR0Svm LoadSharedCR0(). */2206 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS);2211 hmR0SvmClearXcptIntercept(pVCpu, pVmcb, X86_XCPT_BP); 2212 2213 /* The remaining intercepts are handled elsewhere, e.g. in hmR0SvmExportGuestCR0(). */ 2214 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS; 2207 2215 } 2208 2216 } … … 2258 2266 * 2259 2267 * Warning!! This ASSUMES we only intercept \#UD for hypercall purposes and \#BP 2260 * for VM debugger breakpoints, see hmR0Svm LoadGuestXcptIntercepts.2268 * for VM debugger breakpoints, see hmR0SvmExportGuestXcptIntercepts(). 2261 2269 */ 2262 2270 #ifndef HMSVM_ALWAYS_TRAP_ALL_XCPTS … … 2291 2299 2292 2300 /** 2293 * Se ts upthe appropriate function to run guest code.2301 * Selects the appropriate function to run guest code. 2294 2302 * 2295 2303 * @returns VBox status code. … … 2298 2306 * @remarks No-long-jump zone!!! 
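hmR0SvmExportGuestApicTpr() above maps bits 7-4 of the APIC TPR into the VMCB's 4-bit VTPR field and only intercepts CR8 writes while an interrupt is pending, otherwise syncing the TPR back on #VMEXIT. A sketch of that mapping with an invented state struct:

```c
/* Sketch of the TPR-to-VTPR mapping; VTPRSTATE is an invented stand-in for
 * the relevant VMCB/HM fields. */
#include <stdint.h>
#include <stdbool.h>

typedef struct VTPRSTATE { uint8_t u8VTPR; bool fInterceptCr8Writes; bool fSyncVTpr; } VTPRSTATE;

VTPRSTATE exportTpr(uint8_t u8Tpr, bool fPendingIntr)
{
    VTPRSTATE St;
    St.u8VTPR              = u8Tpr >> 4;    /* task-priority class only */
    St.fInterceptCr8Writes = fPendingIntr;  /* re-check deliverability ASAP */
    St.fSyncVTpr           = !fPendingIntr; /* only sync back on #VMEXIT if not intercepting */
    return St;
}
```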
2299 2307 */ 2300 static int hmR0SvmSe tupVMRunHandler(PVMCPU pVCpu)2308 static int hmR0SvmSelectVMRunHandler(PVMCPU pVCpu) 2301 2309 { 2302 2310 if (CPUMIsGuestInLongMode(pVCpu)) … … 2340 2348 2341 2349 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 2342 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 2350 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)) 2351 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)); 2343 2352 2344 2353 pVCpu->hm.s.fLeaveDone = false; … … 2401 2410 int rc = HMR0EnterCpu(pVCpu); 2402 2411 AssertRC(rc); NOREF(rc); 2403 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 2412 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)) 2413 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)); 2404 2414 2405 2415 pVCpu->hm.s.fLeaveDone = false; … … 2425 2435 * @remarks No-long-jump zone!!! 2426 2436 */ 2427 VMMR0DECL(int) SVMR0SaveHostState(PVM pVM, PVMCPU pVCpu) 2428 { 2429 NOREF(pVM); 2437 VMMR0DECL(int) SVMR0ExportHostState(PVMCPU pVCpu) 2438 { 2430 2439 NOREF(pVCpu); 2440 2431 2441 /* Nothing to do here. AMD-V does this for us automatically during the world-switch. */ 2432 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_HOST_CONTEXT);2442 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_HOST_CONTEXT); 2433 2443 return VINF_SUCCESS; 2434 2444 } … … 2436 2446 2437 2447 /** 2438 * Loads the guest stateinto the VMCB.2448 * Exports the guest state from the guest-CPU context into the VMCB. 2439 2449 * 2440 2450 * The CPU state will be loaded from these fields on every successful VM-entry. … … 2449 2459 * @remarks No-long-jump zone!!! 2450 2460 */ 2451 static int hmR0SvmLoadGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx) 2452 { 2461 static int hmR0SvmExportGuestState(PVMCPU pVCpu) 2462 { 2463 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x); 2464 2465 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; 2466 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2467 2468 Assert(pVmcb); 2453 2469 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); 2454 2455 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb;2456 AssertMsgReturn(pVmcb, ("Invalid pVmcb\n"), VERR_SVM_INVALID_PVMCB);2457 2458 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);2459 2460 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcb, pCtx);2461 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestControlRegs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);2462 2463 hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcb, pCtx);2464 hmR0SvmLoadGuestMsrs(pVCpu, pVmcb, pCtx);2465 2470 2466 2471 pVmcb->guest.u64RIP = pCtx->rip; … … 2468 2473 pVmcb->guest.u64RFlags = pCtx->eflags.u32; 2469 2474 pVmcb->guest.u64RAX = pCtx->rax; 2470 2471 2475 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2472 2476 if (pVmcb->ctrl.IntCtrl.n.u1VGifEnable) 2473 2477 { 2474 Assert(pV M->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF);2478 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.u32Features & X86_CPUID_SVM_FEATURE_EDX_VGIF); 2475 2479 pVmcb->ctrl.IntCtrl.n.u1VGif = pCtx->hwvirt.fGif; 2476 2480 } 2477 2481 #endif 2478 2482 2479 /* hmR0SvmLoadGuestApicState() must be called -after- hmR0SvmLoadGuestMsrs() as we 2480 may overwrite LSTAR MSR in the VMCB in the case of TPR patching. */ 2481 rc = hmR0SvmLoadGuestApicState(pVCpu, pVmcb, pCtx); 2482 AssertLogRelMsgRCReturn(rc, ("hmR0SvmLoadGuestApicState! 
rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 2483 2484 hmR0SvmLoadGuestXcptIntercepts(pVCpu, pVmcb, pCtx); 2485 2486 rc = hmR0SvmSetupVMRunHandler(pVCpu); 2487 AssertLogRelMsgRCReturn(rc, ("hmR0SvmSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 2488 2489 /* Clear any unused and reserved bits. */ 2490 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP /* Unused (loaded unconditionally). */ 2491 | HM_CHANGED_GUEST_RSP 2492 | HM_CHANGED_GUEST_RFLAGS 2493 | HM_CHANGED_GUEST_SYSENTER_CS_MSR 2494 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR 2495 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR 2496 | HM_CHANGED_GUEST_HWVIRT /* Unused. */ 2497 | HM_CHANGED_VMM_GUEST_LAZY_MSRS 2498 | HM_CHANGED_SVM_RESERVED1 /* Reserved. */ 2499 | HM_CHANGED_SVM_RESERVED2 2500 | HM_CHANGED_SVM_RESERVED3 2501 | HM_CHANGED_SVM_RESERVED4); 2502 2503 /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. */ 2504 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST) 2505 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE), 2506 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu))); 2483 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 2484 2485 int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcb); 2486 AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc); 2487 2488 hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcb); 2489 hmR0SvmExportGuestMsrs(pVCpu, pVmcb); 2490 hmR0SvmExportGuestXcptIntercepts(pVCpu, pVmcb); 2491 2492 ASMSetFlags(fEFlags); 2493 2494 /* hmR0SvmExportGuestApicTpr() must be called -after- hmR0SvmExportGuestMsrs() as we 2495 otherwise we would overwrite the LSTAR MSR that we use for TPR patching. */ 2496 hmR0SvmExportGuestApicTpr(pVCpu, pVmcb); 2497 2498 rc = hmR0SvmSelectVMRunHandler(pVCpu); 2499 AssertRCReturn(rc, rc); 2500 2501 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */ 2502 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( HM_CHANGED_GUEST_RIP 2503 | HM_CHANGED_GUEST_RFLAGS 2504 | HM_CHANGED_GUEST_GPRS_MASK 2505 | HM_CHANGED_GUEST_X87 2506 | HM_CHANGED_GUEST_SSE_AVX 2507 | HM_CHANGED_GUEST_OTHER_XSAVE 2508 | HM_CHANGED_GUEST_XCRx 2509 | HM_CHANGED_GUEST_TSC_AUX 2510 | HM_CHANGED_GUEST_OTHER_MSRS 2511 | HM_CHANGED_GUEST_HWVIRT 2512 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS))); 2507 2513 2508 2514 #ifdef VBOX_STRICT 2509 hmR0SvmLogState(pVCpu, pVmcb, pCtx, "hmR0SvmLoadGuestState", 0 /* fFlags */, 0 /* uVerbose */); 2515 /* 2516 * All of the guest-CPU state and SVM keeper bits should be exported here by now, 2517 * except for the host-context and/or shared host-guest context bits. 2518 */ 2519 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 2520 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)), 2521 ("fCtxChanged=%#RX64\n", fCtxChanged)); 2522 2523 /* 2524 * If we need to log state that isn't always imported, we'll need to import them here. 2525 * See hmR0SvmPostRunGuest() for which part of the state is imported uncondtionally. 
2526 */ 2527 hmR0SvmLogState(pVCpu, pVmcb, "hmR0SvmExportGuestState", 0 /* fFlags */, 0 /* uVerbose */); 2510 2528 #endif 2511 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x); 2512 return rc; 2529 2530 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x); 2531 return VINF_SUCCESS; 2513 2532 } 2514 2533 … … 2575 2594 pVmcbNstGstCache->fLbrVirt = pVmcbNstGstCtrl->LbrVirt.n.u1LbrVirt; 2576 2595 pVmcbNstGstCache->fCacheValid = true; 2577 Log4 (("hmR0SvmCacheVmcbNested:Cached VMCB fields\n"));2596 Log4Func(("Cached VMCB fields\n")); 2578 2597 } 2579 2598 … … 2590 2609 * 2591 2610 * @param pVCpu The cross context virtual CPU structure. 2592 * @param pCtx Pointer to the nested-guest-CPU context.2593 */ 2594 static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu, PCCPUMCTX pCtx) 2595 { 2611 */ 2612 static void hmR0SvmSetupVmcbNested(PVMCPU pVCpu) 2613 { 2614 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2596 2615 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 2597 2616 PSVMVMCBCTRL pVmcbNstGstCtrl = &pVmcbNstGst->ctrl; … … 2619 2638 /* Always enable V_INTR_MASKING as we do not want to allow access to the physical APIC TPR. */ 2620 2639 pVmcbNstGstCtrl->IntCtrl.n.u1VIntrMasking = 1; 2640 2641 /* 2642 * Turn off TPR syncing on #VMEXIT for nested-guests as CR8 intercepts are subject 2643 * to the nested-guest intercepts and we always run with V_INTR_MASKING. 2644 */ 2645 pVCpu->hm.s.svm.fSyncVTpr = false; 2621 2646 2622 2647 #ifdef DEBUG_ramshankar … … 2649 2674 else 2650 2675 { 2676 Assert(!pVCpu->hm.s.svm.fSyncVTpr); 2651 2677 Assert(pVmcbNstGstCtrl->u64IOPMPhysAddr == g_HCPhysIOBitmap); 2652 2678 Assert(RT_BOOL(pVmcbNstGstCtrl->NestedPagingCtrl.n.u1NestedPaging) == pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging); … … 2656 2682 2657 2683 /** 2658 * Loads the nested-guest state into the VMCB. 2684 * Exports the nested-guest state into the VMCB. 2685 * 2686 * We need to export the entire state as we could be continuing nested-guest 2687 * execution at any point (not just immediately after VMRUN) and thus the VMCB 2688 * can be out-of-sync with the nested-guest state if it was executed in IEM. 2659 2689 * 2660 2690 * @returns VBox status code. … … 2664 2694 * @remarks No-long-jump zone!!! 2665 2695 */ 2666 static int hmR0SvmLoadGuestStateNested(PVMCPU pVCpu, PCCPUMCTX pCtx) 2667 { 2668 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x); 2669 2670 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 2696 static int hmR0SvmExportGuestStateNested(PVMCPU pVCpu) 2697 { 2698 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x); 2699 2700 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2701 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); 2671 2702 Assert(pVmcbNstGst); 2672 2703 2673 hmR0SvmSetupVmcbNested(pVCpu, pCtx); 2674 2675 int rc = hmR0SvmLoadGuestControlRegs(pVCpu, pVmcbNstGst, pCtx); 2676 AssertRCReturn(rc, rc); 2677 2678 /* 2679 * We need to load the entire state (including FS, GS etc.) as we could be continuing 2680 * to execute the nested-guest at any point (not just immediately after VMRUN) and thus 2681 * the VMCB can possibly be out-of-sync with the actual nested-guest state if it was 2682 * executed in IEM. 
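hmR0SvmCacheVmcbNested() above snapshots the nested-guest VMCB controls into a host-side cache guarded by fCacheValid before the outer host merges in its own intercepts, presumably so the nested hypervisor's original settings stay available when the #VMEXIT is later emulated. A reduced sketch of that cache-once/restore-later shape, with invented field names rather than the real SVM control layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for a few VMCB control fields -- not the real SVM layout. */
typedef struct VMCBCTRL
{
    uint64_t u64InterceptCtrl;
    uint32_t u32InterceptXcpt;
} VMCBCTRL;

typedef struct VMCBCACHE
{
    VMCBCTRL Ctrl;
    bool     fCacheValid;
} VMCBCACHE;

/* Cache the guest-provided controls exactly once, before the host merges in its own intercepts. */
static void cacheVmcbCtrl(VMCBCACHE *pCache, const VMCBCTRL *pCtrl)
{
    if (!pCache->fCacheValid)
    {
        pCache->Ctrl        = *pCtrl;
        pCache->fCacheValid = true;
    }
}

/* Undo the merge when the nested #VMEXIT is emulated. */
static void restoreVmcbCtrl(VMCBCACHE *pCache, VMCBCTRL *pCtrl)
{
    if (pCache->fCacheValid)
    {
        *pCtrl              = pCache->Ctrl;
        pCache->fCacheValid = false;
    }
}

int main(void)
{
    VMCBCTRL  Ctrl  = { 0x3, 0x0 };
    VMCBCACHE Cache = { { 0, 0 }, false };

    cacheVmcbCtrl(&Cache, &Ctrl);       /* remember what the nested hypervisor asked for */
    Ctrl.u64InterceptCtrl |= 0xff00;    /* ...host merges in the intercepts it needs...  */
    restoreVmcbCtrl(&Cache, &Ctrl);
    printf("intercepts back to %#llx\n", (unsigned long long)Ctrl.u64InterceptCtrl);
    return 0;
}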
2683 */ 2684 hmR0SvmLoadGuestSegmentRegs(pVCpu, pVmcbNstGst, pCtx); 2685 hmR0SvmLoadGuestMsrs(pVCpu, pVmcbNstGst, pCtx); 2686 hmR0SvmLoadGuestApicStateNested(pVCpu, pVmcbNstGst); 2687 hmR0SvmLoadGuestHwvirtStateNested(pVCpu, pVmcbNstGst, pCtx); 2704 hmR0SvmSetupVmcbNested(pVCpu); 2688 2705 2689 2706 pVmcbNstGst->guest.u64RIP = pCtx->rip; … … 2692 2709 pVmcbNstGst->guest.u64RAX = pCtx->rax; 2693 2710 2694 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 2695 Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable); /* Nested VGIF not supported yet. */ 2711 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 2712 2713 int rc = hmR0SvmExportGuestControlRegs(pVCpu, pVmcbNstGst); 2714 AssertRCReturnStmt(rc, ASMSetFlags(fEFlags), rc); 2715 2716 hmR0SvmExportGuestSegmentRegs(pVCpu, pVmcbNstGst); 2717 hmR0SvmExportGuestMsrs(pVCpu, pVmcbNstGst); 2718 hmR0SvmExportGuestHwvirtStateNested(pVCpu, pVmcbNstGst); 2719 2720 ASMSetFlags(fEFlags); 2721 2722 /* Nested VGIF not supported yet. */ 2723 Assert(!pVmcbNstGst->ctrl.IntCtrl.n.u1VGifEnable); 2724 2725 rc = hmR0SvmSelectVMRunHandler(pVCpu); 2726 AssertRCReturn(rc, rc); 2727 2728 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */ 2729 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( HM_CHANGED_GUEST_RIP 2730 | HM_CHANGED_GUEST_RFLAGS 2731 | HM_CHANGED_GUEST_GPRS_MASK 2732 | HM_CHANGED_GUEST_APIC_TPR 2733 | HM_CHANGED_GUEST_X87 2734 | HM_CHANGED_GUEST_SSE_AVX 2735 | HM_CHANGED_GUEST_OTHER_XSAVE 2736 | HM_CHANGED_GUEST_XCRx 2737 | HM_CHANGED_GUEST_TSC_AUX 2738 | HM_CHANGED_GUEST_OTHER_MSRS 2739 | HM_CHANGED_SVM_GUEST_XCPT_INTERCEPTS 2740 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_SVM_MASK))); 2741 2742 #ifdef VBOX_STRICT 2743 /* 2744 * All of the guest-CPU state and SVM keeper bits should be exported here by now, except 2745 * for the host-context and/or shared host-guest context bits. 2746 */ 2747 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 2748 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE)), 2749 ("fCtxChanged=%#RX64\n", fCtxChanged)); 2750 2751 /* 2752 * If we need to log state that isn't always imported, we'll need to import them here. 2753 * See hmR0SvmPostRunGuest() for which part of the state is imported uncondtionally. 2754 */ 2755 hmR0SvmLogState(pVCpu, pVmcbNstGst, "hmR0SvmExportGuestStateNested", 0 /* fFlags */, 0 /* uVerbose */); 2696 2756 #endif 2697 2757 2698 rc = hmR0SvmSetupVMRunHandler(pVCpu); 2699 AssertRCReturn(rc, rc); 2700 2701 /* Clear any unused and reserved bits. */ 2702 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP /* Unused (loaded unconditionally). */ 2703 | HM_CHANGED_GUEST_RSP 2704 | HM_CHANGED_GUEST_RFLAGS 2705 | HM_CHANGED_GUEST_SYSENTER_CS_MSR 2706 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR 2707 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR 2708 | HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS /* Unused. */ 2709 | HM_CHANGED_VMM_GUEST_LAZY_MSRS 2710 | HM_CHANGED_SVM_RESERVED1 /* Reserved. */ 2711 | HM_CHANGED_SVM_RESERVED2 2712 | HM_CHANGED_SVM_RESERVED3 2713 | HM_CHANGED_SVM_RESERVED4); 2714 2715 /* All the guest state bits should be loaded except maybe the host context and/or shared host/guest bits. 
*/ 2716 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST) 2717 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE), 2718 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu))); 2719 2720 #ifdef VBOX_STRICT 2721 hmR0SvmLogState(pVCpu, pVmcbNstGst, pCtx, "hmR0SvmLoadGuestStateNested", HMSVM_LOG_ALL, 0 /* uVerbose */); 2722 #endif 2723 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x); 2758 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x); 2724 2759 return rc; 2725 2760 } … … 2728 2763 2729 2764 /** 2730 * Loads the state shared between the host and guest (or nested-guest) into the2731 * VMCB.2765 * Exports the state shared between the host and guest (or nested-guest) into 2766 * the VMCB. 2732 2767 * 2733 2768 * @param pVCpu The cross context virtual CPU structure. 2734 2769 * @param pVmcb Pointer to the VM control block. 2735 * @param pCtx Pointer to the guest-CPU context.2736 2770 * 2737 2771 * @remarks No-long-jump zone!!! 2738 2772 */ 2739 static void hmR0Svm LoadSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb, PCCPUMCTX pCtx)2773 static void hmR0SvmExportSharedState(PVMCPU pVCpu, PSVMVMCB pVmcb) 2740 2774 { 2741 2775 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 2742 2776 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 2743 2777 2744 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)) 2745 { 2746 hmR0SvmLoadSharedCR0(pVCpu, pVmcb, pCtx); 2747 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0); 2748 } 2749 2750 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG)) 2778 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK) 2751 2779 { 2752 2780 /** @todo Figure out stepping with nested-guest. */ 2781 PCCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2753 2782 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 2754 hmR0Svm LoadSharedDebugState(pVCpu, pVmcb, pCtx);2783 hmR0SvmExportSharedDebugState(pVCpu, pVmcb); 2755 2784 else 2756 2785 { 2757 2786 pVmcb->guest.u64DR6 = pCtx->dr[6]; 2758 2787 pVmcb->guest.u64DR7 = pCtx->dr[7]; 2759 Log4(("hmR0SvmLoadSharedState: DR6=%#RX64 DR7=%#RX64\n", pCtx->dr[6], pCtx->dr[7])); 2760 } 2761 2762 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG); 2763 } 2764 2765 /* Unused on AMD-V (no lazy MSRs). */ 2766 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS); 2767 2768 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE), 2769 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu))); 2788 } 2789 } 2790 2791 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK; 2792 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE), 2793 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 2770 2794 } 2771 2795 … … 2778 2802 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 2779 2803 */ 2780 static void hmR0SvmImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 2781 { 2804 static void hmR0SvmImportGuestState(PVMCPU pVCpu, uint64_t fWhat) 2805 { 2806 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x); 2807 2808 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 2782 2809 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 2783 2810 PCSVMVMCBSTATESAVE pVmcbGuest = &pVmcb->guest; 2784 2811 PCSVMVMCBCTRL pVmcbCtrl = &pVmcb->ctrl; 2785 2812 2786 Log4 (("hmR0SvmImportGuestState:fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat));2813 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 2787 2814 2788 2815 /* … … 2790 2817 * the fExtrn modification atomic wrt to preemption hooks. 
2791 2818 */ 2792 RTCCUINTREG const f SavedFlags = ASMIntDisableFlags();2819 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 2793 2820 2794 2821 fWhat &= pCtx->fExtrn; … … 2840 2867 { 2841 2868 HMSVM_SEG_REG_COPY_FROM_VMCB(pCtx, pVmcbGuest, CS, cs); 2842 /* 2843 * Correct the hidden CS granularity bit. Haven't seen it being wrong in any other 2844 * register (yet). 2845 */ 2869 /* Correct the CS granularity bit. Haven't seen it being wrong in any other register (yet). */ 2846 2870 /** @todo SELM might need to be fixed as it too should not care about the 2847 2871 * granularity bit. See @bugref{6785}. */ … … 2860 2884 HMSVM_ASSERT_SEG_GRANULARITY(pCtx, ss); 2861 2885 /* 2862 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the VMCB and uses that 2863 * and thus it's possible that when the CPL changes during guest execution that the SS DPL 2864 * isn't updated by AMD-V. Observed on some AMD Fusion CPUs with 64-bit guests. 2886 * Sync the hidden SS DPL field. AMD CPUs have a separate CPL field in the 2887 * VMCB and uses that and thus it's possible that when the CPL changes during 2888 * guest execution that the SS DPL isn't updated by AMD-V. Observed on some 2889 * AMD Fusion CPUs with 64-bit guests. 2890 * 2865 2891 * See AMD spec. 15.5.1 "Basic operation". 2866 2892 */ … … 2972 2998 uint64_t const uCr0 = (pCtx->cr0 & ~(X86_CR0_TS | X86_CR0_MP)) 2973 2999 | (pVmcbGuest->u64CR0 & (X86_CR0_TS | X86_CR0_MP)); 2974 VMMRZCallRing3Disable(pVCpu); /* C PUM has log statements and calls into PGM. */3000 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */ 2975 3001 CPUMSetGuestCR0(pVCpu, uCr0); 2976 3002 VMMRZCallRing3Enable(pVCpu); … … 3006 3032 Assert(!pCtx->fExtrn || (pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)); 3007 3033 3008 ASMSetFlags(fSavedFlags); 3034 ASMSetFlags(fEFlags); 3035 3036 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x); 3009 3037 3010 3038 /* … … 3016 3044 * handling -> hmR0SvmImportGuestState() and here we are. 3017 3045 * 3018 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be3019 * up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've postponed the CR33020 * update via the force-flag and cleared CR3 from fExtrn. Any SVM R0 VM-exit handler that requests3021 * CR3 to be saved will end up here and we call PGMUpdateCR3().3046 * The reason for such complicated handling is because VM-exits that call into PGM expect 3047 * CR3 to be up-to-date and thus any CR3-saves -before- the VM-exit (longjmp) would've 3048 * postponed the CR3 update via the force-flag and cleared CR3 from fExtrn. Any SVM R0 3049 * VM-exit handler that requests CR3 to be saved will end up here and we call PGMUpdateCR3(). 3022 3050 * 3023 3051 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again, … … 3042 3070 * @returns VBox status code. 3043 3071 * @param pVCpu The cross context virtual CPU structure. 3044 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. The3045 * data may be out-of-sync. Make sure to update the required3046 * fields before using them.3047 3072 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 
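The on-demand import path (hmR0SvmImportGuestState / SVMR0ImportStateOnDemand) is the other half of the new split: pCtx->fExtrn tracks which guest-CPU fields still live only in the VMCB, a caller passes in the CPUMCTX_EXTRN_XXX bits it needs, and the import copies exactly the intersection and then clears those bits from fExtrn (with host interrupts disabled so the update cannot race the preemption hooks). A toy version of that bookkeeping, with invented bit names and a three-field context:

#include <stdint.h>
#include <stdio.h>

#define EXTRN_RIP  UINT64_C(0x01)   /* illustrative bits, not CPUMCTX_EXTRN_XXX */
#define EXTRN_RSP  UINT64_C(0x02)
#define EXTRN_CR2  UINT64_C(0x04)

typedef struct TOYVMCB { uint64_t rip, rsp, cr2; } TOYVMCB;
typedef struct TOYCTX  { uint64_t rip, rsp, cr2; uint64_t fExtrn; } TOYCTX;

/* Copy only the requested fields that are still external (i.e. only in the VMCB). */
static void importOnDemand(TOYCTX *pCtx, const TOYVMCB *pVmcb, uint64_t fWhat)
{
    fWhat &= pCtx->fExtrn;                 /* already-imported bits are skipped */
    if (fWhat & EXTRN_RIP) pCtx->rip = pVmcb->rip;
    if (fWhat & EXTRN_RSP) pCtx->rsp = pVmcb->rsp;
    if (fWhat & EXTRN_CR2) pCtx->cr2 = pVmcb->cr2;
    pCtx->fExtrn &= ~fWhat;                /* these fields are now up to date   */
}

int main(void)
{
    TOYVMCB Vmcb = { 0x1000, 0x8000, 0xdeadbeef };
    TOYCTX  Ctx  = { 0, 0, 0, EXTRN_RIP | EXTRN_RSP | EXTRN_CR2 };

    importOnDemand(&Ctx, &Vmcb, EXTRN_RIP);             /* exit handler only needs RIP */
    importOnDemand(&Ctx, &Vmcb, EXTRN_RIP | EXTRN_CR2); /* RIP is skipped, CR2 pulled  */
    printf("rip=%#llx cr2=%#llx fExtrn=%#llx\n",
           (unsigned long long)Ctx.rip, (unsigned long long)Ctx.cr2,
           (unsigned long long)Ctx.fExtrn);
    return 0;
}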
3048 3073 */ 3049 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx,uint64_t fWhat)3050 { 3051 hmR0SvmImportGuestState(pVCpu, pCtx,fWhat);3074 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat) 3075 { 3076 hmR0SvmImportGuestState(pVCpu, fWhat); 3052 3077 return VINF_SUCCESS; 3053 }3054 3055 3056 /**3057 * Saves the guest (or nested-guest) state from the VMCB into the guest-CPU3058 * context.3059 *3060 * Currently there is no residual state left in the CPU that is not updated in the3061 * VMCB.3062 *3063 * @returns VBox status code.3064 * @param pVCpu The cross context virtual CPU structure.3065 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context. The3066 * data may be out-of-sync. Make sure to update the required3067 * fields before using them.3068 * @param pVmcb Pointer to the VM control block.3069 */3070 static void hmR0SvmSaveGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, PCSVMVMCB pVmcb)3071 {3072 Assert(VMMRZCallRing3IsEnabled(pVCpu));3073 3074 /*3075 * Always import the following:3076 *3077 * - RIP, RFLAGS, int. shadow, GIF: we need them when as we evaluate3078 * injecting events before re-entering guest execution.3079 *3080 * - GPRS: Only RAX, RSP are in the VMCB. All the other GPRs are swapped3081 * by the assembly switcher code. Import these two always just to simplify3082 * assumptions on GPRs.3083 *3084 * - SREG: We load them all together so we have to save all of them.3085 *3086 * - KERNEL_GS_BASE, SYSCALL MSRS: We don't have a HM_CHANGED_GUEST flag3087 * for it yet3088 */3089 /** @todo Extend HM_CHANGED_GUEST_xxx so that we avoid saving segment3090 * registers, kernel GS base and other MSRs each time. */3091 hmR0SvmImportGuestState(pVCpu, pCtx, CPUMCTX_EXTRN_RIP3092 | CPUMCTX_EXTRN_SYSCALL_MSRS3093 | CPUMCTX_EXTRN_KERNEL_GS_BASE3094 | CPUMCTX_EXTRN_RFLAGS3095 | CPUMCTX_EXTRN_RAX3096 | CPUMCTX_EXTRN_SREG_MASK3097 | CPUMCTX_EXTRN_RSP3098 | CPUMCTX_EXTRN_HWVIRT3099 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW3100 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ);3101 3102 #ifdef DEBUG_ramshankar3103 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))3104 {3105 hmR0SvmImportGuestState(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL);3106 hmR0SvmLogState(pVCpu, pVmcb, pCtx, "hmR0SvmSaveGuestStateNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */);3107 }3108 #else3109 RT_NOREF(pVmcb);3110 #endif3111 3078 } 3112 3079 … … 3135 3102 /* Save the guest state if necessary. */ 3136 3103 if (fImportState) 3137 hmR0SvmImportGuestState(pVCpu, &pVCpu->cpum.GstCtx,HMSVM_CPUMCTX_EXTRN_ALL);3138 3139 /* Restore host FPU state if necessary and resync on next R0 reentry .*/3104 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 3105 3106 /* Restore host FPU state if necessary and resync on next R0 reentry. 
*/ 3140 3107 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu); 3108 Assert(!CPUMIsGuestFPUStateActive(pVCpu)); 3141 3109 3142 3110 /* … … 3152 3120 #endif 3153 3121 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, false /* save DR6 */); 3154 3155 3122 Assert(!CPUMIsHyperDebugStateActive(pVCpu)); 3156 3123 Assert(!CPUMIsGuestDebugStateActive(pVCpu)); 3157 3124 3158 3125 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry); 3159 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState); 3126 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState); 3127 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState); 3160 3128 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1); 3161 3129 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2); … … 3270 3238 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 3271 3239 3272 Log4 (("hmR0SvmCallRing3Callback->hmR0SvmLongJmpToRing3\n"));3240 Log4Func(("Calling hmR0SvmLongJmpToRing3\n")); 3273 3241 int rc = hmR0SvmLongJmpToRing3(pVCpu); 3274 3242 AssertRCReturn(rc, rc); … … 3302 3270 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */ 3303 3271 VMMRZCallRing3Disable(pVCpu); 3304 Log4(("hmR0SvmExitToRing3: VCPU[%u]: rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", pVCpu->idCpu, rcExit, 3305 pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions)); 3272 Log4Func(("rcExit=%d LocalFF=%#RX32 GlobalFF=%#RX32\n", rcExit, pVCpu->fLocalForcedActions, pVM->fGlobalForcedActions)); 3306 3273 3307 3274 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */ … … 3317 3284 3318 3285 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3); 3319 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR3320 | CPUM_CHANGED_LDTR3321 | CPUM_CHANGED_GDTR3322 | CPUM_CHANGED_IDTR3323 | CPUM_CHANGED_TR3324 | CPUM_CHANGED_HIDDEN_SEL_REGS);3286 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR 3287 | CPUM_CHANGED_LDTR 3288 | CPUM_CHANGED_GDTR 3289 | CPUM_CHANGED_IDTR 3290 | CPUM_CHANGED_TR 3291 | CPUM_CHANGED_HIDDEN_SEL_REGS); 3325 3292 if ( pVM->hm.s.fNestedPaging 3326 3293 && CPUMIsGuestPagingEnabledEx(pCtx)) … … 3329 3296 } 3330 3297 3298 /* Update the exit-to-ring 3 reason. */ 3299 pVCpu->hm.s.rcLastExitToR3 = rcExit; 3300 3331 3301 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */ 3332 3302 if (rcExit != VINF_EM_RAW_INTERRUPT) 3333 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 3303 { 3304 Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)); 3305 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 3306 } 3334 3307 3335 3308 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3); … … 3427 3400 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress; 3428 3401 3429 Log4 (("hmR0SvmSetPendingEvent: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,3430 pEvent->n.u8Vector,(uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));3402 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector, 3403 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode)); 3431 3404 } 3432 3405 … … 3490 3463 pCtx->cr2 = uFaultAddress; 3491 3464 /* The VMCB clean bit for CR2 will be updated while re-loading the guest state. 
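The "VMCB clean bit" remark above refers to AMD-V's VMCB clean field: roughly speaking, each set bit tells the CPU that the corresponding group of VMCB fields is unchanged since the last VMRUN and may be taken from its internal state cache, so whenever software rewrites a field it must clear the matching bit (as done with HMSVM_VMCB_CLEAN_IOPM_MSRPM earlier in this diff). A schematic sketch of that invalidation discipline, using invented group names rather than the architectural bit assignments:

#include <stdint.h>
#include <stdio.h>

/* Illustrative clean-bit groups -- not the architectural assignments. */
#define CLEAN_CRX        UINT32_C(0x01)
#define CLEAN_IOPM_MSRPM UINT32_C(0x02)
#define CLEAN_ALL        (CLEAN_CRX | CLEAN_IOPM_MSRPM)

typedef struct TOYVMCB
{
    uint64_t cr2;
    uint32_t fCleanBits;    /* 1 = CPU may reuse its cached copy of that group */
} TOYVMCB;

static void writeCr2(TOYVMCB *pVmcb, uint64_t uCr2)
{
    pVmcb->cr2         = uCr2;
    pVmcb->fCleanBits &= ~CLEAN_CRX;    /* force the CPU to re-read the CR group */
}

int main(void)
{
    TOYVMCB Vmcb = { 0, CLEAN_ALL };
    writeCr2(&Vmcb, 0xfeedface);
    printf("clean bits now %#x\n", Vmcb.fCleanBits);
    /* After VMRUN has consumed the VMCB, software would mark everything clean
       again until the next modification. */
    Vmcb.fCleanBits = CLEAN_ALL;
    return 0;
}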
*/ 3492 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR2);3465 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2); 3493 3466 } 3494 3467 … … 3549 3522 RT_NOREF(pVCpu); 3550 3523 3551 Log4 (("hmR0SvmInjectEventVmcb: u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u,3552 pEvent->n.u8Vector,(uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode));3524 Log4Func(("u=%#RX64 u8Vector=%#x Type=%#x ErrorCodeValid=%RTbool ErrorCode=%#RX32\n", pEvent->u, pEvent->n.u8Vector, 3525 (uint8_t)pEvent->n.u3Type, !!pEvent->n.u1ErrorCodeValid, pEvent->n.u32ErrorCode)); 3553 3526 } 3554 3527 … … 3680 3653 { 3681 3654 /* 3682 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should 3683 * inhibit interrupts or clear any existing interrupt-inhibition. 3655 * Instructions like STI and MOV SS inhibit interrupts till the next instruction 3656 * completes. Check if we should inhibit interrupts or clear any existing 3657 * interrupt inhibition. 3684 3658 */ 3685 3659 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) … … 3688 3662 { 3689 3663 /* 3690 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in 3691 * AMD-V, the flag's condition to be cleared is met and thus the cleared state is correct. 3664 * We can clear the inhibit force flag as even if we go back to the recompiler 3665 * without executing guest code in AMD-V, the flag's condition to be cleared is 3666 * met and thus the cleared state is correct. 3692 3667 */ 3693 3668 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); … … 3710 3685 { 3711 3686 /* 3712 * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when 3713 * the guest is ready to accept interrupts. At #VMEXIT, we then get the interrupt3714 * from the APIC(updating ISR at the right time) and inject the interrupt.3687 * When AVIC isn't supported, set up an interrupt window to cause a #VMEXIT when the guest 3688 * is ready to accept interrupts. At #VMEXIT, we then get the interrupt from the APIC 3689 * (updating ISR at the right time) and inject the interrupt. 3715 3690 * 3716 3691 * With AVIC is supported, we could make use of the asynchronously delivery without 3717 3692 * #VMEXIT and we would be passing the AVIC page to SVM. 3718 3693 * 3719 * In AMD-V, an interrupt window is achieved using a combination of 3720 * V_IRQ (an interrupt is pending), V_IGN_TPR (ignore TPR priorities) and the 3721 * VINTR intercept all being set. 3694 * In AMD-V, an interrupt window is achieved using a combination of V_IRQ (an interrupt 3695 * is pending), V_IGN_TPR (ignore TPR priorities) and the VINTR intercept all being set. 3722 3696 */ 3723 3697 #ifdef VBOX_WITH_NESTED_HWVIRT_SVM 3724 3698 /* 3725 * Currently we don't overlay interupt windows and if there's any V_IRQ pending 3726 * in the nested-guest VMCB, we avoid setting up any interrupt window on behalf3727 * of the outerguest.3699 * Currently we don't overlay interupt windows and if there's any V_IRQ pending in the 3700 * nested-guest VMCB, we avoid setting up any interrupt window on behalf of the outer 3701 * guest. 
3728 3702 */ 3729 3703 /** @todo Does this mean we end up prioritizing virtual interrupt … … 3799 3773 bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS); 3800 3774 3801 Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fInt rPending=%RTbool fNmiPending=%RTbool\n",3775 Log4Func(("fVirtualGif=%RTbool fBlockNmi=%RTbool fIntShadow=%RTbool fIntPending=%RTbool fNmiPending=%RTbool\n", 3802 3776 fVirtualGif, fBlockNmi, fIntShadow, VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC), 3803 3777 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI))); … … 3819 3793 { 3820 3794 Log4(("Intercepting NMI -> #VMEXIT\n")); 3821 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);3795 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); 3822 3796 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0, 0); 3823 3797 } … … 3838 3812 } 3839 3813 /* 3840 * Check if the nested-guest can receive external interrupts (generated by 3841 * the guest'sPIC/APIC).3814 * Check if the nested-guest can receive external interrupts (generated by the guest's 3815 * PIC/APIC). 3842 3816 * 3843 3817 * External intercepts, NMI, SMI etc. from the physical CPU are -always- intercepted … … 3860 3834 { 3861 3835 Log4(("Intercepting INTR -> #VMEXIT\n")); 3862 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);3836 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); 3863 3837 return IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0); 3864 3838 } … … 3924 3898 bool const fBlockNmi = VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS); 3925 3899 3926 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool Intr. pending=%RTbool NMI pending=%RTbool\n",3900 Log4Func(("fGif=%RTbool fBlockNmi=%RTbool fBlockInt=%RTbool fIntShadow=%RTbool fIntPending=%RTbool NMI pending=%RTbool\n", 3927 3901 fGif, fBlockNmi, fBlockInt, fIntShadow, 3928 3902 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC), … … 3957 3931 } 3958 3932 /* 3959 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() returns 3960 * a valid interrupt we -must- deliver the interrupt. We can no longer re-request it from the APIC. 3933 * Check if the guest can receive external interrupts (PIC/APIC). Once PDMGetInterrupt() 3934 * returns a valid interrupt we -must- deliver the interrupt. We can no longer re-request 3935 * it from the APIC device. 3961 3936 */ 3962 3937 else if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC) … … 4021 3996 { 4022 3997 /* 4023 * For nested-guests we have no way to determine if we're injecting a physical or virtual4024 * interrupt at this point. Hence the partial verification below.3998 * For nested-guests we have no way to determine if we're injecting a physical or 3999 * virtual interrupt at this point. Hence the partial verification below. 4025 4000 */ 4026 4001 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) … … 4052 4027 4053 4028 /* 4054 * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. We do this only 4055 * when we are surely going to inject the NMI as otherwise if we return to ring-3 prematurely we 4056 * could leave NMIs blocked indefinitely upon re-entry into SVM R0. 4029 * Before injecting an NMI we must set VMCPU_FF_BLOCK_NMIS to prevent nested NMIs. 
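The evaluation logic in this area follows the usual AMD-V ordering: nothing is injected while GIF is clear, NMIs are considered ahead of external interrupts (subject to NMI blocking and the interrupt shadow), and an interrupt that cannot be delivered yet results in an interrupt window (V_IRQ plus the VINTR intercept) rather than an injection. A deliberately compressed decision sketch, ignoring the nested-guest intercept checks and using plain booleans in place of the real force-flags:

#include <stdbool.h>
#include <stdio.h>

typedef enum { ACTION_NOTHING, ACTION_INJECT_NMI, ACTION_INJECT_INTR, ACTION_INTR_WINDOW } ACTION;

typedef struct GUESTIRQSTATE
{
    bool fGif;          /* global interrupt flag            */
    bool fIntShadow;    /* STI / MOV SS interrupt shadow    */
    bool fBlockNmi;     /* an NMI is already being handled  */
    bool fNmiPending;
    bool fIntrPending;
    bool fEflIf;        /* guest EFLAGS.IF                  */
} GUESTIRQSTATE;

static ACTION evaluatePendingEvent(const GUESTIRQSTATE *pState)
{
    if (!pState->fGif)
        return ACTION_NOTHING;                      /* GIF clear blocks everything        */
    if (pState->fNmiPending && !pState->fBlockNmi && !pState->fIntShadow)
        return ACTION_INJECT_NMI;                   /* NMIs go ahead of external INTs     */
    if (pState->fIntrPending)
    {
        if (pState->fEflIf && !pState->fIntShadow)
            return ACTION_INJECT_INTR;
        return ACTION_INTR_WINDOW;                  /* ask for a #VMEXIT once deliverable */
    }
    return ACTION_NOTHING;
}

int main(void)
{
    GUESTIRQSTATE State = { true, false, false, false, true, false };
    printf("action=%d\n", evaluatePendingEvent(&State));   /* IF=0 -> interrupt window */
    return 0;
}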
We 4030 * do this only when we are surely going to inject the NMI as otherwise if we return 4031 * to ring-3 prematurely we could leave NMIs blocked indefinitely upon re-entry into 4032 * SVM R0. 4057 4033 * 4058 * With VT-x, this is handled by the Guest interruptibility information VMCS field which will set 4059 * the VMCS field after actually delivering the NMI which we read on VM-exit to determine the state. 4034 * With VT-x, this is handled by the Guest interruptibility information VMCS field 4035 * which will set the VMCS field after actually delivering the NMI which we read on 4036 * VM-exit to determine the state. 4060 4037 */ 4061 4038 if ( Event.n.u3Type == SVM_EVENT_NMI … … 4114 4091 HMSVM_ASSERT_PREEMPT_SAFE(); 4115 4092 HMSVM_ASSERT_NOT_IN_NESTED_GUEST(pCtx); 4116 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);4093 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 4117 4094 4118 4095 PCSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; … … 4120 4097 { 4121 4098 hmR0DumpRegs(pVM, pVCpu, pCtx); NOREF(pVM); 4099 /** @todo We probably don't need to dump this anymore or we can expand 4100 * hmR0DumpRegs()? */ 4122 4101 #ifdef VBOX_STRICT 4123 4102 Log4(("ctrl.u32VmcbCleanBits %#RX32\n", pVmcb->ctrl.u32VmcbCleanBits)); … … 4244 4223 } 4245 4224 else 4246 Log4 (("hmR0SvmReportWorldSwitchError:rcVMRun=%d\n", rcVMRun));4225 Log4Func(("rcVMRun=%d\n", rcVMRun)); 4247 4226 4248 4227 NOREF(pVmcb); … … 4274 4253 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 4275 4254 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 4276 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));4277 4255 4278 4256 /* Could happen as a result of longjump. */ 4279 4257 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 4280 {4281 Assert(!(pCtx->fExtrn & CPUMCTX_EXTRN_CR3));4282 4258 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); 4283 }4284 4259 4285 4260 /* Update pending interrupts into the APIC's IRR. */ … … 4298 4273 if (rc != VINF_SUCCESS) 4299 4274 { 4300 Log4 (("hmR0SvmCheckForceFlags:PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc));4275 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc=%d\n", rc)); 4301 4276 return rc; 4302 4277 } … … 4310 4285 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF); 4311 4286 int rc = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3; 4312 Log4 (("hmR0SvmCheckForceFlags:HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc));4287 Log4Func(("HM_TO_R3 forcing us back to ring-3. 
rc=%d\n", rc)); 4313 4288 return rc; 4314 4289 } … … 4318 4293 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST)) 4319 4294 { 4320 Log4 (("hmR0SvmCheckForceFlags:Pending VM request forcing us back to ring-3\n"));4295 Log4Func(("Pending VM request forcing us back to ring-3\n")); 4321 4296 return VINF_EM_PENDING_REQUEST; 4322 4297 } … … 4325 4300 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING)) 4326 4301 { 4327 Log4 (("hmR0SvmCheckForceFlags:PGM pool flush pending forcing us back to ring-3\n"));4302 Log4Func(("PGM pool flush pending forcing us back to ring-3\n")); 4328 4303 return VINF_PGM_POOL_FLUSH_PENDING; 4329 4304 } … … 4332 4307 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA)) 4333 4308 { 4334 Log4 (("hmR0SvmCheckForceFlags:Pending DMA request forcing us back to ring-3\n"));4309 Log4Func(("Pending DMA request forcing us back to ring-3\n")); 4335 4310 return VINF_EM_RAW_TO_R3; 4336 4311 } … … 4397 4372 4398 4373 #ifdef HMSVM_SYNC_FULL_NESTED_GUEST_STATE 4399 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 4374 Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)); 4375 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 4400 4376 #endif 4401 4377 4402 4378 /* 4403 * Load the nested-guest state. 4379 * Export the nested-guest state bits that are not shared with the host in any way as we 4380 * can longjmp or get preempted in the midst of exporting some of the state. 4404 4381 */ 4405 rc = hmR0Svm LoadGuestStateNested(pVCpu, pCtx);4382 rc = hmR0SvmExportGuestStateNested(pVCpu); 4406 4383 AssertRCReturn(rc, rc); 4407 STAM_COUNTER_INC(&pVCpu->hm.s.Stat LoadFull); /** @todo Get new STAM counter for this? */4384 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull); 4408 4385 4409 4386 /* Ensure we've cached (and hopefully modified) the VMCB for execution using hardware-assisted SVM. */ … … 4412 4389 /* 4413 4390 * No longjmps to ring-3 from this point on!!! 4414 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic. 4415 * This also disables flushing of the R0-logger instance (if any). 4391 * 4392 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, 4393 * better than a kernel panic. This also disables flushing of the R0-logger instance. 4416 4394 */ 4417 4395 VMMRZCallRing3Disable(pVCpu); 4418 4396 4419 4397 /* 4420 * We disable interrupts so that we don't miss any interrupts that would flag 4421 * preemption (IPI/timers etc.) when thread-context hooks aren't used and we've4422 * been running with preemption disabled for a while. Since this is purly to aid4423 * the RTThreadPreemptIsPending code, it doesn't matter that it may temporarily4424 * reenable anddisable interrupt on NT.4398 * We disable interrupts so that we don't miss any interrupts that would flag preemption 4399 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with 4400 * preemption disabled for a while. Since this is purly to aid the 4401 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and 4402 * disable interrupt on NT. 4425 4403 * 4426 * We need to check for force-flags that could've possible been altered since we last checked them (e.g. 4427 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}). 4404 * We need to check for force-flags that could've possible been altered since we last 4405 * checked them (e.g. by PDMGetInterrupt() leaving the PDM critical section, 4406 * see @bugref{6398}). 
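The sequence described in this comment (and repeated for the non-nested path further down) is worth spelling out: host interrupts are disabled first, the force-flags are checked one final time inside that window, and only then does the code commit to the world switch, so no IPI or timer can sneak in between the last check and VMRUN. A host-agnostic sketch of the same shape, with a fake interrupt-flag primitive standing in for ASMIntDisableFlags()/ASMSetFlags():

#include <stdbool.h>
#include <stdio.h>

/* Fake interrupt-flag primitives for the sketch only. */
static bool g_fIntsEnabled = true;
static bool intDisable(void)      { bool f = g_fIntsEnabled; g_fIntsEnabled = false; return f; }
static void intRestore(bool f)    { g_fIntsEnabled = f; }

static bool forceFlagsPending(void) { return false; }   /* pretend nothing is pending */
static bool preemptionPending(void) { return false; }

static int runGuest(void) { printf("VMRUN\n"); return 0; }

static int preRunAndExecute(void)
{
    bool const fSaved = intDisable();           /* 1. close the race window             */
    if (forceFlagsPending() || preemptionPending())
    {
        intRestore(fSaved);                     /* 2. bail out to ring-3, nothing lost  */
        return 1;
    }
    int rc = runGuest();                        /* 3. commit: nothing can sneak in now  */
    intRestore(fSaved);
    return rc;
}

int main(void) { return preRunAndExecute(); }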
4428 4407 * 4429 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before4430 * executing guest code.4408 * We also check a couple of other force-flags as a last opportunity to get the EMT back 4409 * to ring-3 before executing guest code. 4431 4410 */ 4432 4411 pSvmTransient->fEFlags = ASMIntDisableFlags(); … … 4497 4476 4498 4477 #ifdef HMSVM_SYNC_FULL_GUEST_STATE 4499 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 4478 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 4479 { 4480 Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)); 4481 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 4482 } 4500 4483 #endif 4501 4502 /* Load the guest bits that are not shared with the host in any way since we can longjmp or get preempted. */ 4503 rc = hmR0SvmLoadGuestState(pVM, pVCpu, pCtx); 4484 #ifdef HMSVM_SYNC_FULL_NESTED_GUEST_STATE 4485 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 4486 { 4487 Assert(!(pCtx->fExtrn & HMSVM_CPUMCTX_EXTRN_ALL)); 4488 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 4489 } 4490 #endif 4491 4492 /* 4493 * Export the guest state bits that are not shared with the host in any way as we can 4494 * longjmp or get preempted in the midst of exporting some of the state. 4495 */ 4496 rc = hmR0SvmExportGuestState(pVCpu); 4504 4497 AssertRCReturn(rc, rc); 4505 STAM_COUNTER_INC(&pVCpu->hm.s.Stat LoadFull);4498 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull); 4506 4499 4507 4500 /* 4508 * If we're not intercepting TPR changes in the guest, save the guest TPR before the world-switch4509 * so we can update it on the way back if the guest changed the TPR.4501 * If we're not intercepting TPR changes in the guest, save the guest TPR before the 4502 * world-switch so we can update it on the way back if the guest changed the TPR. 4510 4503 */ 4511 4504 if (pVCpu->hm.s.svm.fSyncVTpr) … … 4520 4513 /* 4521 4514 * No longjmps to ring-3 from this point on!!! 4522 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic. 4523 * This also disables flushing of the R0-logger instance (if any). 4515 * 4516 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, 4517 * better than a kernel panic. This also disables flushing of the R0-logger instance. 4524 4518 */ 4525 4519 VMMRZCallRing3Disable(pVCpu); 4526 4520 4527 4521 /* 4528 * We disable interrupts so that we don't miss any interrupts that would flag 4529 * preemption (IPI/timers etc.) when thread-context hooks aren't used and we've4530 * been running with preemption disabled for a while. Since this is purly to aid4531 * the RTThreadPreemptIsPending code, it doesn't matter that it may temporarily4532 * reenable anddisable interrupt on NT.4522 * We disable interrupts so that we don't miss any interrupts that would flag preemption 4523 * (IPI/timers etc.) when thread-context hooks aren't used and we've been running with 4524 * preemption disabled for a while. Since this is purly to aid the 4525 * RTThreadPreemptIsPending() code, it doesn't matter that it may temporarily reenable and 4526 * disable interrupt on NT. 4533 4527 * 4534 * We need to check for force-flags that could've possible been altered since we last checked them (e.g. 4535 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}). 4528 * We need to check for force-flags that could've possible been altered since we last 4529 * checked them (e.g. 
by PDMGetInterrupt() leaving the PDM critical section, 4530 * see @bugref{6398}). 4536 4531 * 4537 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before4538 * executing guest code.4532 * We also check a couple of other force-flags as a last opportunity to get the EMT back 4533 * to ring-3 before executing guest code. 4539 4534 */ 4540 4535 pSvmTransient->fEFlags = ASMIntDisableFlags(); … … 4589 4584 { 4590 4585 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x); 4591 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */4586 CPUMR0LoadGuestFPU(pVM, pVCpu); /* (Ignore rc, no need to set HM_CHANGED_HOST_CONTEXT for SVM.) */ 4592 4587 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x); 4593 4588 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu); 4594 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);4595 4589 } 4596 4590 4597 4591 /* Load the state shared between host and guest (FPU, debug). */ 4598 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))4599 hmR0Svm LoadSharedState(pVCpu, pVmcb, pCtx);4600 4601 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);/* Preemption might set this, nothing to do on AMD-V. */4602 AssertMsg(! HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));4592 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE) 4593 hmR0SvmExportSharedState(pVCpu, pVmcb); 4594 4595 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT; /* Preemption might set this, nothing to do on AMD-V. */ 4596 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 4603 4597 4604 4598 PHMGLOBALCPUINFO pHostCpu = hmR0GetCurrentCpu(); … … 4659 4653 4660 4654 /* 4661 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that 4662 * RDTSCPs (that don't cause exits) reads the guest MSR. See @bugref{3324}.4655 * Save the current Host TSC_AUX and write the guest TSC_AUX to the host, so that RDTSCPs 4656 * (that don't cause exits) reads the guest MSR, see @bugref{3324}. 4663 4657 * 4664 4658 * This should be done -after- any RDTSCPs for obtaining the host timestamp (TM, STAM etc). … … 4682 4676 4683 4677 /* 4684 * If VMCB Clean bits isn't supported by the CPU or exposed to the guest in the 4685 * nested virtualization case, mark all state-bits as dirty indicating to the4686 * CPU to re-load fromVMCB.4678 * If VMCB Clean bits isn't supported by the CPU or exposed to the guest in the nested 4679 * virtualization case, mark all state-bits as dirty indicating to the CPU to re-load 4680 * from the VMCB. 4687 4681 */ 4688 4682 bool const fSupportsVmcbCleanBits = hmR0SvmSupportsVmcbCleanBits(pVCpu, pCtx); … … 4708 4702 4709 4703 /* 4710 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations 4711 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper. 4712 * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details. 4704 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses 4705 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are 4706 * callee-saved and thus the need for this XMM wrapper. 4707 * 4708 * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage". 
4713 4709 */ 4714 4710 #ifdef VBOX_WITH_KERNEL_USING_XMM … … 4738 4734 4739 4735 /* 4740 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations 4741 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper. 4742 * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details. 4736 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses 4737 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are 4738 * callee-saved and thus the need for this XMM wrapper. 4739 * 4740 * Refer MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage". 4743 4741 */ 4744 4742 #ifdef VBOX_WITH_KERNEL_USING_XMM … … 4832 4830 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS)) 4833 4831 { 4834 Log4 (("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun));4832 Log4Func(("VMRUN failure: rcVMRun=%Rrc\n", rcVMRun)); 4835 4833 return; 4836 4834 } … … 4841 4839 pSvmTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */ 4842 4840 4843 hmR0SvmSaveGuestState(pVCpu, pCtx, pVmcb); /* Save the guest state from the VMCB to the guest-CPU context. */ 4841 #ifdef HMSVM_SYNC_FULL_GUEST_STATE 4842 if (!CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 4843 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 4844 #elif defined(HMSVM_SYNC_FULL_NESTED_GUEST_STATE) 4845 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 4846 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 4847 #else 4848 /* 4849 * Always import the following: 4850 * 4851 * - RIP for exit optimizations and evaluating event injection on re-entry. 4852 * - RFLAGS for evaluating event injection on VM re-entry and for exporting shared debug 4853 * state on preemption. 4854 * - Interrupt shadow, GIF for evaluating event injection on VM re-entry. 4855 * - CS for exit optimizations. 4856 * - RAX, RSP for simplifying assumptions on GPRs. All other GPRs are swapped by the 4857 * assembly switcher code. 4858 * - Shared state (only DR7 currently) for exporting shared debug state on preemption. 4859 */ 4860 hmR0SvmImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 4861 | CPUMCTX_EXTRN_RFLAGS 4862 | CPUMCTX_EXTRN_RAX 4863 | CPUMCTX_EXTRN_RSP 4864 | CPUMCTX_EXTRN_CS 4865 | CPUMCTX_EXTRN_HWVIRT 4866 | CPUMCTX_EXTRN_HM_SVM_INT_SHADOW 4867 | CPUMCTX_EXTRN_HM_SVM_HWVIRT_VIRQ 4868 | HMSVM_CPUMCTX_SHARED_STATE); 4869 #endif 4870 4871 #ifdef DEBUG_ramshankar 4872 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx)) 4873 { 4874 hmR0SvmImportGuestState(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 4875 hmR0SvmLogState(pVCpu, pVmcb, pCtx, "hmR0SvmPostRunGuestNested", HMSVM_LOG_ALL & ~HMSVM_LOG_LBR, 0 /* uVerbose */); 4876 } 4877 #endif 4844 4878 4845 4879 if ( pSvmTransient->u64ExitCode != SVM_EXIT_INVALID … … 4853 4887 int rc = APICSetTpr(pVCpu, pVmcb->guest.u64LSTAR & 0xff); 4854 4888 AssertRC(rc); 4855 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);4889 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 4856 4890 } 4857 4891 /* Sync TPR when we aren't intercepting CR8 writes. 
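When CR8 writes are not intercepted, the guest's TPR only becomes visible after the world switch, so the code below compares the value stashed before VMRUN with what came back (the LSTAR shadow when TPR patching is active, otherwise V_TPR shifted back into its 8-bit form) and forwards any change to the virtual APIC. A simplified sketch with an invented apicSetTpr() standing in for the APICSetTpr() call:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void apicSetTpr(uint8_t uTpr)            /* invented stand-in for the APIC call */
{
    printf("virtual APIC TPR <- %#x\n", uTpr);
}

typedef struct TOYVMCB
{
    uint64_t u64LSTAR;   /* reused as a TPR shadow when TPR patching is active */
    uint8_t  u8VTpr;     /* V_TPR: low 4 bits of the task priority             */
} TOYVMCB;

static void syncTprAfterRun(const TOYVMCB *pVmcb, uint8_t u8TprBefore, bool fTprPatching)
{
    uint8_t const u8TprAfter = fTprPatching ? (uint8_t)(pVmcb->u64LSTAR & 0xff)
                                            : (uint8_t)(pVmcb->u8VTpr << 4);
    if (u8TprAfter != u8TprBefore)
        apicSetTpr(u8TprAfter);                 /* guest changed its priority while running */
}

int main(void)
{
    TOYVMCB Vmcb = { 0x20, 0x2 };
    syncTprAfterRun(&Vmcb, 0x00, true /*fTprPatching*/);
    return 0;
}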
*/ … … 4860 4894 int rc = APICSetTpr(pVCpu, pVmcbCtrl->IntCtrl.n.u8VTPR << 4); 4861 4895 AssertRC(rc); 4862 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);4896 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 4863 4897 } 4864 4898 } … … 4896 4930 HMSVM_ASSERT_CPU_SAFE(); 4897 4931 4898 /* Preparatory work for running guest code, this may force us to return4899 toring-3. This bugger disables interrupts on VINF_SUCCESS! */4932 /* Preparatory work for running nested-guest code, this may force us to return to 4933 ring-3. This bugger disables interrupts on VINF_SUCCESS! */ 4900 4934 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 4901 4935 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient); … … 4905 4939 /* 4906 4940 * No longjmps to ring-3 from this point on!!! 4907 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic. 4908 * This also disables flushing of the R0-logger instance (if any). 4941 * 4942 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, 4943 * better than a kernel panic. This also disables flushing of the R0-logger instance. 4909 4944 */ 4910 4945 hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient); 4911 4946 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx); 4912 4947 4913 /* Restore any residual host-state and save any bits shared between host 4914 and guestinto the guest-CPU state. Re-enables interrupts! */4948 /* Restore any residual host-state and save any bits shared between host and guest 4949 into the guest-CPU state. Re-enables interrupts! */ 4915 4950 hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc); 4916 4951 … … 4977 5012 (unsigned)RTMpCpuId(), *pcLoops)); 4978 5013 4979 /* Preparatory work for running guest code, this may force us to return4980 toring-3. This bugger disables interrupts on VINF_SUCCESS! */5014 /* Preparatory work for running nested-guest code, this may force us to return to 5015 ring-3. This bugger disables interrupts on VINF_SUCCESS! */ 4981 5016 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 4982 5017 rc = hmR0SvmPreRunGuest(pVM, pVCpu, pCtx, &SvmTransient); … … 4986 5021 /* 4987 5022 * No longjmps to ring-3 from this point on!!! 4988 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic. 4989 * This also disables flushing of the R0-logger instance (if any). 5023 * 5024 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, 5025 * better than a kernel panic. This also disables flushing of the R0-logger instance. 4990 5026 */ 4991 5027 VMMRZCallRing3Disable(pVCpu); … … 4995 5031 rc = hmR0SvmRunGuest(pVM, pVCpu, pCtx); 4996 5032 4997 /* 4998 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state. 4999 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!! 5000 */ 5033 /* Restore any residual host-state and save any bits shared between host and guest 5034 into the guest-CPU state. Re-enables interrupts! */ 5001 5035 hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc); 5036 5002 5037 if (RT_UNLIKELY( rc != VINF_SUCCESS /* Check for VMRUN errors. */ 5003 5038 || SvmTransient.u64ExitCode == SVM_EXIT_INVALID)) /* Check for invalid guest-state errors. 
*/ … … 5035 5070 break; 5036 5071 } 5037 pVCpu->hm.s.fC ontextUseFlags |= HM_CHANGED_GUEST_DEBUG;5072 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR_MASK; 5038 5073 } 5039 5074 … … 5081 5116 HMSVM_ASSERT_CPU_SAFE(); 5082 5117 5083 /* Preparatory work for running nested-guest code, this may force us to return 5084 toring-3. This bugger disables interrupts on VINF_SUCCESS! */5118 /* Preparatory work for running nested-guest code, this may force us to return to 5119 ring-3. This bugger disables interrupts on VINF_SUCCESS! */ 5085 5120 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x); 5086 5121 rc = hmR0SvmPreRunGuestNested(pVM, pVCpu, pCtx, &SvmTransient); … … 5093 5128 /* 5094 5129 * No longjmps to ring-3 from this point on!!! 5095 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic. 5096 * This also disables flushing of the R0-logger instance (if any). 5130 * 5131 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, 5132 * better than a kernel panic. This also disables flushing of the R0-logger instance. 5097 5133 */ 5098 5134 hmR0SvmPreRunGuestCommitted(pVCpu, pCtx, &SvmTransient); … … 5100 5136 rc = hmR0SvmRunGuestNested(pVM, pVCpu, pCtx); 5101 5137 5102 /* Restore any residual host-state and save any bits shared between host 5103 and guestinto the guest-CPU state. Re-enables interrupts! */5138 /* Restore any residual host-state and save any bits shared between host and guest 5139 into the guest-CPU state. Re-enables interrupts! */ 5104 5140 hmR0SvmPostRunGuest(pVCpu, pCtx, &SvmTransient, rc); 5105 5141 … … 5114 5150 5115 5151 /* Invalid nested-guest state. Cause a #VMEXIT but assert on strict builds. */ 5116 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);5152 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 5117 5153 AssertMsgFailed(("Invalid nested-guest state. rc=%Rrc u64ExitCode=%#RX64\n", rc, SvmTransient.u64ExitCode)); 5118 5154 rc = VBOXSTRICTRC_TODO(IEMExecSvmVmexit(pVCpu, SVM_EXIT_INVALID, 0, 0)); … … 5236 5272 Assert(pSvmTransient->u64ExitCode <= SVM_EXIT_MAX); 5237 5273 5238 /** @todo UseIEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK instead of5239 * HMSVM_CPUMCTX_EXTRN_ALL b elow. See todo in5240 * HMSvmNstGstVmExitNotify(). */5274 /** @todo Figure out why using IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK instead of 5275 * HMSVM_CPUMCTX_EXTRN_ALL breaks nested guests (XP Pro, DSL etc.), see 5276 * also HMSvmNstGstVmExitNotify(). */ 5241 5277 #define NST_GST_VMEXIT_CALL_RET(a_pVCpu, a_pCtx, a_uExitCode, a_uExitInfo1, a_uExitInfo2) \ 5242 5278 do { \ 5243 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL); \5279 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \ 5244 5280 return VBOXSTRICTRC_TODO(IEMExecSvmVmexit((a_pVCpu), (a_uExitCode), (a_uExitInfo1), (a_uExitInfo2))); \ 5245 5281 } while (0) 5246 5282 5247 5283 /* 5248 * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected 5249 * by thenested-guest. If it isn't, it should be handled by the (outer) guest.5284 * For all the #VMEXITs here we primarily figure out if the #VMEXIT is expected by the 5285 * nested-guest. If it isn't, it should be handled by the (outer) guest. 5250 5286 */ 5251 5287 PSVMVMCB pVmcbNstGst = pCtx->hwvirt.svm.CTX_SUFF(pVmcb); … … 5364 5400 5365 5401 /* If the nested-guest is not intercepting #PFs, forward the #PF to the guest. 
*/ 5366 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,CPUMCTX_EXTRN_CR2);5402 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2); 5367 5403 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, u32ErrCode, uFaultAddress); 5368 5404 return VINF_SUCCESS; … … 5427 5463 case SVM_EXIT_WRITE_CR3: 5428 5464 case SVM_EXIT_WRITE_CR4: 5429 case SVM_EXIT_WRITE_CR8: /** @todo Shouldn't writes to CR8 go to V_TPR instead since we run with V_INTR_MASKING set?*/5465 case SVM_EXIT_WRITE_CR8: /* CR8 writes would go to the V_TPR rather than here, since we run with V_INTR_MASKING. */ 5430 5466 { 5431 5467 uint8_t const uCr = uExitCode - SVM_EXIT_WRITE_CR0; 5432 Log4 (("hmR0SvmHandleExitNested:Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2));5468 Log4Func(("Write CR%u: uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", uCr, uExitInfo1, uExitInfo2)); 5433 5469 5434 5470 if (HMIsGuestSvmWriteCRxInterceptSet(pVCpu, uCr)) … … 5459 5495 * We shouldn't direct physical interrupts, NMIs, SMIs to the nested-guest. 5460 5496 * 5461 * Although we don't intercept SMIs, the nested-guest might. Therefore, we 5462 * might get an SMI #VMEXIT here so simply ignore rather than causing a5463 * correspondingnested-guest #VMEXIT.5497 * Although we don't intercept SMIs, the nested-guest might. Therefore, we might 5498 * get an SMI #VMEXIT here so simply ignore rather than causing a corresponding 5499 * nested-guest #VMEXIT. 5464 5500 */ 5501 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK); 5465 5502 return hmR0SvmExitIntr(pVCpu, pCtx, pSvmTransient); 5466 5503 } … … 5527 5564 } 5528 5565 5529 case SVM_EXIT_XCPT_ 0: /* #DE */5530 /* SVM_EXIT_XCPT_ 1: */ /* #DB -Handled above. */5531 /* SVM_EXIT_XCPT_ 2: */ /* #NMI -Handled above. */5532 /* SVM_EXIT_XCPT_ 3: */ /* #BP -Handled above. */5533 case SVM_EXIT_XCPT_ 4: /* #OF */5534 case SVM_EXIT_XCPT_ 5: /* #BR */5535 /* SVM_EXIT_XCPT_ 6: */ /* #UD -Handled above. */5536 case SVM_EXIT_XCPT_ 7: /* #NM */5537 case SVM_EXIT_XCPT_ 8: /* #DF */5538 case SVM_EXIT_XCPT_ 9: /* #CO_SEG_OVERRUN */5539 case SVM_EXIT_XCPT_ 10: /* #TS */5540 case SVM_EXIT_XCPT_ 11: /* #NP */5541 case SVM_EXIT_XCPT_ 12: /* #SS */5542 case SVM_EXIT_XCPT_ 13: /* #GP */5543 /* SVM_EXIT_XCPT_ 14: */ /* #PF -Handled above. */5544 case SVM_EXIT_XCPT_15: /* Reserved.*/5545 /* SVM_EXIT_XCPT_ 16: */ /* #MF -Handled above. */5546 /* SVM_EXIT_XCPT_ 17: */ /* #AC -Handled above. */5547 case SVM_EXIT_XCPT_ 18: /* #MC */5548 case SVM_EXIT_XCPT_ 19: /* #XF */5566 case SVM_EXIT_XCPT_DE: 5567 /* SVM_EXIT_XCPT_DB: */ /* Handled above. */ 5568 /* SVM_EXIT_XCPT_NMI: */ /* Handled above. */ 5569 /* SVM_EXIT_XCPT_BP: */ /* Handled above. */ 5570 case SVM_EXIT_XCPT_OF: 5571 case SVM_EXIT_XCPT_BR: 5572 /* SVM_EXIT_XCPT_UD: */ /* Handled above. */ 5573 case SVM_EXIT_XCPT_NM: 5574 case SVM_EXIT_XCPT_DF: 5575 case SVM_EXIT_XCPT_CO_SEG_OVERRUN: 5576 case SVM_EXIT_XCPT_TS: 5577 case SVM_EXIT_XCPT_NP: 5578 case SVM_EXIT_XCPT_SS: 5579 case SVM_EXIT_XCPT_GP: 5580 /* SVM_EXIT_XCPT_PF: */ /* Handled above. */ 5581 case SVM_EXIT_XCPT_15: /* Reserved. */ 5582 /* SVM_EXIT_XCPT_MF: */ /* Handled above. */ 5583 /* SVM_EXIT_XCPT_AC: */ /* Handled above. 
*/ 5584 case SVM_EXIT_XCPT_MC: 5585 case SVM_EXIT_XCPT_XF: 5549 5586 case SVM_EXIT_XCPT_20: case SVM_EXIT_XCPT_21: case SVM_EXIT_XCPT_22: case SVM_EXIT_XCPT_23: 5550 5587 case SVM_EXIT_XCPT_24: case SVM_EXIT_XCPT_25: case SVM_EXIT_XCPT_26: case SVM_EXIT_XCPT_27: … … 5692 5729 do { \ 5693 5730 if ((a_fDbg) == 1) \ 5694 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL); \5731 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); \ 5695 5732 int rc = a_CallExpr; \ 5696 /* if ((a_fDbg) == 1) */\5697 /* HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); */\5733 if ((a_fDbg) == 1) \ 5734 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \ 5698 5735 return rc; \ 5699 5736 } while (0) … … 5901 5938 else 5902 5939 { 5903 Log4 (("hmR0SvmInterpretInvlPgEx invalid parameter type %#x\n", Param1.type));5940 Log4Func(("Invalid parameter type %#x\n", Param1.type)); 5904 5941 rc = VERR_EM_INTERPRETER; 5905 5942 } … … 5939 5976 } 5940 5977 else 5941 Log4 (("hmR0SvmInterpretInvlpg: EMInterpretDisasCurrent returned%Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));5978 Log4Func(("EMInterpretDisasCurrent failed! rc=%Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode)); 5942 5979 } 5943 5980 return VERR_EM_INTERPRETER; … … 5945 5982 5946 5983 5947 #ifdef HMSVM_USE_IEM_EVENT_REFLECTION5948 5984 /** 5949 5985 * Gets the IEM exception flags for the specified SVM event. … … 5999 6035 } 6000 6036 6001 #else6002 /**6003 * Determines if an exception is a contributory exception.6004 *6005 * Contributory exceptions are ones which can cause double-faults unless the6006 * original exception was a benign exception. Page-fault is intentionally not6007 * included here as it's a conditional contributory exception.6008 *6009 * @returns @c true if the exception is contributory, @c false otherwise.6010 * @param uVector The exception vector.6011 */6012 DECLINLINE(bool) hmR0SvmIsContributoryXcpt(const uint32_t uVector)6013 {6014 switch (uVector)6015 {6016 case X86_XCPT_GP:6017 case X86_XCPT_SS:6018 case X86_XCPT_NP:6019 case X86_XCPT_TS:6020 case X86_XCPT_DE:6021 return true;6022 default:6023 break;6024 }6025 return false;6026 }6027 #endif /* HMSVM_USE_IEM_EVENT_REFLECTION */6028 6029 6037 6030 6038 /** … … 6049 6057 int rc = VINF_SUCCESS; 6050 6058 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6051 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,CPUMCTX_EXTRN_CR2);6059 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR2); 6052 6060 6053 6061 Log4(("EXITINTINFO: Pending vectoring event %#RX64 Valid=%RTbool ErrValid=%RTbool Err=%#RX32 Type=%u Vector=%u\n", … … 6055 6063 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, pVmcb->ctrl.ExitIntInfo.n.u3Type, pVmcb->ctrl.ExitIntInfo.n.u8Vector)); 6056 6064 6057 /* See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". The EXITINTINFO (if valid) contains the prior exception (IDT vector) 6058 * that was trying to be delivered to the guest which caused a #VMEXIT which was intercepted (Exit vector). */ 6065 /* 6066 * The EXITINTINFO (if valid) contains the prior exception (IDT vector) that was trying to 6067 * be delivered to the guest which caused a #VMEXIT which was intercepted (Exit vector). 6068 * 6069 * See AMD spec. 15.7.3 "EXITINFO Pseudo-Code". 6070 */ 6059 6071 if (pVmcb->ctrl.ExitIntInfo.n.u1Valid) 6060 6072 { 6061 #ifdef HMSVM_USE_IEM_EVENT_REFLECTION6062 6073 IEMXCPTRAISE enmRaise; 6063 6074 IEMXCPTRAISEINFO fRaiseInfo; … … 6074 6085 { 6075 6086 /* 6076 * If delivery of an event caused a #VMEXIT that is not an exception (e.g. 
#NPF) then we6077 * end up here.6087 * If delivery of an event caused a #VMEXIT that is not an exception (e.g. #NPF) 6088 * then we end up here. 6078 6089 * 6079 6090 * If the event was: 6080 * - a software interrupt, we can re-execute the instruction which will regenerate6081 * the event.6091 * - a software interrupt, we can re-execute the instruction which will 6092 * regenerate the event. 6082 6093 * - an NMI, we need to clear NMI blocking and re-inject the NMI. 6083 6094 * - a hardware exception or external interrupt, we re-inject it. … … 6108 6119 { 6109 6120 pSvmTransient->fVectoringPF = true; 6110 Log4 (("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n", pCtx->cr2));6121 Log4Func(("IDT: Pending vectoring #PF due to delivery of Ext-Int/NMI. uCR2=%#RX64\n", pCtx->cr2)); 6111 6122 } 6112 6123 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION … … 6128 6139 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, GCPtrFaultAddress); 6129 6140 6130 Log4 (("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n",6131 pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid),6132 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress));6141 Log4Func(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32 GCPtrFaultAddress=%#RX64\n", 6142 pVmcb->ctrl.ExitIntInfo.u, RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid), 6143 pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode, GCPtrFaultAddress)); 6133 6144 } 6134 6145 break; … … 6144 6155 { 6145 6156 /* 6146 * Determing a vectoring double #PF condition. Used later, when PGM evaluates the 6147 * second #PF as a guest #PF (and not a shadow #PF) and needs to be converted into a #DF. 6157 * Determing a vectoring double #PF condition. Used later, when PGM evaluates 6158 * the second #PF as a guest #PF (and not a shadow #PF) and needs to be 6159 * converted into a #DF. 6148 6160 */ 6149 6161 if (fRaiseInfo & IEMXCPTRAISEINFO_PF_PF) 6150 6162 { 6151 Log4 (("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pCtx->cr2));6163 Log4Func(("IDT: Pending vectoring double #PF uCR2=%#RX64\n", pCtx->cr2)); 6152 6164 pSvmTransient->fVectoringDoublePF = true; 6153 6165 Assert(rc == VINF_SUCCESS); … … 6181 6193 } 6182 6194 } 6183 #else6184 uint8_t uIdtVector = pVmcb->ctrl.ExitIntInfo.n.u8Vector;6185 6186 typedef enum6187 {6188 SVMREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */6189 SVMREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */6190 SVMREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */6191 SVMREFLECTXCPT_HANG, /* Indicate bad VM trying to deadlock the CPU. */6192 SVMREFLECTXCPT_NONE /* Nothing to reflect. */6193 } SVMREFLECTXCPT;6194 6195 SVMREFLECTXCPT enmReflect = SVMREFLECTXCPT_NONE;6196 bool fReflectingNmi = false;6197 if (pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXCEPTION)6198 {6199 if (pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0 <= SVM_EXIT_XCPT_31)6200 {6201 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0);6202 6203 #ifdef VBOX_STRICT6204 if ( hmR0SvmIsContributoryXcpt(uIdtVector)6205 && uExitVector == X86_XCPT_PF)6206 {6207 Log4(("IDT: Contributory #PF idCpu=%u uCR2=%#RX64\n", pVCpu->idCpu, pCtx->cr2));6208 }6209 #endif6210 6211 if ( uIdtVector == X86_XCPT_BP6212 || uIdtVector == X86_XCPT_OF)6213 {6214 /* Ignore INT3/INTO, just re-execute. See @bugref{8357}. 
*/6215 }6216 else if ( uExitVector == X86_XCPT_PF6217 && uIdtVector == X86_XCPT_PF)6218 {6219 pSvmTransient->fVectoringDoublePF = true;6220 Log4(("IDT: Vectoring double #PF uCR2=%#RX64\n", pCtx->cr2));6221 }6222 else if ( uExitVector == X86_XCPT_AC6223 && uIdtVector == X86_XCPT_AC)6224 {6225 enmReflect = SVMREFLECTXCPT_HANG;6226 Log4(("IDT: Nested #AC - Bad guest\n"));6227 }6228 else if ( (pVmcb->ctrl.u32InterceptXcpt & HMSVM_CONTRIBUTORY_XCPT_MASK)6229 && hmR0SvmIsContributoryXcpt(uExitVector)6230 && ( hmR0SvmIsContributoryXcpt(uIdtVector)6231 || uIdtVector == X86_XCPT_PF))6232 {6233 enmReflect = SVMREFLECTXCPT_DF;6234 Log4(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo,6235 uIdtVector, uExitVector));6236 }6237 else if (uIdtVector == X86_XCPT_DF)6238 {6239 enmReflect = SVMREFLECTXCPT_TF;6240 Log4(("IDT: Pending vectoring triple-fault %#RX64 uIdtVector=%#x uExitVector=%#x\n",6241 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));6242 }6243 else6244 enmReflect = SVMREFLECTXCPT_XCPT;6245 }6246 else6247 {6248 /*6249 * If event delivery caused an #VMEXIT that is not an exception (e.g. #NPF) then reflect the original6250 * exception to the guest after handling the #VMEXIT.6251 */6252 enmReflect = SVMREFLECTXCPT_XCPT;6253 }6254 }6255 else if ( pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_EXTERNAL_IRQ6256 || pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI)6257 {6258 enmReflect = SVMREFLECTXCPT_XCPT;6259 fReflectingNmi = RT_BOOL(pVmcb->ctrl.ExitIntInfo.n.u3Type == SVM_EVENT_NMI);6260 6261 if (pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0 <= SVM_EXIT_XCPT_31)6262 {6263 uint8_t uExitVector = (uint8_t)(pSvmTransient->u64ExitCode - SVM_EXIT_XCPT_0);6264 if (uExitVector == X86_XCPT_PF)6265 {6266 pSvmTransient->fVectoringPF = true;6267 Log4(("IDT: Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pCtx->cr2));6268 }6269 }6270 }6271 /* else: Ignore software interrupts (INT n) as they reoccur when restarting the instruction. */6272 6273 switch (enmReflect)6274 {6275 case SVMREFLECTXCPT_XCPT:6276 {6277 /* If we are re-injecting the NMI, clear NMI blocking. */6278 if (fReflectingNmi)6279 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);6280 6281 Assert(pVmcb->ctrl.ExitIntInfo.n.u3Type != SVM_EVENT_SOFTWARE_INT);6282 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);6283 hmR0SvmSetPendingEvent(pVCpu, &pVmcb->ctrl.ExitIntInfo, 0 /* GCPtrFaultAddress */);6284 6285 /* If uExitVector is #PF, CR2 value will be updated from the VMCB if it's a guest #PF. See hmR0SvmExitXcptPF(). 
*/6286 Log4(("IDT: Pending vectoring event %#RX64 ErrValid=%RTbool Err=%#RX32\n", pVmcb->ctrl.ExitIntInfo.u,6287 !!pVmcb->ctrl.ExitIntInfo.n.u1ErrorCodeValid, pVmcb->ctrl.ExitIntInfo.n.u32ErrorCode));6288 break;6289 }6290 6291 case SVMREFLECTXCPT_DF:6292 {6293 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);6294 hmR0SvmSetPendingXcptDF(pVCpu);6295 rc = VINF_HM_DOUBLE_FAULT;6296 break;6297 }6298 6299 case SVMREFLECTXCPT_TF:6300 {6301 rc = VINF_EM_RESET;6302 break;6303 }6304 6305 case SVMREFLECTXCPT_HANG:6306 {6307 rc = VERR_EM_GUEST_CPU_HANG;6308 break;6309 }6310 6311 default:6312 Assert(rc == VINF_SUCCESS);6313 break;6314 }6315 #endif /* HMSVM_USE_IEM_EVENT_REFLECTION */6316 6195 } 6317 6196 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET || rc == VERR_EM_GUEST_CPU_HANG); … … 6414 6293 6415 6294 /* 6416 * AMD-V has no preemption timer and the generic periodic preemption timer has no way to signal -before- the timer 6417 * fires if the current interrupt is our own timer or a some other host interrupt. We also cannot examine what 6418 * interrupt it is until the host actually take the interrupt. 6295 * AMD-V has no preemption timer and the generic periodic preemption timer has no way to 6296 * signal -before- the timer fires if the current interrupt is our own timer or a some 6297 * other host interrupt. We also cannot examine what interrupt it is until the host 6298 * actually take the interrupt. 6419 6299 * 6420 * Going back to executing guest code here unconditionally causes random scheduling problems (observed on an6421 * AMD Phenom 9850 Quad-Core on Windows 64-bit host).6300 * Going back to executing guest code here unconditionally causes random scheduling 6301 * problems (observed on an AMD Phenom 9850 Quad-Core on Windows 64-bit host). 
6422 6302 */ 6423 6303 return VINF_EM_RAW_INTERRUPT; … … 6433 6313 6434 6314 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2); 6435 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);6436 6315 int rc = VINF_SUCCESS; 6437 6316 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); … … 6448 6327 6449 6328 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2); 6450 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);6451 6329 int rc = VINF_SUCCESS; 6452 6330 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); … … 6462 6340 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6463 6341 6464 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS); 6342 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP 6343 | CPUMCTX_EXTRN_CS); 6465 6344 VBOXSTRICTRC rcStrict; 6466 6345 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu, … … 6488 6367 */ 6489 6368 Assert(pCtx == &pVCpu->cpum.GstCtx); 6490 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6369 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6491 6370 6492 6371 Log4(("CpuIdExit/%u: %04x:%08RX64: %#x/%#x -> EMHistoryExec\n", … … 6499 6378 VBOXSTRICTRC_VAL(rcStrict), pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip)); 6500 6379 } 6501 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);6502 6380 return VBOXSTRICTRC_TODO(rcStrict); 6503 6381 } … … 6510 6388 { 6511 6389 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6512 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6390 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6513 6391 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 2)); 6514 6392 if (rcStrict == VINF_SUCCESS) … … 6517 6395 rcStrict = VINF_SUCCESS; 6518 6396 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict); 6519 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);6520 6397 return VBOXSTRICTRC_TODO(rcStrict); 6521 6398 } … … 6529 6406 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6530 6407 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3)); 6531 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6408 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6532 6409 if (rcStrict == VINF_SUCCESS) 6533 6410 pSvmTransient->fUpdateTscOffsetting = true; … … 6535 6412 rcStrict = VINF_SUCCESS; 6536 6413 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict); 6537 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);6538 6414 return VBOXSTRICTRC_TODO(rcStrict); 6539 6415 } … … 6546 6422 { 6547 6423 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6548 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,CPUMCTX_EXTRN_CR06549 | CPUMCTX_EXTRN_CR46550 | CPUMCTX_EXTRN_SS);6424 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 6425 | CPUMCTX_EXTRN_CR4 6426 | CPUMCTX_EXTRN_SS); 6551 6427 6552 6428 int rc = EMInterpretRdpmc(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); … … 6561 6437 rc = VERR_EM_INTERPRETER; 6562 6438 } 6563 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);6564 6439 return rc; 6565 6440 } … … 6574 6449 PVM pVM = pVCpu->CTX_SUFF(pVM); 6575 6450 Assert(!pVM->hm.s.fNestedPaging); 6576 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);6577 6451 6578 6452 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx); … … 6581 6455 && fSupportsNextRipSave) 6582 6456 { 6583 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6457 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6584 6458 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6585 6459 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip; … … 6590 6464 } 6591 6465 6592 
HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);6466 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6593 6467 int rc = hmR0SvmInterpretInvlpg(pVM, pVCpu, pCtx); /* Updates RIP if successful. */ 6594 6468 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER); … … 6608 6482 int rc = EMShouldContinueAfterHalt(pVCpu, pCtx) ? VINF_SUCCESS : VINF_EM_HALT; 6609 6483 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6484 6610 6485 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 6611 6486 if (rc != VINF_SUCCESS) … … 6621 6496 { 6622 6497 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6623 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,CPUMCTX_EXTRN_CR06624 6498 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 6499 | CPUMCTX_EXTRN_SS); 6625 6500 6626 6501 int rc = EMInterpretMonitor(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); … … 6646 6521 { 6647 6522 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6648 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,CPUMCTX_EXTRN_CR06649 6523 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 6524 | CPUMCTX_EXTRN_SS); 6650 6525 6651 6526 VBOXSTRICTRC rc2 = EMInterpretMWait(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); … … 6682 6557 { 6683 6558 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6684 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);6559 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6685 6560 return VINF_EM_RESET; 6686 6561 } … … 6694 6569 RT_NOREF(pCtx); 6695 6570 PCSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6696 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);6571 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6697 6572 AssertMsgFailed(("hmR0SvmExitUnexpected: ExitCode=%#RX64 uExitInfo1=%#RX64 uExitInfo2=%#RX64\n", pSvmTransient->u64ExitCode, 6698 6573 pVmcb->ctrl.u64ExitInfo1, pVmcb->ctrl.u64ExitInfo2)); … … 6710 6585 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6711 6586 6712 Log4(("hmR0SvmExitReadCRx: CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip)); 6713 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0]); 6587 Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip)); 6588 #ifdef VBOX_WITH_STATISTICS 6589 switch (pSvmTransient->u64ExitCode) 6590 { 6591 case SVM_EXIT_READ_CR0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break; 6592 case SVM_EXIT_READ_CR2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break; 6593 case SVM_EXIT_READ_CR3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break; 6594 case SVM_EXIT_READ_CR4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break; 6595 case SVM_EXIT_READ_CR8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break; 6596 } 6597 #endif 6714 6598 6715 6599 bool const fSupportsDecodeAssists = hmR0SvmSupportsDecodeAssists(pVCpu, pCtx); … … 6722 6606 if (fMovCRx) 6723 6607 { 6724 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6608 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6725 6609 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip; 6726 6610 uint8_t const iCrReg = pSvmTransient->u64ExitCode - SVM_EXIT_READ_CR0; … … 6733 6617 } 6734 6618 6735 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);6619 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6736 6620 VBOXSTRICTRC rc2 = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */); 6737 6621 int rc = VBOXSTRICTRC_VAL(rc2); … … 6766 6650 if (fMovCRx) 6767 6651 { 6768 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6652 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, 
IEM_CPUMCTX_EXTRN_MUST_MASK); 6769 6653 uint8_t const cbInstr = pVmcb->ctrl.u64NextRIP - pCtx->rip; 6770 6654 uint8_t const iGReg = pVmcb->ctrl.u64ExitInfo1 & SVM_EXIT1_MOV_CRX_GPR_NUMBER; 6771 Log4 (("hmR0SvmExitWriteCRx:Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg));6655 Log4Func(("Mov CR%u w/ iGReg=%#x\n", iCrReg, iGReg)); 6772 6656 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, cbInstr, iCrReg, iGReg); 6773 6657 fDecodedInstr = true; … … 6778 6662 if (!fDecodedInstr) 6779 6663 { 6780 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6781 Log4 (("hmR0SvmExitWriteCRx:iCrReg=%#x\n", iCrReg));6664 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 6665 Log4Func(("iCrReg=%#x\n", iCrReg)); 6782 6666 rcStrict = IEMExecOneBypassEx(pVCpu, CPUMCTX2CORE(pCtx), NULL); 6783 6667 if (RT_UNLIKELY( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED … … 6790 6674 switch (iCrReg) 6791 6675 { 6792 case 0: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); break; 6793 case 2: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR2); break; 6794 case 3: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3); break; 6795 case 4: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4); break; 6796 case 8: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); break; 6676 case 0: 6677 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 6678 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write); 6679 break; 6680 6681 case 2: 6682 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR2); 6683 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write); 6684 break; 6685 6686 case 3: 6687 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3); 6688 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write); 6689 break; 6690 6691 case 4: 6692 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4); 6693 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write); 6694 break; 6695 6696 case 8: 6697 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 6698 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write); 6699 break; 6700 6797 6701 default: 6798 6702 { … … 6811 6715 6812 6716 /** 6717 * \#VMEXIT helper for read MSRs, see hmR0SvmExitMsr. 6718 * 6719 * @returns VBox status code. 6720 * @param pVCpu The cross context virtual CPU structure. 6721 * @param pVmcb Pointer to the VM control block. 6722 */ 6723 static int hmR0SvmExitReadMsr(PVMCPU pVCpu, PSVMVMCB pVmcb) 6724 { 6725 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6726 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 6727 | CPUMCTX_EXTRN_RFLAGS 6728 | CPUMCTX_EXTRN_SS 6729 | CPUMCTX_EXTRN_ALL_MSRS); 6730 6731 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr); 6732 Log4Func(("idMsr=%#RX32\n", pCtx->ecx)); 6733 6734 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx); 6735 if (fSupportsNextRipSave) 6736 { 6737 int rc = EMInterpretRdmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6738 if (RT_LIKELY(rc == VINF_SUCCESS)) 6739 { 6740 pCtx->rip = pVmcb->ctrl.u64NextRIP; 6741 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6742 return VINF_SUCCESS; 6743 } 6744 6745 AssertMsg( rc == VERR_EM_INTERPRETER 6746 || rc == VINF_CPUM_R3_MSR_READ, ("EMInterpretRdmsr failed rc=%Rrc\n", rc)); 6747 return rc; 6748 } 6749 6750 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6751 int rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0)); 6752 if (RT_UNLIKELY(rc != VINF_SUCCESS)) 6753 { 6754 AssertMsg( rc == VERR_EM_INTERPRETER 6755 || rc == VINF_CPUM_R3_MSR_READ, ("EMInterpretInstruction failed rc=%Rrc\n", rc)); 6756 } 6757 /* RIP updated by EMInterpretInstruction(). 
*/ 6758 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6759 return rc; 6760 } 6761 6762 6763 /** 6764 * \#VMEXIT helper for write MSRs, see hmR0SvmExitMsr. 6765 * 6766 * @returns VBox status code. 6767 * @param pVCpu The cross context virtual CPU structure. 6768 * @param pVmcb Pointer to the VM control block. 6769 * @param pSvmTransient Pointer to the SVM-transient structure. 6770 */ 6771 static int hmR0SvmExitWriteMsr(PVMCPU pVCpu, PSVMVMCB pVmcb, PSVMTRANSIENT pSvmTransient) 6772 { 6773 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6774 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0 6775 | CPUMCTX_EXTRN_RFLAGS 6776 | CPUMCTX_EXTRN_SS 6777 | CPUMCTX_EXTRN_ALL_MSRS); 6778 6779 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr); 6780 Log4Func(("idMsr=%#RX32\n", pCtx->ecx)); 6781 6782 /* 6783 * Handle TPR patching MSR writes. 6784 * We utilitize the LSTAR MSR for patching. 6785 */ 6786 if ( pVCpu->CTX_SUFF(pVM)->hm.s.fTPRPatchingActive 6787 && pCtx->ecx == MSR_K8_LSTAR) 6788 { 6789 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr) 6790 { 6791 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */ 6792 int rc2 = APICSetTpr(pVCpu, pCtx->eax & 0xff); 6793 AssertRC(rc2); 6794 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 6795 } 6796 6797 int rc = VINF_SUCCESS; 6798 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2); 6799 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6800 return rc; 6801 } 6802 6803 /* 6804 * Handle regular MSR writes. 6805 */ 6806 int rc; 6807 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx); 6808 if (fSupportsNextRipSave) 6809 { 6810 rc = EMInterpretWrmsr(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pCtx)); 6811 if (RT_LIKELY(rc == VINF_SUCCESS)) 6812 { 6813 pCtx->rip = pVmcb->ctrl.u64NextRIP; 6814 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6815 } 6816 else 6817 AssertMsg( rc == VERR_EM_INTERPRETER 6818 || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc)); 6819 } 6820 else 6821 { 6822 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6823 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */)); 6824 if (RT_LIKELY(rc == VINF_SUCCESS)) 6825 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); /* RIP updated by EMInterpretInstruction(). */ 6826 else 6827 AssertMsg( rc == VERR_EM_INTERPRETER 6828 || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: WrMsr. EMInterpretInstruction failed rc=%Rrc\n", rc)); 6829 } 6830 6831 if (rc == VINF_SUCCESS) 6832 { 6833 /* If this is an X2APIC WRMSR access, update the APIC TPR state. */ 6834 if ( pCtx->ecx >= MSR_IA32_X2APIC_START 6835 && pCtx->ecx <= MSR_IA32_X2APIC_END) 6836 { 6837 /* 6838 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest(). 6839 * When full APIC register virtualization is implemented we'll have to make sure 6840 * APIC state is saved from the VMCB before EMInterpretWrmsr() changes it. 
6841 */ 6842 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 6843 } 6844 else 6845 { 6846 switch (pCtx->ecx) 6847 { 6848 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break; 6849 case MSR_K6_EFER: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR); break; 6850 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break; 6851 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break; 6852 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break; 6853 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break; 6854 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break; 6855 } 6856 } 6857 } 6858 6859 /* RIP has been updated by above after EMInterpretWrmsr() or by EMInterpretInstruction(). */ 6860 return rc; 6861 } 6862 6863 6864 /** 6813 6865 * \#VMEXIT handler for MSR read and writes (SVM_EXIT_MSR). Conditional 6814 6866 * \#VMEXIT. … … 6817 6869 { 6818 6870 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6819 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_CR06820 | CPUMCTX_EXTRN_RFLAGS6821 | CPUMCTX_EXTRN_SS6822 | CPUMCTX_EXTRN_ALL_MSRS);6823 6871 6824 6872 PSVMVMCB pVmcb = hmR0SvmGetCurrentVmcb(pVCpu, pCtx); 6825 PVM pVM = pVCpu->CTX_SUFF(pVM); 6826 6827 int rc; 6828 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE) 6829 { 6830 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr); 6831 Log4(("MSR Write: idMsr=%#RX32\n", pCtx->ecx)); 6832 6833 /* Handle TPR patching; intercepted LSTAR write. */ 6834 if ( pVM->hm.s.fTPRPatchingActive 6835 && pCtx->ecx == MSR_K8_LSTAR) 6836 { 6837 if ((pCtx->eax & 0xff) != pSvmTransient->u8GuestTpr) 6838 { 6839 /* Our patch code uses LSTAR for TPR caching for 32-bit guests. */ 6840 int rc2 = APICSetTpr(pVCpu, pCtx->eax & 0xff); 6841 AssertRC(rc2); 6842 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 6843 } 6844 rc = VINF_SUCCESS; 6845 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2); 6846 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6847 return rc; 6848 } 6849 6850 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx); 6851 if (fSupportsNextRipSave) 6852 { 6853 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 6854 if (RT_LIKELY(rc == VINF_SUCCESS)) 6855 { 6856 pCtx->rip = pVmcb->ctrl.u64NextRIP; 6857 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6858 } 6859 else 6860 AssertMsg( rc == VERR_EM_INTERPRETER 6861 || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: EMInterpretWrmsr failed rc=%Rrc\n", rc)); 6862 } 6863 else 6864 { 6865 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6866 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0 /* pvFault */)); 6867 if (RT_LIKELY(rc == VINF_SUCCESS)) 6868 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); /* RIP updated by EMInterpretInstruction(). */ 6869 else 6870 AssertMsg( rc == VERR_EM_INTERPRETER 6871 || rc == VINF_CPUM_R3_MSR_WRITE, ("hmR0SvmExitMsr: WrMsr. EMInterpretInstruction failed rc=%Rrc\n", rc)); 6872 } 6873 6874 if (rc == VINF_SUCCESS) 6875 { 6876 /* If this is an X2APIC WRMSR access, update the APIC state as well. */ 6877 if ( pCtx->ecx >= MSR_IA32_X2APIC_START 6878 && pCtx->ecx <= MSR_IA32_X2APIC_END) 6879 { 6880 /* 6881 * We've already saved the APIC related guest-state (TPR) in hmR0SvmPostRunGuest(). 
When full APIC register 6882 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCB before 6883 * EMInterpretWrmsr() changes it. 6884 */ 6885 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 6886 } 6887 else 6888 { 6889 switch (pCtx->ecx) 6890 { 6891 case MSR_K6_EFER: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR); break; 6892 case MSR_IA32_TSC: pSvmTransient->fUpdateTscOffsetting = true; break; 6893 case MSR_K8_FS_BASE: 6894 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break; 6895 case MSR_IA32_SYSENTER_CS: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break; 6896 case MSR_IA32_SYSENTER_EIP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break; 6897 case MSR_IA32_SYSENTER_ESP: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break; 6898 } 6899 } 6900 } 6901 } 6902 else 6903 { 6904 /* MSR Read access. */ 6905 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr); 6906 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ); 6907 Log4(("MSR Read: idMsr=%#RX32\n", pCtx->ecx)); 6908 6909 bool const fSupportsNextRipSave = hmR0SvmSupportsNextRipSave(pVCpu, pCtx); 6910 if (fSupportsNextRipSave) 6911 { 6912 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pCtx)); 6913 if (RT_LIKELY(rc == VINF_SUCCESS)) 6914 { 6915 pCtx->rip = pVmcb->ctrl.u64NextRIP; 6916 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6917 } 6918 else 6919 AssertMsg( rc == VERR_EM_INTERPRETER 6920 || rc == VINF_CPUM_R3_MSR_READ, ("hmR0SvmExitMsr: EMInterpretRdmsr failed rc=%Rrc\n", rc)); 6921 } 6922 else 6923 { 6924 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 6925 rc = VBOXSTRICTRC_TODO(EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0)); 6926 if (RT_UNLIKELY(rc != VINF_SUCCESS)) 6927 { 6928 AssertMsg( rc == VERR_EM_INTERPRETER 6929 || rc == VINF_CPUM_R3_MSR_READ, ("hmR0SvmExitMsr: RdMsr. EMInterpretInstruction failed rc=%Rrc\n", rc)); 6930 } 6931 /* RIP updated by EMInterpretInstruction(). */ 6932 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 6933 } 6934 } 6935 6936 /* RIP has been updated by EMInterpret[Rd|Wr]msr() or EMInterpretInstruction(). */ 6937 return rc; 6873 if (pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_READ) 6874 return hmR0SvmExitReadMsr(pVCpu, pVmcb); 6875 6876 Assert(pVmcb->ctrl.u64ExitInfo1 == SVM_EXIT1_MSR_WRITE); 6877 return hmR0SvmExitWriteMsr(pVCpu, pVmcb, pSvmTransient); 6938 6878 } 6939 6879 … … 6945 6885 { 6946 6886 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 6947 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);6887 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 6948 6888 6949 6889 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); … … 7000 6940 /* Not necessary for read accesses but whatever doesn't hurt for now, will be fixed with decode assist. */ 7001 6941 /** @todo CPUM should set this flag! */ 7002 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);6942 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR_MASK); 7003 6943 HMSVM_CHECK_SINGLE_STEP(pVCpu, rc); 7004 6944 } … … 7029 6969 { 7030 6970 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7031 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);6971 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 7032 6972 7033 6973 /** @todo decode assists... 
*/ 7034 6974 VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu); 7035 6975 if (rcStrict == VINF_IEM_RAISED_XCPT) 7036 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);6976 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK); 7037 6977 7038 6978 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pCtx->cr4 & X86_CR4_OSXSAVE) && pCtx->aXcr[0] != ASMGetXcr0(); 7039 Log4 (("hmR0SvmExitXsetbv: New XCR0=%#RX64 fLoadSaveGuestXcr0=%d (cr4=%RX64) rcStrict=%Rrc\n",7040 pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0,pCtx->cr4, VBOXSTRICTRC_VAL(rcStrict)));6979 Log4Func(("New XCR0=%#RX64 fLoadSaveGuestXcr0=%d (cr4=%RX64) rcStrict=%Rrc\n", pCtx->aXcr[0], pVCpu->hm.s.fLoadSaveGuestXcr0, 6980 pCtx->cr4, VBOXSTRICTRC_VAL(rcStrict))); 7041 6981 7042 6982 HMSVM_CHECK_SINGLE_STEP(pVCpu, rcStrict); … … 7051 6991 { 7052 6992 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7053 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK | CPUMCTX_EXTRN_SREG_MASK); 6993 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 6994 | CPUMCTX_EXTRN_SREG_MASK); 7054 6995 7055 6996 /* I/O operation lookup arrays. */ … … 7057 6998 static uint32_t const s_aIOOpAnd[8] = { 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0 }; /* AND masks for saving 7058 6999 the result (in AL/AX/EAX). */ 7059 Log4 (("hmR0SvmExitIOInstr:CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip));7000 Log4Func(("CS:RIP=%04x:%#RX64\n", pCtx->cs.Sel, pCtx->rip)); 7060 7001 7061 7002 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 7075 7016 } 7076 7017 7077 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_RFLAGS); 7018 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP 7019 | CPUMCTX_EXTRN_CS 7020 | CPUMCTX_EXTRN_RFLAGS); 7078 7021 VBOXSTRICTRC rcStrict; 7079 7022 PCEMEXITREC pExitRec = NULL; … … 7097 7040 /** @todo Huh? why can't we use the segment prefix information given by AMD-V 7098 7041 * in EXITINFO1? Investigate once this thing is up and running. */ 7099 Log4 (("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue,7100 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r'));7042 Log4Func(("CS:RIP=%04x:%08RX64 %#06x/%u %c str\n", pCtx->cs.Sel, pCtx->rip, IoExitInfo.n.u16Port, cbValue, 7043 IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 'w' : 'r')); 7101 7044 AssertReturn(pCtx->dx == IoExitInfo.n.u16Port, VERR_SVM_IPE_2); 7102 7045 static IEMMODE const s_aenmAddrMode[8] = … … 7193 7136 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the 7194 7137 * execution engines about whether hyper BPs and such are pending. */ 7195 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,CPUMCTX_EXTRN_DR7);7138 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_DR7); 7196 7139 uint32_t const uDr7 = pCtx->dr[7]; 7197 7140 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK) … … 7256 7199 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 7257 7200 */ 7258 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);7201 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7259 7202 STAM_COUNTER_INC(!IoExitInfo.n.u1Str 7260 7203 ? IoExitInfo.n.u1Type == SVM_IOIO_WRITE ? 
&pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead … … 7265 7208 7266 7209 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 7267 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);7210 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 7268 7211 7269 7212 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 7281 7224 { 7282 7225 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7283 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);7226 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7284 7227 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7285 7228 … … 7293 7236 thus intentionally not copied into u32ErrCode. */ 7294 7237 7295 Log4(("#NPF at CS:RIP=%04x:%#RX64 faultaddr=%RGp errcode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, u32ErrCode)); 7238 Log4Func(("#NPF at CS:RIP=%04x:%#RX64 GCPhysFaultAddr=%RGp ErrCode=%#x \n", pCtx->cs.Sel, pCtx->rip, GCPhysFaultAddr, 7239 u32ErrCode)); 7296 7240 7297 7241 /* … … 7299 7243 */ 7300 7244 if ( pVM->hm.s.fTprPatchingAllowed 7245 && !CPUMIsGuestInSvmNestedHwVirtMode(pCtx) 7301 7246 && (GCPhysFaultAddr & PAGE_OFFSET_MASK) == XAPIC_OFF_TPR 7302 7247 && ( !(u32ErrCode & X86_TRAP_PF_P) /* Not present */ … … 7335 7280 if ((u32ErrCode & (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) == (X86_TRAP_PF_RSVD | X86_TRAP_PF_P)) 7336 7281 { 7337 /* If event delivery causes an MMIO #NPF, go back to instruction emulation as 7338 otherwise injecting the original pending event would most likely cause the same MMIO #NPF. */ 7282 /* 7283 * If event delivery causes an MMIO #NPF, go back to instruction emulation as otherwise 7284 * injecting the original pending event would most likely cause the same MMIO #NPF. 7285 */ 7339 7286 if (pVCpu->hm.s.Event.fPending) 7340 7287 return VINF_EM_RAW_INJECT_TRPM_EVENT; 7341 7288 7342 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_CS); 7289 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_RIP 7290 | CPUMCTX_EXTRN_CS); 7343 7291 VBOXSTRICTRC rcStrict; 7344 7292 PCEMEXITREC pExitRec = EMHistoryUpdateFlagsAndTypeAndPC(pVCpu, … … 7353 7301 /* 7354 7302 * If we succeed, resume guest execution. 7355 * If we fail in interpreting the instruction because we couldn't get the guest physical address 7356 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page 7357 * in the host TLB), resume execution which would cause a guest page fault to let the guest handle this 7358 * weird case. See @bugref{6043}. 7303 * 7304 * If we fail in interpreting the instruction because we couldn't get the guest 7305 * physical address of the page containing the instruction via the guest's page 7306 * tables (we would invalidate the guest page in the host TLB), resume execution 7307 * which would cause a guest page fault to let the guest handle this weird case. 7308 * 7309 * See @bugref{6043}. 7359 7310 */ 7360 7311 if ( rcStrict == VINF_SUCCESS … … 7363 7314 { 7364 7315 /* Successfully handled MMIO operation. 
*/ 7365 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);7316 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 7366 7317 rcStrict = VINF_SUCCESS; 7367 7318 } … … 7373 7324 */ 7374 7325 Assert(pCtx == &pVCpu->cpum.GstCtx); 7375 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK);7326 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7376 7327 Log4(("EptMisscfgExit/%u: %04x:%08RX64: %RGp -> EMHistoryExec\n", 7377 7328 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, GCPhysFaultAddr)); 7378 7329 7379 7330 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 7380 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);7331 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 7381 7332 7382 7333 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 7391 7342 TRPMResetTrap(pVCpu); 7392 7343 7393 Log4 (("#NPF: PGMR0Trap0eHandlerNestedPaging returned%Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip));7344 Log4Func(("#NPF: PGMR0Trap0eHandlerNestedPaging returns %Rrc CS:RIP=%04x:%#RX64\n", rc, pCtx->cs.Sel, pCtx->rip)); 7394 7345 7395 7346 /* … … 7446 7397 /* 7447 7398 * AMD-V provides us with the exception which caused the TS; we collect 7448 * the information in the call to hmR0SvmCheckExitDueToEventDelivery .7399 * the information in the call to hmR0SvmCheckExitDueToEventDelivery(). 7449 7400 */ 7450 Log4 (("hmR0SvmExitTaskSwitch: TS occurred during event delivery.\n"));7401 Log4Func(("TS occurred during event delivery\n")); 7451 7402 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch); 7452 7403 return VINF_EM_RAW_INJECT_TRPM_EVENT; … … 7466 7417 { 7467 7418 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7468 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 7469 7470 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall); 7419 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7471 7420 7472 7421 if (pVCpu->CTX_SUFF(pVM)->hm.s.fTprPatchingAllowed) … … 7475 7424 if (rc != VERR_NOT_FOUND) 7476 7425 { 7477 Log4 (("hmR0SvmExitVmmCall:hmSvmEmulateMovTpr returns %Rrc\n", rc));7426 Log4Func(("hmSvmEmulateMovTpr returns %Rrc\n", rc)); 7478 7427 return rc; 7479 7428 } … … 7493 7442 } 7494 7443 else 7495 Log4 (("hmR0SvmExitVmmCall:GIMHypercall returns %Rrc -> #UD\n", VBOXSTRICTRC_VAL(rcStrict)));7444 Log4Func(("GIMHypercall returns %Rrc -> #UD\n", VBOXSTRICTRC_VAL(rcStrict))); 7496 7445 } 7497 7446 … … 7507 7456 { 7508 7457 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7509 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);7510 7458 hmR0SvmAdvanceRipHwAssist(pVCpu, pCtx, 2); 7511 7459 /** @todo The guest has likely hit a contended spinlock. We might want to … … 7522 7470 { 7523 7471 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7524 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,CPUMCTX_EXTRN_CR0);7472 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, CPUMCTX_EXTRN_CR0); 7525 7473 Assert(!(pCtx->cr0 & X86_CR0_NE)); 7526 7474 7527 Log4 (("hmR0SvmExitFerrFreeze:Raising IRQ 13 in response to #FERR\n"));7475 Log4Func(("Raising IRQ 13 in response to #FERR\n")); 7528 7476 return PDMIsaSetIrq(pVCpu->CTX_SUFF(pVM), 13 /* u8Irq */, 1 /* u8Level */, 0 /* uTagSrc */); 7529 7477 } … … 7557 7505 { 7558 7506 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7559 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);7507 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7560 7508 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7561 7509 … … 7575 7523 /* A genuine guest #PF, reflect it to the guest. 
*/ 7576 7524 hmR0SvmSetPendingXcptPF(pVCpu, pCtx, uErrCode, uFaultAddress); 7577 Log4 (("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip,7578 uFaultAddress, uErrCode));7525 Log4Func(("#PF: Guest page fault at %04X:%RGv FaultAddr=%RX64 ErrCode=%#x\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, 7526 uFaultAddress, uErrCode)); 7579 7527 } 7580 7528 else … … 7582 7530 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */ 7583 7531 hmR0SvmSetPendingXcptDF(pVCpu); 7584 Log4 (("Pending #DF due to vectoring #PF. NP\n"));7532 Log4Func(("Pending #DF due to vectoring #PF. NP\n")); 7585 7533 } 7586 7534 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); … … 7619 7567 } 7620 7568 7621 Log4(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel, 7622 pCtx->rip, uErrCode, pCtx->cr3)); 7623 7624 /* If it's a vectoring #PF, emulate injecting the original event injection as PGMTrap0eHandler() is incapable 7625 of differentiating between instruction emulation and event injection that caused a #PF. See @bugref{6607}. */ 7569 Log4Func(("#PF: uFaultAddress=%#RX64 CS:RIP=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", uFaultAddress, pCtx->cs.Sel, 7570 pCtx->rip, uErrCode, pCtx->cr3)); 7571 7572 /* 7573 * If it's a vectoring #PF, emulate injecting the original event injection as 7574 * PGMTrap0eHandler() is incapable of differentiating between instruction emulation and 7575 * event injection that caused a #PF. See @bugref{6607}. 7576 */ 7626 7577 if (pSvmTransient->fVectoringPF) 7627 7578 { … … 7633 7584 int rc = PGMTrap0eHandler(pVCpu, uErrCode, CPUMCTX2CORE(pCtx), (RTGCPTR)uFaultAddress); 7634 7585 7635 Log4 (("#PF: rc=%Rrc\n", rc));7586 Log4Func(("#PF: rc=%Rrc\n", rc)); 7636 7587 7637 7588 if (rc == VINF_SUCCESS) … … 7640 7591 TRPMResetTrap(pVCpu); 7641 7592 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); 7642 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);7593 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 7643 7594 return rc; 7644 7595 } … … 7674 7625 TRPMResetTrap(pVCpu); 7675 7626 hmR0SvmSetPendingXcptDF(pVCpu); 7676 Log4 (("#PF: Pending #DF due to vectoring #PF\n"));7627 Log4Func(("#PF: Pending #DF due to vectoring #PF\n")); 7677 7628 } 7678 7629 … … 7703 7654 if (pVCpu->hm.s.fGIMTrapXcptUD) 7704 7655 { 7705 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);7656 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7706 7657 uint8_t cbInstr = 0; 7707 7658 VBOXSTRICTRC rcStrict = GIMXcptUD(pVCpu, pCtx, NULL /* pDis */, &cbInstr); … … 7740 7691 { 7741 7692 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7742 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);7693 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7743 7694 7744 7695 /* Paranoia; Ensure we cannot be called as a result of event delivery. 
*/ … … 7762 7713 } 7763 7714 else 7764 Log4 (("hmR0SvmExitXcptMF:EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode));7715 Log4Func(("EMInterpretDisasCurrent returned %Rrc uOpCode=%#x\n", rc, pDis->pCurInstr->uOpcode)); 7765 7716 return rc; 7766 7717 } … … 7778 7729 { 7779 7730 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7780 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);7731 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7781 7732 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7782 7733 … … 7789 7740 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); 7790 7741 7791 /* This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data breakpoint). However, for both cases 7792 DR6 and DR7 are updated to what the exception handler expects. See AMD spec. 15.12.2 "#DB (Debug)". */ 7742 /* 7743 * This can be a fault-type #DB (instruction breakpoint) or a trap-type #DB (data 7744 * breakpoint). However, for both cases DR6 and DR7 are updated to what the exception 7745 * handler expects. See AMD spec. 15.12.2 "#DB (Debug)". 7746 */ 7793 7747 PVM pVM = pVCpu->CTX_SUFF(pVM); 7794 7748 PSVMVMCB pVmcb = pVCpu->hm.s.svm.pVmcb; … … 7851 7805 { 7852 7806 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7853 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,HMSVM_CPUMCTX_EXTRN_ALL);7807 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, HMSVM_CPUMCTX_EXTRN_ALL); 7854 7808 HMSVM_CHECK_EXIT_DUE_TO_EVENT_DELIVERY(); 7855 7809 … … 7884 7838 Assert(pSvmTransient->u64ExitCode == pVmcb->ctrl.u64ExitCode); 7885 7839 Assert(uVector <= X86_XCPT_LAST); 7886 Log4 (("hmR0SvmExitXcptGeneric:uVector=%#x uErrCode=%u\n", uVector, uErrCode));7840 Log4Func(("uVector=%#x uErrCode=%u\n", uVector, uErrCode)); 7887 7841 7888 7842 SVMEVENT Event; … … 7920 7874 { 7921 7875 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7922 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK7923 7876 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 7877 | CPUMCTX_EXTRN_HWVIRT); 7924 7878 7925 7879 #ifdef VBOX_STRICT … … 7930 7884 #endif 7931 7885 7932 /** @todo Stat. 
*/7933 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClgi); */7934 7886 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3); 7935 7887 VBOXSTRICTRC rcStrict = IEMExecDecodedClgi(pVCpu, cbInstr); 7888 if (rcStrict == VINF_SUCCESS) 7889 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT); 7936 7890 return VBOXSTRICTRC_VAL(rcStrict); 7937 7891 } … … 7944 7898 { 7945 7899 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7946 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK7947 7900 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 7901 | CPUMCTX_EXTRN_HWVIRT); 7948 7902 7949 7903 /* … … 7957 7911 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3); 7958 7912 VBOXSTRICTRC rcStrict = IEMExecDecodedStgi(pVCpu, cbInstr); 7913 if (rcStrict == VINF_SUCCESS) 7914 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_HWVIRT); 7959 7915 return VBOXSTRICTRC_VAL(rcStrict); 7960 7916 } … … 7967 7923 { 7968 7924 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 7969 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK7970 7971 7972 7973 7974 7975 7976 7925 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 7926 | CPUMCTX_EXTRN_FS 7927 | CPUMCTX_EXTRN_GS 7928 | CPUMCTX_EXTRN_TR 7929 | CPUMCTX_EXTRN_LDTR 7930 | CPUMCTX_EXTRN_KERNEL_GS_BASE 7931 | CPUMCTX_EXTRN_SYSCALL_MSRS 7932 | CPUMCTX_EXTRN_SYSENTER_MSRS); 7977 7933 7978 7934 #ifdef VBOX_STRICT … … 7988 7944 { 7989 7945 /* We skip flagging changes made to LSTAR, STAR, SFMASK and other MSRs as they are always re-loaded. */ 7990 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS 7991 | HM_CHANGED_GUEST_TR 7992 | HM_CHANGED_GUEST_LDTR 7993 | HM_CHANGED_GUEST_SYSENTER_CS_MSR 7994 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR 7995 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 7946 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS 7947 | HM_CHANGED_GUEST_GS 7948 | HM_CHANGED_GUEST_TR 7949 | HM_CHANGED_GUEST_LDTR 7950 | HM_CHANGED_GUEST_SYSENTER_CS_MSR 7951 | HM_CHANGED_GUEST_SYSENTER_EIP_MSR 7952 | HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 7996 7953 } 7997 7954 return VBOXSTRICTRC_VAL(rcStrict); … … 8005 7962 { 8006 7963 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 8007 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx,IEM_CPUMCTX_EXTRN_MUST_MASK);7964 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 8008 7965 8009 7966 #ifdef VBOX_STRICT … … 8026 7983 { 8027 7984 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 8028 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, IEM_CPUMCTX_EXTRN_MUST_MASK); 8029 8030 /** @todo Stat. */ 8031 /* STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpga); */ 7985 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 7986 8032 7987 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3); 8033 7988 VBOXSTRICTRC rcStrict = IEMExecDecodedInvlpga(pVCpu, cbInstr); … … 8042 7997 { 8043 7998 HMSVM_VALIDATE_EXIT_HANDLER_PARAMS(); 8044 /** @todo Only save and reload what VMRUN changes (e.g. skip LDTR, TR etc). 
*/ 8045 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, pCtx, HMSVM_CPUMCTX_EXTRN_ALL); 8046 7999 HMSVM_CPUMCTX_IMPORT_STATE(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 8000 | IEM_CPUMCTX_EXTRN_SVM_VMRUN_MASK); 8047 8001 VBOXSTRICTRC rcStrict; 8048 8002 uint8_t const cbInstr = hmR0SvmGetInstrLengthHwAssist(pVCpu, pCtx, 3); 8049 8003 rcStrict = IEMExecDecodedVmrun(pVCpu, cbInstr); 8050 Log4 (("IEMExecDecodedVmrun: returned %d\n", VBOXSTRICTRC_VAL(rcStrict)));8004 Log4Func(("IEMExecDecodedVmrun returns %Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 8051 8005 if (rcStrict == VINF_SUCCESS) 8052 8006 { 8053 8007 rcStrict = VINF_SVM_VMRUN; 8054 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);8008 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_SVM_VMRUN_MASK); 8055 8009 } 8056 8010 return VBOXSTRICTRC_VAL(rcStrict); -
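A recurring change in the HMSVMR0.cpp hunks above is that HMSVM_CPUMCTX_IMPORT_STATE loses its pCtx parameter and each #VMEXIT handler imports only the guest-state fragments it actually needs (CPUMCTX_EXTRN_CR2, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_SS, HMSVM_CPUMCTX_EXTRN_ALL, and so on). The standalone sketch below illustrates the general shape of such an on-demand import keyed by a "still external" bitmask; all structure, function, and flag names here are invented for the example and are not the VirtualBox types or APIs.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative state-fragment flags (hypothetical, not the real CPUMCTX_EXTRN_* values). */
#define DEMO_EXTRN_RIP   UINT64_C(0x0001)
#define DEMO_EXTRN_CR0   UINT64_C(0x0002)
#define DEMO_EXTRN_CR2   UINT64_C(0x0004)
#define DEMO_EXTRN_ALL   (DEMO_EXTRN_RIP | DEMO_EXTRN_CR0 | DEMO_EXTRN_CR2)

typedef struct DEMOCPU
{
    uint64_t fExtrn; /* Bits set => that fragment still lives in the hardware control block, not in the cache. */
    uint64_t uRip;
    uint64_t uCr0;
    uint64_t uCr2;
} DEMOCPU;

/* Stand-in for reading a field back from the hardware after a #VMEXIT. */
static uint64_t demoReadFromHw(uint64_t fWhich) { return 0x1000 + fWhich; }

/* Import only the requested fragments that have not been imported yet. */
static void demoImportState(DEMOCPU *pCpu, uint64_t fWhat)
{
    uint64_t const fToImport = pCpu->fExtrn & fWhat;
    if (fToImport & DEMO_EXTRN_RIP) pCpu->uRip = demoReadFromHw(DEMO_EXTRN_RIP);
    if (fToImport & DEMO_EXTRN_CR0) pCpu->uCr0 = demoReadFromHw(DEMO_EXTRN_CR0);
    if (fToImport & DEMO_EXTRN_CR2) pCpu->uCr2 = demoReadFromHw(DEMO_EXTRN_CR2);
    pCpu->fExtrn &= ~fToImport; /* These fragments are now valid in the software copy. */
}

int main(void)
{
    DEMOCPU Cpu = { DEMO_EXTRN_ALL, 0, 0, 0 };

    /* A cheap exit handler only needs CR2 (compare the #PF path importing CPUMCTX_EXTRN_CR2). */
    demoImportState(&Cpu, DEMO_EXTRN_CR2);
    printf("cr2=%#llx still-external=%#llx\n",
           (unsigned long long)Cpu.uCr2, (unsigned long long)Cpu.fExtrn);

    /* An expensive path can still ask for everything at once. */
    demoImportState(&Cpu, DEMO_EXTRN_ALL);
    printf("rip=%#llx cr0=%#llx still-external=%#llx\n",
           (unsigned long long)Cpu.uRip, (unsigned long long)Cpu.uCr0,
           (unsigned long long)Cpu.fExtrn);
    return 0;
}
```

The benefit of the mask-driven form is that handlers which touch only one register avoid pulling the whole guest context out of the VMCB on every exit.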
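The other pattern repeated throughout the hunk is the replacement of HMCPU_CF_SET() with an unordered atomic OR of HM_CHANGED_XXX bits into pVCpu->hm.s.fCtxChanged. A minimal model of that dirty-mask idiom, using the GCC/Clang __atomic builtin in place of IPRT's ASMAtomicUoOrU64 and hypothetical flag names:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical "guest state is dirty and must be re-exported" bits. */
#define DEMO_CHANGED_CR0       UINT64_C(0x01)
#define DEMO_CHANGED_APIC_TPR  UINT64_C(0x02)
#define DEMO_CHANGED_DR_MASK   UINT64_C(0x04)

typedef struct DEMOVCPU
{
    uint64_t volatile fCtxChanged; /* Dirty mask; written by the EMT, may be peeked at elsewhere. */
} DEMOVCPU;

/* Stand-in for ASMAtomicUoOrU64: OR bits into the mask without full ordering guarantees. */
static void demoAtomicOrU64(uint64_t volatile *pu64, uint64_t fOrMask)
{
    __atomic_fetch_or(pu64, fOrMask, __ATOMIC_RELAXED); /* GCC/Clang builtin. */
}

/* An exit handler marks exactly the state it modified. */
static void demoHandleMovToCr0(DEMOVCPU *pVCpu)
{
    /* ... emulate the MOV CR0 here ... */
    demoAtomicOrU64(&pVCpu->fCtxChanged, DEMO_CHANGED_CR0);
}

int main(void)
{
    DEMOVCPU VCpu = { 0 };
    demoHandleMovToCr0(&VCpu);
    demoAtomicOrU64(&VCpu.fCtxChanged, DEMO_CHANGED_APIC_TPR);

    /* Before re-entering the guest, only the flagged pieces need re-exporting. */
    printf("fCtxChanged=%#llx\n", (unsigned long long)VCpu.fCtxChanged);
    return 0;
}
```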
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
r72643 r72744 49 49 VMMR0DECL(int) SVMR0SetupVM(PVM pVM); 50 50 VMMR0DECL(VBOXSTRICTRC) SVMR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx); 51 VMMR0DECL(int) SVMR0 SaveHostState(PVM pVM,PVMCPU pVCpu);52 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx,uint64_t fWhat);51 VMMR0DECL(int) SVMR0ExportHostState(PVMCPU pVCpu); 52 VMMR0DECL(int) SVMR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat); 53 53 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt); 54 54 55 55 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 56 56 DECLASM(int) SVMR0VMSwitcherRun64(RTHCPHYS pVMCBHostPhys, RTHCPHYS pVMCBPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu); 57 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam, 58 uint32_t *paParam); 57 VMMR0DECL(int) SVMR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cbParam, uint32_t *paParam); 59 58 #endif /* HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) */ 60 59 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r72655 r72744 42 42 #include "dtrace/VBoxVMM.h" 43 43 44 #define HMVMX_USE_IEM_EVENT_REFLECTION45 44 #ifdef DEBUG_ramshankar 46 45 # define HMVMX_ALWAYS_SAVE_GUEST_RFLAGS … … 67 66 #define HMVMX_FLUSH_TAGGED_TLB_NONE 3 68 67 69 /** @name Updated-guest-state flags. 70 * @{ */ 71 #define HMVMX_UPDATED_GUEST_RIP RT_BIT(0) 72 #define HMVMX_UPDATED_GUEST_RSP RT_BIT(1) 73 #define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2) 74 #define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3) 75 #define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4) 76 #define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5) 77 #define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6) 78 #define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7) 79 #define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8) 80 #define HMVMX_UPDATED_GUEST_TR RT_BIT(9) 81 #define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10) 82 #define HMVMX_UPDATED_GUEST_DR7 RT_BIT(11) 83 #define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(12) 84 #define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(13) 85 #define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(14) 86 #define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(15) 87 #define HMVMX_UPDATED_GUEST_LAZY_MSRS RT_BIT(16) 88 #define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(17) 89 #define HMVMX_UPDATED_GUEST_INTR_STATE RT_BIT(18) 90 #define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19) 91 #define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \ 92 | HMVMX_UPDATED_GUEST_RSP \ 93 | HMVMX_UPDATED_GUEST_RFLAGS \ 94 | HMVMX_UPDATED_GUEST_CR0 \ 95 | HMVMX_UPDATED_GUEST_CR3 \ 96 | HMVMX_UPDATED_GUEST_CR4 \ 97 | HMVMX_UPDATED_GUEST_GDTR \ 98 | HMVMX_UPDATED_GUEST_IDTR \ 99 | HMVMX_UPDATED_GUEST_LDTR \ 100 | HMVMX_UPDATED_GUEST_TR \ 101 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \ 102 | HMVMX_UPDATED_GUEST_DR7 \ 103 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \ 104 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \ 105 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \ 106 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \ 107 | HMVMX_UPDATED_GUEST_LAZY_MSRS \ 108 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \ 109 | HMVMX_UPDATED_GUEST_INTR_STATE \ 110 | HMVMX_UPDATED_GUEST_APIC_STATE) 68 /** @name HMVMX_READ_XXX 69 * Flags to skip redundant reads of some common VMCS fields that are not part of 70 * the guest-CPU or VCPU state but are needed while handling VM-exits. 71 */ 72 #define HMVMX_READ_IDT_VECTORING_INFO RT_BIT_32(0) 73 #define HMVMX_READ_IDT_VECTORING_ERROR_CODE RT_BIT_32(1) 74 #define HMVMX_READ_EXIT_QUALIFICATION RT_BIT_32(2) 75 #define HMVMX_READ_EXIT_INSTR_LEN RT_BIT_32(3) 76 #define HMVMX_READ_EXIT_INTERRUPTION_INFO RT_BIT_32(4) 77 #define HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE RT_BIT_32(5) 78 #define HMVMX_READ_EXIT_INSTR_INFO RT_BIT_32(6) 111 79 /** @} */ 112 80 113 /** @name 114 * Flags to skip redundant reads of some common VMCS fields that are not part of 115 * the guest-CPU state but are in the transient structure. 116 */ 117 #define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0) 118 #define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1) 119 #define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2) 120 #define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3) 121 #define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4) 122 #define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5) 123 #define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6) 124 /** @} */ 125 126 /** @name 81 /** 127 82 * States of the VMCS. 128 83 * … … 131 86 * are used. Maybe later this can be extended (i.e. Nested Virtualization). 
132 87 */ 133 #define HMVMX_VMCS_STATE_CLEAR RT_BIT(0) 134 #define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1) 135 #define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2) 136 /** @} */ 88 #define HMVMX_VMCS_STATE_CLEAR RT_BIT(0) 89 #define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1) 90 #define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2) 137 91 138 92 /** … … 144 98 * MSR which cannot be modified by the guest without causing a VM-exit. 145 99 */ 146 #define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \ 147 | CPUMCTX_EXTRN_RFLAGS \ 148 | CPUMCTX_EXTRN_SREG_MASK \ 149 | CPUMCTX_EXTRN_TABLE_MASK \ 150 | CPUMCTX_EXTRN_SYSENTER_MSRS \ 151 | CPUMCTX_EXTRN_SYSCALL_MSRS \ 152 | CPUMCTX_EXTRN_KERNEL_GS_BASE \ 153 | CPUMCTX_EXTRN_TSC_AUX \ 154 | CPUMCTX_EXTRN_OTHER_MSRS \ 155 | CPUMCTX_EXTRN_CR0 \ 156 | CPUMCTX_EXTRN_CR3 \ 157 | CPUMCTX_EXTRN_CR4 \ 158 | CPUMCTX_EXTRN_DR7) 100 #define HMVMX_CPUMCTX_EXTRN_ALL ( CPUMCTX_EXTRN_RIP \ 101 | CPUMCTX_EXTRN_RFLAGS \ 102 | CPUMCTX_EXTRN_RSP \ 103 | CPUMCTX_EXTRN_SREG_MASK \ 104 | CPUMCTX_EXTRN_TABLE_MASK \ 105 | CPUMCTX_EXTRN_KERNEL_GS_BASE \ 106 | CPUMCTX_EXTRN_SYSCALL_MSRS \ 107 | CPUMCTX_EXTRN_SYSENTER_MSRS \ 108 | CPUMCTX_EXTRN_TSC_AUX \ 109 | CPUMCTX_EXTRN_OTHER_MSRS \ 110 | CPUMCTX_EXTRN_CR0 \ 111 | CPUMCTX_EXTRN_CR3 \ 112 | CPUMCTX_EXTRN_CR4 \ 113 | CPUMCTX_EXTRN_DR7 \ 114 | CPUMCTX_EXTRN_HM_VMX_MASK) 159 115 160 116 /** … … 217 173 * context. */ 218 174 #ifdef VMX_USE_CACHED_VMCS_ACCESSES 219 # define HMVMX_ SAVE_SREG(Sel, a_pCtxSelReg) \220 hmR0Vmx SaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \221 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))175 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \ 176 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \ 177 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg)) 222 178 #else 223 # define HMVMX_ SAVE_SREG(Sel, a_pCtxSelReg) \224 hmR0Vmx SaveSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \225 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg))179 # define HMVMX_IMPORT_SREG(Sel, a_pCtxSelReg) \ 180 hmR0VmxImportGuestSegmentReg(pVCpu, VMX_VMCS16_GUEST_##Sel##_SEL, VMX_VMCS32_GUEST_##Sel##_LIMIT, \ 181 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, (a_pCtxSelReg)) 226 182 #endif 227 183 … … 318 274 uint32_t uIdtVectoringErrorCode; 319 275 320 /** Mask of currently read VMCS fields; HMVMX_ UPDATED_TRANSIENT_*. */276 /** Mask of currently read VMCS fields; HMVMX_READ_XXX. 
*/ 321 277 uint32_t fVmcsFieldsRead; 322 278 … … 417 373 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMXFLUSHVPID enmFlush, RTGCPTR GCPtr); 418 374 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu); 419 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr,420 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress,421 bool fStepping, uint32_t *puIntState);375 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat); 376 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode, 377 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState); 422 378 #if HC_ARCH_BITS == 32 423 379 static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu); … … 649 605 } 650 606 651 652 607 #ifdef VBOX_STRICT 653 608 /** … … 666 621 return VINF_SUCCESS; 667 622 } 668 #endif /* VBOX_STRICT */ 669 670 671 #ifdef VBOX_STRICT 623 624 672 625 /** 673 626 * Reads the VM-entry exception error code field from the VMCS into … … 697 650 DECLINLINE(int) hmR0VmxReadExitIntInfoVmcs(PVMXTRANSIENT pVmxTransient) 698 651 { 699 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))652 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_INFO)) 700 653 { 701 654 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntInfo); 702 AssertRCReturn(rc, 703 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;655 AssertRCReturn(rc,rc); 656 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_INFO; 704 657 } 705 658 return VINF_SUCCESS; … … 716 669 DECLINLINE(int) hmR0VmxReadExitIntErrorCodeVmcs(PVMXTRANSIENT pVmxTransient) 717 670 { 718 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))671 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE)) 719 672 { 720 673 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntErrorCode); 721 674 AssertRCReturn(rc, rc); 722 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;675 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INTERRUPTION_ERROR_CODE; 723 676 } 724 677 return VINF_SUCCESS; … … 735 688 DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMXTRANSIENT pVmxTransient) 736 689 { 737 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_LEN))690 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_LEN)) 738 691 { 739 692 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr); 740 693 AssertRCReturn(rc, rc); 741 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_LEN;694 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_LEN; 742 695 } 743 696 return VINF_SUCCESS; … … 754 707 DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMXTRANSIENT pVmxTransient) 755 708 { 756 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_INFO))709 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_INSTR_INFO)) 757 710 { 758 711 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u); 759 712 AssertRCReturn(rc, rc); 760 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_INSTR_INFO;713 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_INSTR_INFO; 761 714 } 762 715 return VINF_SUCCESS; … … 775 728 DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT 
pVmxTransient) 776 729 { 777 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_EXIT_QUALIFICATION))730 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_EXIT_QUALIFICATION)) 778 731 { 779 732 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification); NOREF(pVCpu); 780 733 AssertRCReturn(rc, rc); 781 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_EXIT_QUALIFICATION;734 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_EXIT_QUALIFICATION; 782 735 } 783 736 return VINF_SUCCESS; … … 796 749 DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient) 797 750 { 798 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_INFO))799 { 800 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ INFO, &pVmxTransient->uIdtVectoringInfo);751 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_INFO)) 752 { 753 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_INFO, &pVmxTransient->uIdtVectoringInfo); 801 754 AssertRCReturn(rc, rc); 802 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_INFO;755 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_INFO; 803 756 } 804 757 return VINF_SUCCESS; … … 815 768 DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient) 816 769 { 817 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))818 { 819 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);770 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_READ_IDT_VECTORING_ERROR_CODE)) 771 { 772 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_VECTORING_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode); 820 773 AssertRCReturn(rc, rc); 821 pVmxTransient->fVmcsFieldsRead |= HMVMX_ UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;774 pVmxTransient->fVmcsFieldsRead |= HMVMX_READ_IDT_VECTORING_ERROR_CODE; 822 775 } 823 776 return VINF_SUCCESS; … … 913 866 * allocation. 914 867 */ 915 DECLINLINE(int)hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)868 static int hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys) 916 869 { 917 870 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER); … … 938 891 * allocation as 0. 939 892 */ 940 DECLINLINE(void)hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)893 static void hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys) 941 894 { 942 895 AssertPtr(pMemObj); … … 1163 1116 1164 1117 /* 1165 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been using EPTPs) so 1166 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID. 1118 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been 1119 * using EPTPs) so we don't retain any stale guest-physical mappings which won't get 1120 * invalidated when flushing by VPID. 1167 1121 */ 1168 1122 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs; … … 1303 1257 * @param cMsrs The number of MSRs. 1304 1258 */ 1305 DECLINLINE(int)hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs)1259 static int hmR0VmxSetAutoLoadStoreMsrCount(PVMCPU pVCpu, uint32_t cMsrs) 1306 1260 { 1307 1261 /* Shouldn't ever happen but there -is- a number. We're well within the recommended 512. */ … … 1382 1336 * Update the host MSR only when requested by the caller AND when we're 1383 1337 * adding it to the auto-load/store area. 
Otherwise, it would have been 1384 * updated by hmR0Vmx SaveHostMsrs(). We do this for performance reasons.1338 * updated by hmR0VmxExportHostMsrs(). We do this for performance reasons. 1385 1339 */ 1386 1340 bool fUpdatedMsrValue = false; … … 1451 1405 hmR0VmxSetMsrPermission(pVCpu, uMsr, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); 1452 1406 1453 Log4 (("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs));1407 Log4Func(("Removed MSR %#RX32 new cMsrs=%u\n", uMsr, pVCpu->hm.s.vmx.cMsrs)); 1454 1408 return VINF_SUCCESS; 1455 1409 } … … 1572 1526 #endif 1573 1527 return false; 1574 }1575 1576 1577 /**1578 * Saves a set of guest MSRs back into the guest-CPU context.1579 *1580 * @param pVCpu The cross context virtual CPU structure.1581 * @param pMixedCtx Pointer to the guest-CPU context. The data may be1582 * out-of-sync. Make sure to update the required fields1583 * before using them.1584 *1585 * @remarks No-long-jump zone!!!1586 */1587 static void hmR0VmxLazySaveGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)1588 {1589 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));1590 Assert(!VMMRZCallRing3IsEnabled(pVCpu));1591 1592 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)1593 {1594 Assert(pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_SAVED_HOST);1595 #if HC_ARCH_BITS == 641596 if (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests)1597 {1598 pMixedCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);1599 pMixedCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);1600 pMixedCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);1601 pMixedCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);1602 }1603 #else1604 NOREF(pMixedCtx);1605 #endif1606 }1607 1528 } 1608 1529 … … 1678 1599 * @remarks No-long-jump zone!!! 1679 1600 * @remarks The guest MSRs should have been saved back into the guest-CPU 1680 * context by hmR0Vmx SaveGuestLazyMsrs()!!!1601 * context by hmR0VmxImportGuestState()!!! 1681 1602 */ 1682 1603 static void hmR0VmxLazyRestoreHostMsrs(PVMCPU pVCpu) … … 1926 1847 { 1927 1848 /* 1928 * We must invalidate the guest TLB entry in either case, we cannot ignore it even for the EPT case1929 * See @bugref{6043} and @bugref{6177}.1849 * We must invalidate the guest TLB entry in either case, we cannot ignore it even for 1850 * the EPT case. See @bugref{6043} and @bugref{6177}. 1930 1851 * 1931 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this1932 * function maybe called in a loop with individual addresses.1852 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() 1853 * as this function maybe called in a loop with individual addresses. 1933 1854 */ 1934 1855 if (pVM->hm.s.vmx.fVpid) … … 2024 1945 2025 1946 /* 2026 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last. 2027 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB 2028 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore. 1947 * Force a TLB flush for the first world-switch if the current CPU differs from the one we 1948 * ran on last. If the TLB flush count changed, another VM (VCPU rather) has hit the ASID 1949 * limit while flushing the TLB or the host CPU is online after a suspend/resume, so we 1950 * cannot reuse the current ASID anymore. 
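
The flush decision described in the comment above boils down to a generation check: the cached ASID/VPID is only reusable if the VCPU is still on the same host CPU and that CPU has not flushed its TLB since the VCPU last ran there. A rough, self-contained sketch of that decision in plain C (invented names, not the actual VMM structures):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical bookkeeping mirroring the idea of idLastCpu / cTlbFlushes above. */
typedef struct { uint32_t idLastCpu; uint32_t cTlbFlushesSeen; } VCPUTLB;
typedef struct { uint32_t idCpu;     uint32_t cTlbFlushes;     } HOSTCPU;

/* True when a full TLB flush is required before VM-entry: the VCPU was moved to
   another host CPU, or that CPU flushed its TLB (ASID limit hit, suspend/resume)
   since this VCPU last ran on it. */
static bool needFullTlbFlush(const VCPUTLB *pVCpuTlb, const HOSTCPU *pHostCpu)
{
    return pVCpuTlb->idLastCpu       != pHostCpu->idCpu
        || pVCpuTlb->cTlbFlushesSeen != pHostCpu->cTlbFlushes;
}
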
2029 1951 */ 2030 1952 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu … … 2057 1979 { 2058 1980 /* 2059 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates 2060 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use. 2061 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings 2062 * but not guest-physical mappings. 2063 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}. 1981 * Changes to the EPT paging structure by VMM requires flushing-by-EPT as the CPU 1982 * creates guest-physical (ie. only EPT-tagged) mappings while traversing the EPT 1983 * tables when EPT is in use. Flushing-by-VPID will only flush linear (only 1984 * VPID-tagged) and combined (EPT+VPID tagged) mappings but not guest-physical 1985 * mappings, see @bugref{6568}. 1986 * 1987 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". 2064 1988 */ 2065 1989 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt); … … 2246 2170 break; 2247 2171 } 2248 2249 2172 /* Don't assert that VMCPU_FF_TLB_FLUSH should no longer be pending. It can be set by other EMTs. */ 2250 2173 } … … 2331 2254 { 2332 2255 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */ 2333 Log4 (("hmR0VmxSetupTaggedTlb:VPID supported without INVEPT support. Ignoring VPID.\n"));2256 Log4Func(("VPID supported without INVEPT support. Ignoring VPID.\n")); 2334 2257 pVM->hm.s.vmx.enmFlushVpid = VMXFLUSHVPID_NOT_SUPPORTED; 2335 2258 pVM->hm.s.vmx.fVpid = false; … … 2364 2287 AssertPtr(pVCpu); 2365 2288 2366 uint32_t val = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */2367 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */2368 2369 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT/* External interrupts cause a VM-exit. */2370 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;/* Non-maskable interrupts (NMIs) cause a VM-exit. */2289 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */ 2290 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */ 2291 2292 fVal |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause a VM-exit. */ 2293 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause a VM-exit. */ 2371 2294 2372 2295 if (pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI) 2373 val |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */2296 fVal |= VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI; /* Use virtual NMIs and virtual-NMI blocking features. */ 2374 2297 2375 2298 /* Enable the VMX preemption timer. */ … … 2377 2300 { 2378 2301 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER); 2379 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;2302 fVal |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER; 2380 2303 } 2381 2304 … … 2386 2309 Assert(pVM->hm.s.vmx.Msrs.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR); 2387 2310 Assert(pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT); 2388 val |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR;2311 fVal |= VMX_VMCS_CTRL_PIN_EXEC_POSTED_INTR; 2389 2312 } 2390 2313 #endif 2391 2314 2392 if (( val & zap) != val)2393 { 2394 LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! 
cpu=%#RX64 val=%#RX64 zap=%#RX64\n",2395 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, val, zap));2315 if ((fVal & fZap) != fVal) 2316 { 2317 LogRel(("hmR0VmxSetupPinCtls: Invalid pin-based VM-execution controls combo! Cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", 2318 pVM->hm.s.vmx.Msrs.VmxPinCtls.n.disallowed0, fVal, fZap)); 2396 2319 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC; 2397 2320 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2398 2321 } 2399 2322 2400 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);2323 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, fVal); 2401 2324 AssertRCReturn(rc, rc); 2402 2325 2403 pVCpu->hm.s.vmx.u32PinCtls = val;2326 pVCpu->hm.s.vmx.u32PinCtls = fVal; 2404 2327 return rc; 2405 2328 } … … 2419 2342 2420 2343 int rc = VERR_INTERNAL_ERROR_5; 2421 uint32_t val = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0;/* Bits set here must be set in the VMCS. */2422 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1;/* Bits cleared here must be cleared in the VMCS. */2423 2424 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT/* HLT causes a VM-exit. */2425 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING/* Use TSC-offsetting. */2426 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT/* MOV DRx causes a VM-exit. */2427 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT/* All IO instructions cause a VM-exit. */2428 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT/* RDPMC causes a VM-exit. */2429 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT/* MONITOR causes a VM-exit. */2430 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT;/* MWAIT causes a VM-exit. */2344 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */ 2345 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 2346 2347 fVal |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */ 2348 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */ 2349 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */ 2350 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */ 2351 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */ 2352 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */ 2353 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */ 2431 2354 2432 2355 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later, check if it's not -always- needed to be set or clear. */ … … 2442 2365 if (!pVM->hm.s.fNestedPaging) 2443 2366 { 2444 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); 2445 val |=VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT2446 |VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT2447 |VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;2367 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */ 2368 fVal |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT 2369 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT 2370 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 2448 2371 } 2449 2372 … … 2453 2376 { 2454 2377 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic); 2455 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); 2378 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */ 2456 2379 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0); 2457 2380 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic); 2458 2381 AssertRCReturn(rc, rc); 2459 2382 2460 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW;/* CR8 reads from the Virtual-APIC page. */2461 /* CR8 writes cause a VM-exit based on TPR threshold. 
*/2462 Assert(!( val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));2463 Assert(!( val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));2383 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */ 2384 /* CR8 writes cause a VM-exit based on TPR threshold. */ 2385 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT)); 2386 Assert(!(fVal & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT)); 2464 2387 } 2465 2388 else … … 2471 2394 if (pVM->hm.s.fAllow64BitGuests) 2472 2395 { 2473 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT/* CR8 reads cause a VM-exit. */2474 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;/* CR8 writes cause a VM-exit. */2396 fVal |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause a VM-exit. */ 2397 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause a VM-exit. */ 2475 2398 } 2476 2399 } … … 2479 2402 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 2480 2403 { 2481 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;2404 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS; 2482 2405 2483 2406 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap); 2484 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); 2407 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */ 2485 2408 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap); 2486 2409 AssertRCReturn(rc, rc); … … 2520 2443 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */ 2521 2444 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL) 2522 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;2523 2524 if (( val & zap) != val)2525 { 2526 LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",2527 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, val, zap));2445 fVal |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL; 2446 2447 if ((fVal & fZap) != fVal) 2448 { 2449 LogRel(("hmR0VmxSetupProcCtls: Invalid processor-based VM-execution controls combo! cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", 2450 pVM->hm.s.vmx.Msrs.VmxProcCtls.n.disallowed0, fVal, fZap)); 2528 2451 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC; 2529 2452 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2530 2453 } 2531 2454 2532 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);2455 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, fVal); 2533 2456 AssertRCReturn(rc, rc); 2534 2457 2535 pVCpu->hm.s.vmx.u32ProcCtls = val;2458 pVCpu->hm.s.vmx.u32ProcCtls = fVal; 2536 2459 2537 2460 /* … … 2540 2463 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)) 2541 2464 { 2542 val = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0;/* Bits set here must be set in the VMCS. */2543 zap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1;/* Bits cleared here must be cleared in the VMCS. */2465 fVal = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */ 2466 fZap = pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 2544 2467 2545 2468 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT) 2546 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;/* WBINVD causes a VM-exit. */2469 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */ 2547 2470 2548 2471 if (pVM->hm.s.fNestedPaging) 2549 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;/* Enable EPT. 
*/2472 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */ 2550 2473 2551 2474 /* … … 2556 2479 && pVM->cpum.ro.GuestFeatures.fInvpcid) 2557 2480 { 2558 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;2481 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID; 2559 2482 } 2560 2483 2561 2484 if (pVM->hm.s.vmx.fVpid) 2562 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;/* Enable VPID. */2485 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */ 2563 2486 2564 2487 if (pVM->hm.s.vmx.fUnrestrictedGuest) 2565 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST;/* Enable Unrestricted Execution. */2488 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */ 2566 2489 2567 2490 #if 0 … … 2569 2492 { 2570 2493 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT); 2571 val |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT;/* Enable APIC-register virtualization. */2494 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_APIC_REG_VIRT; /* Enable APIC-register virtualization. */ 2572 2495 2573 2496 Assert(pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY); 2574 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY;/* Enable virtual-interrupt delivery. */2497 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_INTR_DELIVERY; /* Enable virtual-interrupt delivery. */ 2575 2498 } 2576 2499 #endif … … 2582 2505 { 2583 2506 Assert(pVM->hm.s.vmx.HCPhysApicAccess); 2584 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); 2585 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;/* Virtualize APIC accesses. */2507 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */ 2508 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */ 2586 2509 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess); 2587 2510 AssertRCReturn(rc, rc); … … 2589 2512 2590 2513 if (pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP) 2591 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;/* Enable RDTSCP support. */2514 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */ 2592 2515 2593 2516 if ( pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT … … 2595 2518 && pVM->hm.s.vmx.cPleWindowTicks) 2596 2519 { 2597 val |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT;/* Enable pause-loop exiting. */2520 fVal |= VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT; /* Enable pause-loop exiting. */ 2598 2521 2599 2522 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PLE_GAP, pVM->hm.s.vmx.cPleGapTicks); … … 2602 2525 } 2603 2526 2604 if (( val & zap) != val)2527 if ((fVal & fZap) != fVal) 2605 2528 { 2606 2529 LogRel(("hmR0VmxSetupProcCtls: Invalid secondary processor-based VM-execution controls combo! " 2607 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, val, zap));2530 "cpu=%#RX64 fVal=%#RX64 fZap=%#RX64\n", pVM->hm.s.vmx.Msrs.VmxProcCtls2.n.disallowed0, fVal, fZap)); 2608 2531 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC2; 2609 2532 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 2610 2533 } 2611 2534 2612 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);2535 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, fVal); 2613 2536 AssertRCReturn(rc, rc); 2614 2537 2615 pVCpu->hm.s.vmx.u32ProcCtls2 = val;2538 pVCpu->hm.s.vmx.u32ProcCtls2 = fVal; 2616 2539 } 2617 2540 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest)) … … 2645 2568 /* All fields are zero-initialized during allocation; but don't remove the commented block below. 
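
Every control word above (pin-based, primary and secondary processor-based) is assembled the same way: start from the bits the capability MSR forces to 1, OR in the optional features that are wanted, then check the result against the allowed-1 mask so nothing unsupported slipped in. A minimal standalone sketch of that pattern, using invented names rather than the real VMX capability structures:

#include <stdbool.h>
#include <stdint.h>

/* Capability pair as reported by a VMX controls MSR: bits that must be 1
   ("disallowed0" in the code above) and bits that may be 1 ("allowed1"). */
typedef struct { uint32_t fFixed1; uint32_t fAllowed1; } CTLSCAPS;

/* Build a control value from the mandatory bits plus the requested features,
   failing if any set bit is not actually allowed to be 1 on this CPU. This is
   the same "(fVal & fZap) != fVal" test used for each control field above. */
static bool computeCtls(const CTLSCAPS *pCaps, uint32_t fRequested, uint32_t *pfResult)
{
    uint32_t const fVal = pCaps->fFixed1 | fRequested;  /* bits set here must be set */
    uint32_t const fZap = pCaps->fAllowed1;             /* bits cleared here must be cleared */
    if ((fVal & fZap) != fVal)
        return false;                                   /* unsupported feature combination */
    *pfResult = fVal;
    return true;
}

On failure the code above records a VMX_UFC_xxx diagnostic in u32HMError and returns VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO rather than silently dropping the feature.
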
*/ 2646 2569 #if 0 2647 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0Vmx LoadGuestCR3AndCR4())*/2570 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxExportGuestCR3AndCR4())*/ 2648 2571 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); 2649 2572 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); … … 2657 2580 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); 2658 2581 2659 /** @todo Explore possibility of using IO-bitmaps. */2660 2582 /* All IO & IOIO instructions cause VM-exits. */ 2661 2583 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); … … 2688 2610 #if 0 2689 2611 /* Setup debug controls */ 2690 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */2612 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); 2691 2613 rc |= VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0); 2692 2614 AssertRCReturn(rc, rc); … … 2733 2655 AssertRCReturn(rc, rc); 2734 2656 return rc; 2735 }2736 2737 2738 /**2739 * Sets up the initial guest-state mask. The guest-state mask is consulted2740 * before reading guest-state fields from the VMCS as VMREADs can be expensive2741 * for the nested virtualization case (as it would cause a VM-exit).2742 *2743 * @param pVCpu The cross context virtual CPU structure.2744 */2745 static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)2746 {2747 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */2748 HMVMXCPU_GST_RESET_TO(pVCpu, HMVMX_UPDATED_GUEST_ALL);2749 return VINF_SUCCESS;2750 2657 } 2751 2658 … … 2806 2713 2807 2714 /* 2808 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated. 2809 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0Intel(). 2715 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be 2716 * allocated. We no longer support the highly unlikely case of UnrestrictedGuest without 2717 * pRealModeTSS, see hmR3InitFinalizeR0Intel(). 2810 2718 */ 2811 2719 if ( !pVM->hm.s.vmx.fUnrestrictedGuest … … 2852 2760 2853 2761 /* Log the VCPU pointers, useful for debugging SMP VMs. */ 2854 Log4 (("VMXR0SetupVM:pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));2762 Log4Func(("pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu)); 2855 2763 2856 2764 /* Set revision dword at the beginning of the VMCS structure. */ … … 2881 2789 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu); 2882 2790 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM), 2883 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);2884 2885 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);2886 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),2887 2791 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc); 2888 2792 … … 2912 2816 * 2913 2817 * @returns VBox status code. 2914 * @param pVM The cross context VM structure. 2915 * @param pVCpu The cross context virtual CPU structure. 
2916 */ 2917 DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu) 2918 { 2919 NOREF(pVM); NOREF(pVCpu); 2920 2818 */ 2819 static int hmR0VmxExportHostControlRegs(void) 2820 { 2921 2821 RTCCUINTREG uReg = ASMGetCR0(); 2922 2822 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg); … … 2934 2834 2935 2835 2836 /** 2837 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into 2838 * the host-state area in the VMCS. 2839 * 2840 * @returns VBox status code. 2841 * @param pVCpu The cross context virtual CPU structure. 2842 */ 2843 static int hmR0VmxExportHostSegmentRegs(PVMCPU pVCpu) 2844 { 2936 2845 #if HC_ARCH_BITS == 64 2937 2846 /** 2938 2847 * Macro for adjusting host segment selectors to satisfy VT-x's VM-entry 2939 * requirements. See hmR0Vmx SaveHostSegmentRegs().2848 * requirements. See hmR0VmxExportHostSegmentRegs(). 2940 2849 */ 2941 2850 # define VMXLOCAL_ADJUST_HOST_SEG(seg, selValue) \ … … 2955 2864 (selValue) = 0; \ 2956 2865 } 2957 #endif 2958 2959 2960 /** 2961 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into 2962 * the host-state area in the VMCS. 2963 * 2964 * @returns VBox status code. 2965 * @param pVM The cross context VM structure. 2966 * @param pVCpu The cross context virtual CPU structure. 2967 */ 2968 DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu) 2969 { 2970 int rc = VERR_INTERNAL_ERROR_5; 2971 2972 #if HC_ARCH_BITS == 64 2866 2973 2867 /* 2974 2868 * If we've executed guest code using VT-x, the host-state bits will be messed up. We 2975 * should -not- save the messed up state without restoring the original host-state. See @bugref{7240}. 2869 * should -not- save the messed up state without restoring the original host-state, 2870 * see @bugref{7240}. 2976 2871 * 2977 * This apparently can happen (most likely the FPU changes), deal with it rather than asserting.2978 * Was observed booting Solaris10u10 32-bit guest.2872 * This apparently can happen (most likely the FPU changes), deal with it rather than 2873 * asserting. Was observed booting Solaris 10u10 32-bit guest. 2979 2874 */ 2980 2875 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED) … … 3018 2913 #if HC_ARCH_BITS == 64 3019 2914 /* 3020 * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to gain VM-entry and restore them 3021 * before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers". 2915 * Determine if the host segment registers are suitable for VT-x. Otherwise use zero to 2916 * gain VM-entry and restore them before we get preempted. 2917 * 2918 * See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers". 3022 2919 */ 3023 2920 VMXLOCAL_ADJUST_HOST_SEG(DS, uSelDS); … … 3046 2943 3047 2944 /* Write these host selector fields into the host-state area in the VMCS. 
*/ 3048 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS);3049 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS);2945 int rc = VMXWriteVmcs32(VMX_VMCS16_HOST_CS_SEL, uSelCS); 2946 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_SS_SEL, uSelSS); 3050 2947 #if HC_ARCH_BITS == 64 3051 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS);3052 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES);3053 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS);3054 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS);2948 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_DS_SEL, uSelDS); 2949 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_ES_SEL, uSelES); 2950 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_FS_SEL, uSelFS); 2951 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_GS_SEL, uSelGS); 3055 2952 #else 3056 2953 NOREF(uSelDS); … … 3059 2956 NOREF(uSelGS); 3060 2957 #endif 3061 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR);2958 rc |= VMXWriteVmcs32(VMX_VMCS16_HOST_TR_SEL, uSelTR); 3062 2959 AssertRCReturn(rc, rc); 3063 2960 … … 3077 2974 #if HC_ARCH_BITS == 64 3078 2975 /* 3079 * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps them to the3080 * maximum limit (0xffff) on every VM-exit.2976 * Determine if we need to manually need to restore the GDTR and IDTR limits as VT-x zaps 2977 * them to the maximum limit (0xffff) on every VM-exit. 3081 2978 */ 3082 2979 if (Gdtr.cbGdt != 0xffff) … … 3085 2982 /* 3086 2983 * IDT limit is effectively capped at 0xfff. (See Intel spec. 6.14.1 "64-Bit Mode IDT" 3087 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the limit as 0xfff, VT-x 3088 * bloating the limit to 0xffff shouldn't cause any different CPU behavior. However, several hosts either insists 3089 * on 0xfff being the limit (Windows Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there 3090 * but botches sidt alignment in at least one consumer). So, we're only allowing IDTR.LIMIT to be left at 0xffff on 3091 * hosts where we are pretty sure it won't cause trouble. 2984 * and Intel spec. 6.2 "Exception and Interrupt Vectors".) Therefore if the host has the 2985 * limit as 0xfff, VT-x bloating the limit to 0xffff shouldn't cause any different CPU 2986 * behavior. However, several hosts either insists on 0xfff being the limit (Windows 2987 * Patch Guard) or uses the limit for other purposes (darwin puts the CPU ID in there 2988 * but botches sidt alignment in at least one consumer). So, we're only allowing the 2989 * IDTR.LIMIT to be left at 0xffff on hosts where we are sure it won't cause trouble. 3092 2990 */ 3093 2991 # if defined(RT_OS_LINUX) || defined(RT_OS_SOLARIS) … … 3104 3002 3105 3003 /* 3106 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI and RPL bits 3107 * is effectively what the CPU does for "scaling by 8". TI is always 0 and RPL should be too in most cases. 3004 * Host TR base. Verify that TR selector doesn't point past the GDT. Masking off the TI 3005 * and RPL bits is effectively what the CPU does for "scaling by 8". TI is always 0 and 3006 * RPL should be too in most cases. 3108 3007 */ 3109 3008 AssertMsgReturn((uSelTR | X86_SEL_RPL_LDT) <= Gdtr.cbGdt, 3110 ("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), 3111 VERR_VMX_INVALID_HOST_STATE); 3009 ("TR selector exceeds limit. 
TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt), VERR_VMX_INVALID_HOST_STATE); 3112 3010 3113 3011 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK)); … … 3116 3014 3117 3015 /* 3118 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits. 3119 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else. 3120 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode. 3121 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0. 3016 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on 3017 * all VM-exits. The type is the same for 64-bit busy TSS[1]. The limit needs manual 3018 * restoration if the host has something else. Task switching is not supported in 64-bit 3019 * mode[2], but the limit still matters as IOPM is supported in 64-bit mode. Restoring the 3020 * limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0. 3122 3021 * 3123 3022 * [1] See Intel spec. 3.5 "System Descriptor Types". 3124 3023 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode". 3125 3024 */ 3025 PVM pVM = pVCpu->CTX_SUFF(pVM); 3126 3026 Assert(pDesc->System.u4Type == 11); 3127 3027 if ( pDesc->System.u16LimitLow != 0x67 … … 3152 3052 } 3153 3053 #else 3154 NOREF(pVM);3155 3054 uintptr_t uTRBase = X86DESC_BASE(pDesc); 3156 3055 #endif … … 3174 3073 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase; 3175 3074 #endif 3176 return rc; 3177 } 3178 3179 3180 /** 3181 * Saves certain host MSRs in the VM-exit MSR-load area and some in the 3182 * host-state area of the VMCS. Theses MSRs will be automatically restored on 3183 * the host after every successful VM-exit. 3075 return VINF_SUCCESS; 3076 } 3077 3078 3079 /** 3080 * Exports certain host MSRs in the VM-exit MSR-load area and some in the 3081 * host-state area of the VMCS. 3082 * 3083 * Theses MSRs will be automatically restored on the host after every successful 3084 * VM-exit. 3184 3085 * 3185 3086 * @returns VBox status code. 3186 * @param pVM The cross context VM structure.3187 3087 * @param pVCpu The cross context virtual CPU structure. 3188 3088 * 3189 3089 * @remarks No-long-jump zone!!! 3190 3090 */ 3191 DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu) 3192 { 3193 NOREF(pVM); 3194 3091 static int hmR0VmxExportHostMsrs(PVMCPU pVCpu) 3092 { 3195 3093 AssertPtr(pVCpu); 3196 3094 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr); … … 3205 3103 * Host Sysenter MSRs. 3206 3104 */ 3207 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS,ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));3105 int rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)); 3208 3106 #if HC_ARCH_BITS == 32 3209 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, 3210 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, 3107 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)); 3108 rc |= VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)); 3211 3109 #else 3212 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, 3213 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, 3110 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP)); 3111 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP)); 3214 3112 #endif 3215 3113 AssertRCReturn(rc, rc); … … 3217 3115 /* 3218 3116 * Host EFER MSR. 
3219 * If the CPU supports the newer VMCS controls for managing EFER, use it. 3220 * Otherwise it's done as part of auto-load/store MSR area in the VMCS, see hmR0VmxLoadGuestMsrs(). 3117 * 3118 * If the CPU supports the newer VMCS controls for managing EFER, use it. Otherwise it's 3119 * done as part of auto-load/store MSR area in the VMCS, see hmR0VmxExportGuestMsrs(). 3221 3120 */ 3121 PVM pVM = pVCpu->CTX_SUFF(pVM); 3222 3122 if (pVM->hm.s.vmx.fSupportsVmcsEfer) 3223 3123 { … … 3226 3126 } 3227 3127 3228 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see 3229 * hmR0VmxLoadGuestExitCtls() !! */ 3230 3231 return rc; 3128 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT also see hmR0VmxExportGuestExitCtls(). */ 3129 3130 return VINF_SUCCESS; 3232 3131 } 3233 3132 … … 3237 3136 * 3238 3137 * We check all relevant bits. For now, that's everything besides LMA/LME, as 3239 * these two bits are handled by VM-entry, see hmR0Vmx LoadGuestExitCtls() and3240 * hmR0VMx LoadGuestEntryCtls().3138 * these two bits are handled by VM-entry, see hmR0VmxExportGuestExitCtls() and 3139 * hmR0VMxExportGuestEntryCtls(). 3241 3140 * 3242 3141 * @returns true if we need to load guest EFER, false otherwise. … … 3249 3148 * @remarks No-long-jump zone!!! 3250 3149 */ 3251 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PC PUMCTX pMixedCtx)3150 static bool hmR0VmxShouldSwapEferMsr(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3252 3151 { 3253 3152 #ifdef HMVMX_ALWAYS_SWAP_EFER … … 3257 3156 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 3258 3157 /* For 32-bit hosts running 64-bit guests, we always swap EFER in the world-switcher. Nothing to do here. */ 3259 if (CPUMIsGuestInLongMode (pVCpu))3158 if (CPUMIsGuestInLongModeEx(pMixedCtx)) 3260 3159 return false; 3261 3160 #endif … … 3266 3165 3267 3166 /* 3268 * For 64-bit guests, if EFER.SCE bit differs, we need to swap to ensure that the3269 * guest's SYSCALL behaviour isn't screwed. See @bugref{7386}.3167 * For 64-bit guests, if EFER.SCE bit differs, we need to swap EFER to ensure that the 3168 * guest's SYSCALL behaviour isn't broken, see @bugref{7386}. 3270 3169 */ 3271 if ( CPUMIsGuestInLongMode (pVCpu)3170 if ( CPUMIsGuestInLongModeEx(pMixedCtx) 3272 3171 && (u64GuestEfer & MSR_K6_EFER_SCE) != (u64HostEfer & MSR_K6_EFER_SCE)) 3273 3172 { … … 3289 3188 } 3290 3189 3291 /** @todo Check the latest Intel spec. for any other bits,3292 * like SMEP/SMAP? */3293 3190 return false; 3294 3191 } … … 3296 3193 3297 3194 /** 3298 * Sets up VM-entry controls in the VMCS. These controls can affect things done 3299 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry 3300 * controls". 3195 * Exports the guest state with appropriate VM-entry controls in the VMCS. 3196 * 3197 * These controls can affect things done on VM-exit; e.g. "load debug controls", 3198 * see Intel spec. 24.8.1 "VM-entry controls". 3301 3199 * 3302 3200 * @returns VBox status code. … … 3309 3207 * @remarks No-long-jump zone!!! 3310 3208 */ 3311 DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3312 { 3313 int rc = VINF_SUCCESS; 3314 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_ENTRY_CTLS)) 3315 { 3316 PVM pVM = pVCpu->CTX_SUFF(pVM); 3317 uint32_t val = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3318 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. 
*/ 3209 static int hmR0VmxExportGuestEntryCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3210 { 3211 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_ENTRY_CTLS) 3212 { 3213 PVM pVM = pVCpu->CTX_SUFF(pVM); 3214 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3215 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 3319 3216 3320 3217 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supports the 1-setting of this bit. */ 3321 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;3218 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG; 3322 3219 3323 3220 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */ 3324 3221 if (CPUMIsGuestInLongModeEx(pMixedCtx)) 3325 3222 { 3326 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;3327 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n", pVCpu->idCpu));3223 fVal |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST; 3224 Log4Func(("VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST\n")); 3328 3225 } 3329 3226 else 3330 Assert(!( val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));3227 Assert(!(fVal & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST)); 3331 3228 3332 3229 /* If the CPU supports the newer VMCS controls for managing guest/host EFER, use it. */ … … 3334 3231 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx)) 3335 3232 { 3336 val |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR;3337 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n", pVCpu->idCpu));3233 fVal |= VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR; 3234 Log4Func(("VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR\n")); 3338 3235 } 3339 3236 … … 3347 3244 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR. */ 3348 3245 3349 if (( val & zap) != val)3350 { 3351 Log Rel(("hmR0VmxLoadGuestEntryCtls: Invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",3352 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, val, zap));3246 if ((fVal & fZap) != fVal) 3247 { 3248 Log4Func(("Invalid VM-entry controls combo! Cpu=%RX64 fVal=%RX64 fZap=%RX64\n", 3249 pVM->hm.s.vmx.Msrs.VmxEntry.n.disallowed0, fVal, fZap)); 3353 3250 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY; 3354 3251 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3355 3252 } 3356 3253 3357 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);3254 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, fVal); 3358 3255 AssertRCReturn(rc, rc); 3359 3256 3360 pVCpu->hm.s.vmx.u32EntryCtls = val;3361 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_VMX_ENTRY_CTLS);3362 } 3363 return rc;3364 } 3365 3366 3367 /** 3368 * Sets up the VM-exit controls in the VMCS.3257 pVCpu->hm.s.vmx.u32EntryCtls = fVal; 3258 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_ENTRY_CTLS); 3259 } 3260 return VINF_SUCCESS; 3261 } 3262 3263 3264 /** 3265 * Exports the guest state with appropriate VM-exit controls in the VMCS. 3369 3266 * 3370 3267 * @returns VBox status code. … … 3376 3273 * @remarks Requires EFER. 3377 3274 */ 3378 DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3379 { 3380 NOREF(pMixedCtx); 3381 3382 int rc = VINF_SUCCESS; 3383 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_EXIT_CTLS)) 3384 { 3385 PVM pVM = pVCpu->CTX_SUFF(pVM); 3386 uint32_t val = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3387 uint32_t zap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. 
*/ 3275 static int hmR0VmxExportGuestExitCtls(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3276 { 3277 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_EXIT_CTLS) 3278 { 3279 PVM pVM = pVCpu->CTX_SUFF(pVM); 3280 uint32_t fVal = pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */ 3281 uint32_t fZap = pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */ 3388 3282 3389 3283 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */ 3390 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;3284 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG; 3391 3285 3392 3286 /* 3393 3287 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary. 3394 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs(). 3288 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in 3289 * hmR0VmxExportHostMsrs(). 3395 3290 */ 3396 3291 #if HC_ARCH_BITS == 64 3397 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;3398 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));3292 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; 3293 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n")); 3399 3294 #else 3400 3295 Assert( pVCpu->hm.s.vmx.pfnStartVM == VMXR0SwitcherStartVM64 … … 3404 3299 { 3405 3300 /* The switcher returns to long mode, EFER is managed by the switcher. */ 3406 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;3407 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n", pVCpu->idCpu));3301 fVal |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; 3302 Log4Func(("VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE\n")); 3408 3303 } 3409 3304 else 3410 Assert(!( val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));3305 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE)); 3411 3306 #endif 3412 3307 … … 3415 3310 && hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx)) 3416 3311 { 3417 val |=VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR3418 |VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR;3419 Log4 (("Load[%RU32]: VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR, VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n", pVCpu->idCpu));3312 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR 3313 | VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR; 3314 Log4Func(("VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR and VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR\n")); 3420 3315 } 3421 3316 3422 3317 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */ 3423 Assert(!( val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));3318 Assert(!(fVal & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)); 3424 3319 3425 3320 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR, … … 3429 3324 if ( pVM->hm.s.vmx.fUsePreemptTimer 3430 3325 && (pVM->hm.s.vmx.Msrs.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)) 3431 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;3432 3433 if (( val & zap) != val)3434 { 3435 LogRel(("hmR0VmxSetupProcCtls: Invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",3436 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, val, zap));3326 fVal |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER; 3327 3328 if ((fVal & fZap) != fVal) 3329 { 3330 LogRel(("hmR0VmxSetupProcCtls: Invalid VM-exit controls combo! 
cpu=%RX64 fVal=%RX64 fZap=%RX64\n", 3331 pVM->hm.s.vmx.Msrs.VmxExit.n.disallowed0, fVal, fZap)); 3437 3332 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT; 3438 3333 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO; 3439 3334 } 3440 3335 3441 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);3336 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, fVal); 3442 3337 AssertRCReturn(rc, rc); 3443 3338 3444 pVCpu->hm.s.vmx.u32ExitCtls = val;3445 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_VMX_EXIT_CTLS);3446 } 3447 return rc;3339 pVCpu->hm.s.vmx.u32ExitCtls = fVal; 3340 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_EXIT_CTLS); 3341 } 3342 return VINF_SUCCESS; 3448 3343 } 3449 3344 … … 3465 3360 3466 3361 /** 3467 * Loads the guest APIC and related state.3362 * Exports the guest APIC TPR state into the VMCS. 3468 3363 * 3469 3364 * @returns VBox status code. 3470 3365 * @param pVCpu The cross context virtual CPU structure. 3471 * @param pMixedCtx Pointer to the guest-CPU context. The data may be3472 * out-of-sync. Make sure to update the required fields3473 * before using them.3474 3366 * 3475 3367 * @remarks No-long-jump zone!!! 3476 3368 */ 3477 DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3478 { 3479 NOREF(pMixedCtx); 3480 3481 int rc = VINF_SUCCESS; 3482 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_APIC_STATE)) 3369 static int hmR0VmxExportGuestApicTpr(PVMCPU pVCpu) 3370 { 3371 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_APIC_TPR) 3483 3372 { 3484 3373 if ( PDMHasApic(pVCpu->CTX_SUFF(pVM)) … … 3495 3384 uint8_t u8Tpr = 0; 3496 3385 uint8_t u8PendingIntr = 0; 3497 rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);3386 int rc = APICGetTpr(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr); 3498 3387 AssertRCReturn(rc, rc); 3499 3388 3500 3389 /* 3501 * If there are interrupts pending but masked by the TPR, instruct VT-x to cause a TPR-below-threshold VM-exit 3502 * when the guest lowers its TPR below the priority of the pending interrupt so we can deliver the interrupt. 3503 * If there are no interrupts pending, set threshold to 0 to not cause any TPR-below-threshold VM-exits. 3390 * If there are interrupts pending but masked by the TPR, instruct VT-x to 3391 * cause a TPR-below-threshold VM-exit when the guest lowers its TPR below the 3392 * priority of the pending interrupt so we can deliver the interrupt. If there 3393 * are no interrupts pending, set threshold to 0 to not cause any 3394 * TPR-below-threshold VM-exits. 3504 3395 */ 3505 3396 pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR] = u8Tpr; … … 3518 3409 } 3519 3410 } 3520 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 3521 } 3522 3523 return rc; 3411 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_APIC_TPR); 3412 } 3413 return VINF_SUCCESS; 3524 3414 } 3525 3415 … … 3536 3426 * @remarks No-long-jump zone!!! 3537 3427 */ 3538 DECLINLINE(uint32_t)hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)3428 static uint32_t hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3539 3429 { 3540 3430 /* 3541 3431 * Check if we should inhibit interrupt delivery due to instructions like STI and MOV SS. 3542 3432 */ 3543 uint32_t uIntrState = 0;3433 uint32_t fIntrState = 0; 3544 3434 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3545 3435 { 3546 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). 
*/ 3547 AssertMsg(HMVMXCPU_GST_IS_SET(pVCpu, HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), 3548 ("%#x\n", HMVMXCPU_GST_VALUE(pVCpu))); 3436 /* If inhibition is active, RIP & RFLAGS should've been accessed 3437 (i.e. read previously from the VMCS or from ring-3). */ 3438 #ifdef VBOX_STRICT 3439 uint64_t const fExtrn = ASMAtomicUoReadU64(&pMixedCtx->fExtrn); 3440 AssertMsg(!(fExtrn & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS)), ("%#x\n", fExtrn)); 3441 #endif 3549 3442 if (pMixedCtx->rip == EMGetInhibitInterruptsPC(pVCpu)) 3550 3443 { 3551 3444 if (pMixedCtx->eflags.Bits.u1IF) 3552 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;3445 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI; 3553 3446 else 3554 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;3447 fIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS; 3555 3448 } 3556 3449 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 3557 3450 { 3558 3451 /* 3559 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in 3560 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct. 3452 * We can clear the inhibit force flag as even if we go back to the recompiler 3453 * without executing guest code in VT-x, the flag's condition to be cleared is 3454 * met and thus the cleared state is correct. 3561 3455 */ 3562 3456 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); … … 3574 3468 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)) 3575 3469 { 3576 uIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI;3577 } 3578 3579 return uIntrState;3580 } 3581 3582 3583 /** 3584 * Loads the guest's interruptibility-state into the guest-state area in the3470 fIntrState |= VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI; 3471 } 3472 3473 return fIntrState; 3474 } 3475 3476 3477 /** 3478 * Exports the guest's interruptibility-state into the guest-state area in the 3585 3479 * VMCS. 3586 3480 * 3587 3481 * @returns VBox status code. 3588 3482 * @param pVCpu The cross context virtual CPU structure. 3589 * @param uIntrState The interruptibility-state to set.3590 */ 3591 static int hmR0Vmx LoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)3483 * @param fIntrState The interruptibility-state to set. 3484 */ 3485 static int hmR0VmxExportGuestIntrState(PVMCPU pVCpu, uint32_t fIntrState) 3592 3486 { 3593 3487 NOREF(pVCpu); 3594 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */ 3595 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */ 3596 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState); 3597 AssertRC(rc); 3598 return rc; 3599 } 3600 3601 3602 /** 3603 * Loads the exception intercepts required for guest execution in the VMCS. 3488 AssertMsg(!(fIntrState & 0xfffffff0), ("%#x\n", fIntrState)); /* Bits 31:4 MBZ. */ 3489 Assert((fIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */ 3490 return VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, fIntrState); 3491 } 3492 3493 3494 /** 3495 * Exports the exception intercepts required for guest execution in the VMCS. 3604 3496 * 3605 3497 * @returns VBox status code. 3606 3498 * @param pVCpu The cross context virtual CPU structure. 3607 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 3608 * out-of-sync. Make sure to update the required fields 3609 * before using them. 
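
The interruptibility computation above reduces to: STI / MOV-SS blocking is only reported while RIP still sits at the shadowed instruction, and which of the two bits is used depends on EFLAGS.IF; NMI blocking is ORed in separately when virtual NMIs are in use. A simplified standalone model (bit values follow the Intel SDM layout of the interruptibility-state field; everything else is invented for the sketch):

#include <stdbool.h>
#include <stdint.h>

#define BLOCK_STI    UINT32_C(0x1)   /* bit 0: blocking by STI */
#define BLOCK_MOVSS  UINT32_C(0x2)   /* bit 1: blocking by MOV SS / POP SS */
#define BLOCK_NMI    UINT32_C(0x8)   /* bit 3: blocking by NMI */

static uint32_t computeIntrState(bool fInhibitPending, uint64_t uRip, uint64_t uInhibitRip,
                                 bool fIF, bool fBlockNmi)
{
    uint32_t fIntrState = 0;
    if (fInhibitPending && uRip == uInhibitRip)   /* the shadow only covers the next instruction */
        fIntrState |= fIF ? BLOCK_STI : BLOCK_MOVSS;
    if (fBlockNmi)                                /* only relevant with the virtual-NMI execution control */
        fIntrState |= BLOCK_NMI;
    return fIntrState;
}

The real code additionally clears the inhibit force-flag once RIP has moved on; this sketch leaves that side effect out.
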
3610 */ 3611 static int hmR0VmxLoadGuestXcptIntercepts(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3612 { 3613 NOREF(pMixedCtx); 3614 int rc = VINF_SUCCESS; 3615 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS)) 3616 { 3617 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxLoadSharedCR0(). */ 3499 * 3500 * @remarks No-long-jump zone!!! 3501 */ 3502 static int hmR0VmxExportGuestXcptIntercepts(PVMCPU pVCpu) 3503 { 3504 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS) 3505 { 3506 /* The remaining exception intercepts are handled elsewhere, e.g. in hmR0VmxExportSharedCR0(). */ 3618 3507 if (pVCpu->hm.s.fGIMTrapXcptUD) 3619 3508 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_UD); … … 3626 3515 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB)); 3627 3516 3628 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);3517 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 3629 3518 AssertRCReturn(rc, rc); 3630 3519 3631 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS); 3632 Log4(("Load[%RU32]: VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, 3633 pVCpu->hm.s.vmx.u32XcptBitmap, HMCPU_CF_VALUE(pVCpu))); 3634 } 3635 return rc; 3636 } 3637 3638 3639 /** 3640 * Loads the guest's RIP into the guest-state area in the VMCS. 3520 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS); 3521 Log4Func(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP=%#RX64\n", pVCpu->hm.s.vmx.u32XcptBitmap)); 3522 } 3523 return VINF_SUCCESS; 3524 } 3525 3526 3527 /** 3528 * Exports the guest's RIP into the guest-state area in the VMCS. 3641 3529 * 3642 3530 * @returns VBox status code. … … 3648 3536 * @remarks No-long-jump zone!!! 3649 3537 */ 3650 static int hmR0Vmx LoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)3538 static int hmR0VmxExportGuestRip(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3651 3539 { 3652 3540 int rc = VINF_SUCCESS; 3653 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))3541 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP) 3654 3542 { 3655 3543 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip); 3656 3544 AssertRCReturn(rc, rc); 3657 3545 3658 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RIP);3659 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#RX32\n", pVCpu->idCpu, pMixedCtx->rip,3660 HMCPU_CF_VALUE(pVCpu)));3661 3662 3546 /* Update the exit history entry with the correct CS.BASE + RIP or just RIP. */ 3663 if ( HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))3547 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CS) 3664 3548 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true); 3665 3549 else 3666 3550 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->rip, false); 3551 3552 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RIP); 3553 Log4Func(("RIP=%#RX64\n", pMixedCtx->rip)); 3667 3554 } 3668 3555 return rc; … … 3671 3558 3672 3559 /** 3673 * Loads the guest's RSP into the guest-state area in the VMCS.3560 * Exports the guest's RSP into the guest-state area in the VMCS. 3674 3561 * 3675 3562 * @returns VBox status code. … … 3681 3568 * @remarks No-long-jump zone!!! 
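
The export helpers here all follow one protocol: test the relevant HM_CHANGED_XXX bit in the atomically maintained fCtxChanged word, write the register into the VMCS, then clear just that bit with an atomic AND so that bits set concurrently elsewhere are not lost. A stripped-down sketch of the idea in plain C11 (placeholder names and field id, not the real VMM API):

#include <stdatomic.h>
#include <stdint.h>

#define CHANGED_RIP  UINT64_C(0x1)        /* invented dirty bit for the sketch */
enum { FIELD_GUEST_RIP = 1 };             /* placeholder, not a real VMCS encoding */

typedef struct
{
    _Atomic uint64_t fCtxChanged;         /* which pieces of guest state are dirty */
    uint64_t         uRip;                /* shadow copy waiting to be exported */
} GSTCTX;

/* Stand-in for a VMCS field write; the code above uses VMXWriteVmcsGstN. */
static void writeVmcsField(uint32_t idField, uint64_t uValue) { (void)idField; (void)uValue; }

/* Export RIP only when it is marked dirty, then clear the dirty bit atomically. */
static void exportGuestRip(GSTCTX *pCtx)
{
    if (atomic_load_explicit(&pCtx->fCtxChanged, memory_order_relaxed) & CHANGED_RIP)
    {
        writeVmcsField(FIELD_GUEST_RIP, pCtx->uRip);
        atomic_fetch_and_explicit(&pCtx->fCtxChanged, ~CHANGED_RIP, memory_order_relaxed);
    }
}
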
3682 3569 */ 3683 static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3684 { 3685 int rc = VINF_SUCCESS; 3686 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP)) 3687 { 3688 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp); 3570 static int hmR0VmxExportGuestRsp(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3571 { 3572 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RSP) 3573 { 3574 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp); 3689 3575 AssertRCReturn(rc, rc); 3690 3576 3691 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RSP); 3692 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RSP=%#RX64\n", pVCpu->idCpu, pMixedCtx->rsp)); 3693 } 3694 return rc; 3695 } 3696 3697 3698 /** 3699 * Loads the guest's RFLAGS into the guest-state area in the VMCS. 3577 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RSP); 3578 } 3579 return VINF_SUCCESS; 3580 } 3581 3582 3583 /** 3584 * Exports the guest's RFLAGS into the guest-state area in the VMCS. 3700 3585 * 3701 3586 * @returns VBox status code. … … 3707 3592 * @remarks No-long-jump zone!!! 3708 3593 */ 3709 static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3710 { 3711 int rc = VINF_SUCCESS; 3712 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS)) 3594 static int hmR0VmxExportGuestRflags(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3595 { 3596 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RFLAGS) 3713 3597 { 3714 3598 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ). 3715 3599 Let us assert it as such and use 32-bit VMWRITE. */ 3716 3600 Assert(!(pMixedCtx->rflags.u64 >> 32)); 3717 X86EFLAGS Eflags = pMixedCtx->eflags; 3718 /** @todo r=bird: There shall be no need to OR in X86_EFL_1 here, nor 3719 * shall there be any reason for clearing bits 63:22, 15, 5 and 3. 3720 * These will never be cleared/set, unless some other part of the VMM 3721 * code is buggy - in which case we're better of finding and fixing 3722 * those bugs than hiding them. */ 3723 Assert(Eflags.u32 & X86_EFL_RA1_MASK); 3724 Assert(!(Eflags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK))); 3725 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */ 3726 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */ 3601 X86EFLAGS fEFlags = pMixedCtx->eflags; 3602 Assert(fEFlags.u32 & X86_EFL_RA1_MASK); 3603 Assert(!(fEFlags.u32 & ~(X86_EFL_1 | X86_EFL_LIVE_MASK))); 3727 3604 3728 3605 /* 3729 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM-exit. 3730 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode. 3606 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so 3607 * we can restore them on VM-exit. Modify the real-mode guest's eflags so that VT-x 3608 * can run the real-mode guest code under Virtual 8086 mode. 3731 3609 */ 3732 3610 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) … … 3734 3612 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); 3735 3613 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM))); 3736 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */3737 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */3738 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. 
*/3739 } 3740 3741 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);3614 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = fEFlags.u32; /* Save the original eflags of the real-mode guest. */ 3615 fEFlags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */ 3616 fEFlags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */ 3617 } 3618 3619 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, fEFlags.u32); 3742 3620 AssertRCReturn(rc, rc); 3743 3621 3744 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_RFLAGS); 3745 Log4(("Load[%RU32]: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", pVCpu->idCpu, Eflags.u32)); 3746 } 3747 return rc; 3748 } 3749 3750 3751 /** 3752 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS. 3622 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_RFLAGS); 3623 Log4Func(("EFlags=%#RX32\n", fEFlags.u32)); 3624 } 3625 return VINF_SUCCESS; 3626 } 3627 3628 3629 /** 3630 * Exports the guest CR0 control register into the guest-state area in the VMCS. 3631 * 3632 * The guest FPU state is always pre-loaded hence we don't need to bother about 3633 * sharing FPU related CR0 bits between the guest and host. 3753 3634 * 3754 3635 * @returns VBox status code. … … 3760 3641 * @remarks No-long-jump zone!!! 3761 3642 */ 3762 DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3763 { 3764 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx); 3765 rc |= hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx); 3766 rc |= hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx); 3767 AssertRCReturn(rc, rc); 3768 return rc; 3769 } 3770 3771 3772 /** 3773 * Loads the guest CR0 control register into the guest-state area in the VMCS. 3774 * CR0 is partially shared with the host and we have to consider the FPU bits. 3775 * 3776 * @returns VBox status code. 3777 * @param pVCpu The cross context virtual CPU structure. 3778 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 3779 * out-of-sync. Make sure to update the required fields 3780 * before using them. 3781 * 3782 * @remarks No-long-jump zone!!! 3783 */ 3784 static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 3785 { 3786 Assert(CPUMIsGuestFPUStateActive(pVCpu)); 3787 3788 /* 3789 * Guest CR0. 3790 * Guest FPU. 3791 */ 3792 int rc = VINF_SUCCESS; 3793 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)) 3794 { 3795 Assert(!(pMixedCtx->cr0 >> 32)); 3796 uint32_t u32GuestCR0 = pMixedCtx->cr0; 3797 PVM pVM = pVCpu->CTX_SUFF(pVM); 3798 3799 /* The guest's view (read access) of its CR0 is unblemished. */ 3800 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0); 3801 AssertRCReturn(rc, rc); 3802 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR0)); 3803 3804 /* Setup VT-x's view of the guest CR0. */ 3805 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */ 3643 static int hmR0VmxExportGuestCR0(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3644 { 3645 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR0) 3646 { 3647 PVM pVM = pVCpu->CTX_SUFF(pVM); 3648 Assert(!RT_HI_U32(pMixedCtx->cr0)); 3649 uint32_t const uShadowCR0 = pMixedCtx->cr0; 3650 uint32_t uGuestCR0 = pMixedCtx->cr0; 3651 3652 /* 3653 * Setup VT-x's view of the guest CR0. 3654 * Minimize VM-exits due to CR3 changes when we have NestedPaging. 
3655 */ 3656 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls; 3806 3657 if (pVM->hm.s.fNestedPaging) 3807 3658 { 3808 if (CPUMIsGuestPagingEnabled Ex(pMixedCtx))3659 if (CPUMIsGuestPagingEnabled(pVCpu)) 3809 3660 { 3810 3661 /* The guest has paging enabled, let it access CR3 without causing a VM-exit if supported. */ 3811 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT3812 3662 uProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT 3663 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT); 3813 3664 } 3814 3665 else 3815 3666 { 3816 3667 /* The guest doesn't have paging enabled, make CR3 access cause a VM-exit to update our shadow. */ 3817 pVCpu->hm.s.vmx.u32ProcCtls |=VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT3818 |VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;3668 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT 3669 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 3819 3670 } 3820 3671 3821 3672 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */ 3822 3673 if (pVM->hm.s.vmx.fUnrestrictedGuest) 3823 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 3824 3825 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 3826 AssertRCReturn(rc, rc); 3674 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT; 3827 3675 } 3828 3676 else 3829 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */ 3677 { 3678 /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */ 3679 uGuestCR0 |= X86_CR0_WP; 3680 } 3830 3681 3831 3682 /* 3832 3683 * Guest FPU bits. 3833 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be set on the first 3834 * CPUs to support VT-x and no mention of with regards to UX in VM-entry checks. 3684 * 3685 * Since we pre-load the guest FPU always before VM-entry there is no need to track lazy state 3686 * using CR0.TS. 3687 * 3688 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE bit must always be 3689 * set on the first CPUs to support VT-x and no mention of with regards to UX in VM-entry checks. 3835 3690 */ 3836 u32GuestCR0 |= X86_CR0_NE; 3837 3838 /* Catch floating point exceptions if we need to report them to the guest in a different way. */ 3839 bool fInterceptMF = false; 3840 if (!(pMixedCtx->cr0 & X86_CR0_NE)) 3841 fInterceptMF = true; 3842 3843 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */ 3691 uGuestCR0 |= X86_CR0_NE; 3692 3693 /* If CR0.NE isn't set, we need to intercept #MF exceptions and report them to the guest differently. */ 3694 bool const fInterceptMF = !(uShadowCR0 & X86_CR0_NE); 3695 3696 /* 3697 * Update exception intercepts. 3698 */ 3699 uint32_t uXcptBitmap = pVCpu->hm.s.vmx.u32XcptBitmap; 3844 3700 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 3845 3701 { 3846 3702 Assert(PDMVmmDevHeapIsEnabled(pVM)); 3847 3703 Assert(pVM->hm.s.vmx.pRealModeTSS); 3848 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;3704 uXcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK; 3849 3705 } 3850 3706 else 3851 3707 { 3852 3708 /* For now, cleared here as mode-switches can happen outside HM/VT-x. See @bugref{7626#c11}. 
*/ 3853 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;3709 uXcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK; 3854 3710 if (fInterceptMF) 3855 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF); 3856 } 3857 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS); 3711 uXcptBitmap |= RT_BIT(X86_XCPT_MF); 3712 } 3858 3713 3859 3714 /* Additional intercepts for debugging, define these yourself explicitly. */ 3860 3715 #ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS 3861 pVCpu->hm.s.vmx.u32XcptBitmap |= 03862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3716 uXcptBitmap |= 0 3717 | RT_BIT(X86_XCPT_BP) 3718 | RT_BIT(X86_XCPT_DE) 3719 | RT_BIT(X86_XCPT_NM) 3720 | RT_BIT(X86_XCPT_TS) 3721 | RT_BIT(X86_XCPT_UD) 3722 | RT_BIT(X86_XCPT_NP) 3723 | RT_BIT(X86_XCPT_SS) 3724 | RT_BIT(X86_XCPT_GP) 3725 | RT_BIT(X86_XCPT_PF) 3726 | RT_BIT(X86_XCPT_MF) 3727 ; 3873 3728 #elif defined(HMVMX_ALWAYS_TRAP_PF) 3874 pVCpu->hm.s.vmx.u32XcptBitmap|= RT_BIT(X86_XCPT_PF);3729 uXcptBitmap |= RT_BIT(X86_XCPT_PF); 3875 3730 #endif 3876 3731 if (uXcptBitmap != pVCpu->hm.s.vmx.u32XcptBitmap) 3732 { 3733 pVCpu->hm.s.vmx.u32XcptBitmap = uXcptBitmap; 3734 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_XCPT_INTERCEPTS); 3735 } 3877 3736 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF))); 3878 3737 3879 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */ 3880 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3881 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3882 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */ 3883 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); 3738 /* 3739 * Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). 3740 */ 3741 uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3742 uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 3743 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */ 3744 fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); 3884 3745 else 3885 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)); 3886 3887 u32GuestCR0 |= uSetCR0; 3888 u32GuestCR0 &= uZapCR0; 3889 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */ 3890 3891 /* Write VT-x's view of the guest CR0 into the VMCS. */ 3892 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0); 3893 AssertRCReturn(rc, rc); 3894 Log4(("Load[%RU32]: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", pVCpu->idCpu, u32GuestCR0, uSetCR0, 3895 uZapCR0)); 3746 Assert((fSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG)); 3747 3748 uGuestCR0 |= fSetCR0; 3749 uGuestCR0 &= fZapCR0; 3750 uGuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */ 3896 3751 3897 3752 /* … … 3900 3755 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables). 3901 3756 */ 3902 uint32_t u32CR0Mask = 0; 3903 u32CR0Mask = X86_CR0_PE 3904 | X86_CR0_NE 3905 | X86_CR0_WP 3906 | X86_CR0_PG 3907 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */ 3908 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */ 3909 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. 
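The fSetCR0/fZapCR0 computation above applies the CR0 fixed-bit constraints the CPU reports through the IA32_VMX_CR0_FIXED0/FIXED1 MSR pair: bits set in both must be 1, bits clear in both must be 0 (with PE/PG exempted for unrestricted guests). A standalone restatement of that arithmetic with hypothetical MSR values:

#include <stdint.h>
#include <stdio.h>

/* Apply the VMX CR0 fixed-bit constraints.
   fixed0: a 1 bit here means the CR0 bit may not be 0.
   fixed1: a 0 bit here means the CR0 bit may not be 1. */
static uint32_t ApplyCr0FixedBits(uint32_t cr0, uint64_t fixed0, uint64_t fixed1)
{
    uint32_t const fSet = (uint32_t)(fixed0 & fixed1);  /* bits that must be 1 */
    uint32_t const fZap = (uint32_t)(fixed0 | fixed1);  /* bits allowed to be 1 */
    cr0 |= fSet;
    cr0 &= fZap;
    return cr0;
}

int main(void)
{
    /* Hypothetical MSR values: PE(0x1), NE(0x20), PG(0x80000000) forced on. */
    uint64_t const fixed0 = 0x80000021;
    uint64_t const fixed1 = 0xffffffff;
    printf("CR0 -> %#x\n", ApplyCr0FixedBits(0x00000011 /* PE|ET */, fixed0, fixed1));
    return 0;
}

For well-formed MSR values FIXED0 is a subset of FIXED1, so fSet equals FIXED0 and fZap equals FIXED1; the AND/OR form simply keeps the intent explicit.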
Don't let the guest modify the host CR0.NW */ 3757 uint32_t uCR0Mask = X86_CR0_PE 3758 | X86_CR0_NE 3759 | (pVM->hm.s.fNestedPaging ? 0 : X86_CR0_WP) 3760 | X86_CR0_PG 3761 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */ 3762 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */ 3763 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */ 3910 3764 3911 3765 /** @todo Avoid intercepting CR0.PE with unrestricted guests. Fix PGM … … 3914 3768 #if 0 3915 3769 if (pVM->hm.s.vmx.fUnrestrictedGuest) 3916 u 32CR0Mask &= ~X86_CR0_PE;3770 uCr0Mask &= ~X86_CR0_PE; 3917 3771 #endif 3918 if (pVM->hm.s.fNestedPaging) 3919 u32CR0Mask &= ~X86_CR0_WP; 3920 3921 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */ 3922 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask; 3923 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask); 3772 /* Update the HMCPU's copy of the CR0 mask. */ 3773 pVCpu->hm.s.vmx.u32CR0Mask = uCR0Mask; 3774 3775 /* 3776 * Finally, update VMCS fields with the CR0 values. 3777 */ 3778 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, uGuestCR0); 3779 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, uShadowCR0); 3780 rc |= VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, uCR0Mask); 3781 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls) 3782 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, uProcCtls); 3924 3783 AssertRCReturn(rc, rc); 3925 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR0_MASK=%#RX32\n", pVCpu->idCpu, u32CR0Mask)); 3926 3927 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR0); 3928 } 3929 return rc; 3930 } 3931 3932 3933 /** 3934 * Loads the guest control registers (CR3, CR4) into the guest-state area 3784 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls; 3785 3786 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR0); 3787 3788 Log4Func(("uCr0Mask=%#RX32 uShadowCR0=%#RX32 uGuestCR0=%#RX32 (fSetCR0=%#RX32 fZapCR0=%#RX32\n", uCR0Mask, uShadowCR0, 3789 uGuestCR0, fSetCR0, fZapCR0)); 3790 } 3791 3792 return VINF_SUCCESS; 3793 } 3794 3795 3796 /** 3797 * Exports the guest control registers (CR3, CR4) into the guest-state area 3935 3798 * in the VMCS. 3936 3799 * … … 3947 3810 * @remarks No-long-jump zone!!! 3948 3811 */ 3949 static VBOXSTRICTRC hmR0Vmx LoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)3812 static VBOXSTRICTRC hmR0VmxExportGuestCR3AndCR4(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 3950 3813 { 3951 3814 int rc = VINF_SUCCESS; … … 3960 3823 * Guest CR3. 3961 3824 */ 3962 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3))3825 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR3) 3963 3826 { 3964 3827 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS; … … 3986 3849 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP); 3987 3850 AssertRCReturn(rc, rc); 3988 Log4(("Load[%RU32]: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.vmx.HCPhysEPTP));3989 3851 3990 3852 if ( pVM->hm.s.vmx.fUnrestrictedGuest … … 4003 3865 } 4004 3866 4005 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we 4006 have Unrestricted Execution to handle the guest when it's not using paging. */ 3867 /* 3868 * The guest's view of its CR3 is unblemished with Nested Paging when the 3869 * guest is using paging or we have unrestricted guest execution to handle 3870 * the guest when it's not using paging. 
3871 */ 4007 3872 GCPhysGuestCR3 = pMixedCtx->cr3; 4008 3873 } … … 4010 3875 { 4011 3876 /* 4012 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory 4013 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses. 4014 * EPT takes care of translating it to host-physical addresses. 3877 * The guest is not using paging, but the CPU (VT-x) has to. While the guest 3878 * thinks it accesses physical memory directly, we use our identity-mapped 3879 * page table to map guest-linear to guest-physical addresses. EPT takes care 3880 * of translating it to host-physical addresses. 4015 3881 */ 4016 3882 RTGCPHYS GCPhys; … … 4023 3889 else if (rc == VERR_PDM_DEV_HEAP_R3_TO_GCPHYS) 4024 3890 { 4025 Log4 (("Load[%RU32]: VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n", pVCpu->idCpu));3891 Log4Func(("VERR_PDM_DEV_HEAP_R3_TO_GCPHYS -> VINF_EM_RESCHEDULE_REM\n")); 4026 3892 return VINF_EM_RESCHEDULE_REM; /* We cannot execute now, switch to REM/IEM till the guest maps in VMMDev. */ 4027 3893 } … … 4032 3898 } 4033 3899 4034 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RGp (GstN)\n", pVCpu->idCpu, GCPhysGuestCR3));3900 Log4Func(("uGuestCR3=%#RGp (GstN)\n", GCPhysGuestCR3)); 4035 3901 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3); 3902 AssertRCReturn(rc, rc); 4036 3903 } 4037 3904 else … … 4040 3907 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu); 4041 3908 4042 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", pVCpu->idCpu, HCPhysGuestCR3));3909 Log4Func(("uGuestCR3=%#RHv (HstN)\n", HCPhysGuestCR3)); 4043 3910 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3); 4044 }4045 AssertRCReturn(rc, rc);4046 4047 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_CR3);3911 AssertRCReturn(rc, rc); 3912 } 3913 3914 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR3); 4048 3915 } 4049 3916 … … 4052 3919 * ASSUMES this is done everytime we get in from ring-3! (XCR0) 4053 3920 */ 4054 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4))4055 { 4056 Assert(! (pMixedCtx->cr4 >> 32));4057 uint32_t u 32GuestCR4 = pMixedCtx->cr4;3921 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_CR4) 3922 { 3923 Assert(!RT_HI_U32(pMixedCtx->cr4)); 3924 uint32_t uGuestCR4 = pMixedCtx->cr4; 4058 3925 4059 3926 /* The guest's view of its CR4 is unblemished. */ 4060 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u 32GuestCR4);3927 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, uGuestCR4); 4061 3928 AssertRCReturn(rc, rc); 4062 Log4(("Load[%RU32]: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", pVCpu->idCpu, u32GuestCR4)); 4063 4064 /* Setup VT-x's view of the guest CR4. */ 3929 Log4Func(("uShadowCR4=%#RX32\n", uGuestCR4)); 3930 4065 3931 /* 4066 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program 4067 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0()) 3932 * Setup VT-x's view of the guest CR4. 3933 * 3934 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software 3935 * interrupts to the 8086 program interrupt handler. Clear the VME bit (the interrupt 3936 * redirection bitmap is already all 0, see hmR3InitFinalizeR0()) 3937 * 4068 3938 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode". 
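The CR3 handling above reduces to three cases: nested paging with a paging-enabled guest uses the guest's own CR3, nested paging with a non-paging guest points CR3 at an identity-mapped page table (EPT does the rest), and without nested paging the shadow page-table root from the VMM is used. A compressed decision helper with hypothetical parameter names that captures only that selection:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pick the value to write into the VMCS guest-CR3 field (simplified). */
static uint64_t SelectGuestCr3(bool fNestedPaging, bool fGuestPagingEnabled,
                               uint64_t uGuestCr3,        /* the guest's own CR3 */
                               uint64_t uIdentityPgTable, /* identity-mapped tables for non-paged guests */
                               uint64_t uShadowCr3)       /* shadow page-table root from the VMM */
{
    if (fNestedPaging)
        return fGuestPagingEnabled ? uGuestCr3        /* EPT translates guest-physical addresses */
                                   : uIdentityPgTable;
    return uShadowCr3;                                /* no EPT: the CPU must walk our shadow tables */
}

int main(void)
{
    printf("CR3=%#llx\n", (unsigned long long)SelectGuestCr3(true, true, 0x1000, 0x2000, 0x3000));
    return 0;
}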
4069 3939 */ … … 4072 3942 Assert(pVM->hm.s.vmx.pRealModeTSS); 4073 3943 Assert(PDMVmmDevHeapIsEnabled(pVM)); 4074 u 32GuestCR4 &= ~X86_CR4_VME;3944 uGuestCR4 &= ~X86_CR4_VME; 4075 3945 } 4076 3946 … … 4081 3951 { 4082 3952 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */ 4083 u 32GuestCR4 |= X86_CR4_PSE;3953 uGuestCR4 |= X86_CR4_PSE; 4084 3954 /* Our identity mapping is a 32-bit page directory. */ 4085 u 32GuestCR4 &= ~X86_CR4_PAE;3955 uGuestCR4 &= ~X86_CR4_PAE; 4086 3956 } 4087 3957 /* else use guest CR4.*/ … … 4099 3969 case PGMMODE_32_BIT: /* 32-bit paging. */ 4100 3970 { 4101 u 32GuestCR4 &= ~X86_CR4_PAE;3971 uGuestCR4 &= ~X86_CR4_PAE; 4102 3972 break; 4103 3973 } … … 4106 3976 case PGMMODE_PAE_NX: /* PAE paging with NX. */ 4107 3977 { 4108 u 32GuestCR4 |= X86_CR4_PAE;3978 uGuestCR4 |= X86_CR4_PAE; 4109 3979 break; 4110 3980 } … … 4122 3992 4123 3993 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */ 4124 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);4125 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);4126 u 32GuestCR4 |= uSetCR4;4127 u 32GuestCR4 &= uZapCR4;3994 uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 3995 uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 3996 uGuestCR4 |= fSetCR4; 3997 uGuestCR4 &= fZapCR4; 4128 3998 4129 3999 /* Write VT-x's view of the guest CR4 into the VMCS. */ 4130 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", pVCpu->idCpu, u32GuestCR4, uSetCR4, uZapCR4));4131 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u 32GuestCR4);4000 Log4Func(("uGuestCR4=%#RX32 (fSetCR4=%#RX32 fZapCR4=%#RX32)\n", uGuestCR4, fSetCR4, fZapCR4)); 4001 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, uGuestCR4); 4132 4002 AssertRCReturn(rc, rc); 4133 4003 … … 4149 4019 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0(); 4150 4020 4151 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_CR4);4021 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_CR4); 4152 4022 } 4153 4023 return rc; … … 4156 4026 4157 4027 /** 4158 * Loads the guest debug registers into the guest-state area in the VMCS. 4028 * Exports the guest debug registers into the guest-state area in the VMCS. 4029 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3). 4159 4030 * 4160 4031 * This also sets up whether \#DB and MOV DRx accesses cause VM-exits. 4161 *4162 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).4163 4032 * 4164 4033 * @returns VBox status code. … … 4170 4039 * @remarks No-long-jump zone!!! 
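One detail worth noting in the CR4 path above: the fLoadSaveGuestXcr0 decision is made here, and XCR0 only gets swapped around VM-entry when the guest has CR4.OSXSAVE set and its XCR0 differs from the host's. A trivial restatement of that predicate (CR4 bit 18 is OSXSAVE):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CR4_OSXSAVE UINT64_C(0x00040000)   /* CR4 bit 18 */

/* True if XCR0 must be loaded/saved around guest execution. */
static bool NeedXcr0Swap(uint64_t uGuestCr4, uint64_t uGuestXcr0, uint64_t uHostXcr0)
{
    return (uGuestCr4 & CR4_OSXSAVE) && uGuestXcr0 != uHostXcr0;
}

int main(void)
{
    printf("%d\n", NeedXcr0Swap(0x40000, 0x7, 0x1f));  /* guest enables fewer XSAVE components */
    return 0;
}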
4171 4040 */ 4172 static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4173 { 4174 if (!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG)) 4175 return VINF_SUCCESS; 4041 static int hmR0VmxExportSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4042 { 4043 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 4176 4044 4177 4045 #ifdef VBOX_STRICT … … 4185 4053 #endif 4186 4054 4187 int rc; 4188 PVM pVM = pVCpu->CTX_SUFF(pVM); 4189 bool fSteppingDB = false; 4190 bool fInterceptMovDRx = false; 4055 bool fSteppingDB = false; 4056 bool fInterceptMovDRx = false; 4057 uint32_t uProcCtls = pVCpu->hm.s.vmx.u32ProcCtls; 4191 4058 if (pVCpu->hm.s.fSingleInstruction) 4192 4059 { 4193 4060 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */ 4061 PVM pVM = pVCpu->CTX_SUFF(pVM); 4194 4062 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG) 4195 4063 { 4196 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG; 4197 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 4198 AssertRCReturn(rc, rc); 4064 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG; 4199 4065 Assert(fSteppingDB == false); 4200 4066 } … … 4202 4068 { 4203 4069 pMixedCtx->eflags.u32 |= X86_EFL_TF; 4070 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_RFLAGS; 4204 4071 pVCpu->hm.s.fClearTrapFlag = true; 4205 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);4206 4072 fSteppingDB = true; 4207 4073 } 4208 4074 } 4209 4075 4076 uint32_t uGuestDR7; 4210 4077 if ( fSteppingDB 4211 4078 || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK)) 4212 4079 { 4213 4080 /* 4214 * Use the combined guest and host DRx values found in the hypervisor 4215 * register set because the debugger has breakpoints active or someone4216 * is single stepping on thehost side without a monitor trap flag.4081 * Use the combined guest and host DRx values found in the hypervisor register set 4082 * because the debugger has breakpoints active or someone is single stepping on the 4083 * host side without a monitor trap flag. 4217 4084 * 4218 4085 * Note! DBGF expects a clean DR6 state before executing guest code. 4219 4086 */ 4220 4087 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 4221 if ( CPUMIsGuestInLongModeEx(pMixedCtx)4088 if ( CPUMIsGuestInLongModeEx(pMixedCtx) 4222 4089 && !CPUMIsHyperDebugStateActivePending(pVCpu)) 4223 4090 { … … 4235 4102 } 4236 4103 4237 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */ 4238 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu)); 4239 AssertRCReturn(rc, rc); 4240 4104 /* Update DR7 with the hypervisor value (other DRx registers are handled by CPUM one way or another). */ 4105 uGuestDR7 = (uint32_t)CPUMGetHyperDR7(pVCpu); 4241 4106 pVCpu->hm.s.fUsingHyperDR7 = true; 4242 4107 fInterceptMovDRx = true; … … 4248 4113 * executing guest code so they'll trigger at the right time. 4249 4114 */ 4250 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */4115 if (pMixedCtx->dr[7] & X86_DR7_ENABLED_MASK) 4251 4116 { 4252 4117 #if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS) 4253 if ( CPUMIsGuestInLongModeEx(pMixedCtx)4118 if ( CPUMIsGuestInLongModeEx(pMixedCtx) 4254 4119 && !CPUMIsGuestDebugStateActivePending(pVCpu)) 4255 4120 { … … 4286 4151 } 4287 4152 4288 /* Update guest DR7. 
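The debug-state export above answers two questions: which DR7 value goes into the VMCS (the hypervisor's combined value when DBGF breakpoints or host-side single-stepping are active, otherwise the guest's) and whether MOV DRx must cause VM-exits. A condensed sketch of that decision with hypothetical inputs in place of the CPUM/DBGF queries; the real code has additional cases (e.g. lazily activating the guest debug state):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DR7_ENABLED_MASK UINT32_C(0x000000ff)  /* L0-L3/G0-G3 enable bits */

typedef struct DBGDECISION
{
    uint32_t uDr7;              /* value to write to the VMCS guest-DR7 field */
    bool     fInterceptMovDRx;  /* cause VM-exits on MOV DRx? */
    bool     fUsingHyperDR7;    /* debugging the guest from the host side */
} DBGDECISION;

static DBGDECISION DecideDebugState(bool fSteppingDB, uint32_t uHyperDr7, uint32_t uGuestDr7)
{
    DBGDECISION Res;
    if (fSteppingDB || (uHyperDr7 & DR7_ENABLED_MASK))
    {
        /* Host-side debugging: use the combined hypervisor DRx set and trap guest DRx access. */
        Res.uDr7 = uHyperDr7;
        Res.fInterceptMovDRx = true;
        Res.fUsingHyperDR7 = true;
    }
    else
    {
        /* Guest owns the debug registers; only its DR7 needs to be in place.
           (The production code also decides here whether to intercept for lazy DRx loading.) */
        Res.uDr7 = uGuestDr7;
        Res.fInterceptMovDRx = false;
        Res.fUsingHyperDR7 = false;
    }
    return Res;
}

int main(void)
{
    DBGDECISION Res = DecideDebugState(false, 0x00000400 /* nothing enabled */, 0x00000401);
    printf("dr7=%#x intercept=%d hyper=%d\n", Res.uDr7, Res.fInterceptMovDRx, Res.fUsingHyperDR7);
    return 0;
}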
*/ 4289 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]); 4290 AssertRCReturn(rc, rc); 4291 4153 /* Update DR7 with the actual guest value. */ 4154 uGuestDR7 = pMixedCtx->dr[7]; 4292 4155 pVCpu->hm.s.fUsingHyperDR7 = false; 4293 4156 } 4294 4157 4158 if (fInterceptMovDRx) 4159 uProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4160 else 4161 uProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4162 4295 4163 /* 4296 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.4164 * Update the processor-based VM-execution controls for MOV-DRx intercepts and the monitor-trap flag. 4297 4165 */ 4298 if (fInterceptMovDRx) 4299 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4300 else 4301 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT; 4302 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 4166 if (uProcCtls != pVCpu->hm.s.vmx.u32ProcCtls) 4167 { 4168 int rc2 = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 4169 AssertRCReturn(rc2, rc2); 4170 pVCpu->hm.s.vmx.u32ProcCtls = uProcCtls; 4171 } 4172 4173 /* 4174 * Update guest DR7. 4175 */ 4176 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, uGuestDR7); 4303 4177 AssertRCReturn(rc, rc); 4304 4178 4305 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_DEBUG);4306 4179 return VINF_SUCCESS; 4307 4180 } … … 4312 4185 * Strict function to validate segment registers. 4313 4186 * 4314 * @remarks ASSUMES CR0 is up to date. 4315 */ 4316 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 4317 { 4318 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */ 4319 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is because hmR0VmxWriteSegmentReg() 4320 * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */ 4187 * @remarks Will import guest CR0 on strict builds during validation of 4188 * segments. 4189 */ 4190 static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pCtx) 4191 { 4192 /* 4193 * Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". 4194 * 4195 * The reason we check for attribute value 0 in this function and not just the unusable bit is 4196 * because hmR0VmxWriteSegmentReg() only updates the VMCS' copy of the value with the unusable bit 4197 * and doesn't change the guest-context value. 4198 */ 4199 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 4321 4200 if ( !pVM->hm.s.vmx.fUnrestrictedGuest 4322 4201 && ( !CPUMIsGuestInRealModeEx(pCtx) … … 4492 4371 */ 4493 4372 static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, 4494 uint32_t idxAccess, PC PUMSELREG pSelReg)4373 uint32_t idxAccess, PCCPUMSELREG pSelReg) 4495 4374 { 4496 4375 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */ … … 4510 4389 { 4511 4390 /* 4512 * The way to differentiate between whether this is really a null selector or was just a selector loaded with 0 in 4513 * real-mode is using the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in 4514 * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures NULL selectors 4515 * loaded in protected-mode have their attribute as 0. 
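The segment write helper above encodes VT-x's "unusable" convention: an attribute dword of 0 identifies a genuine protected-mode null selector and is flagged unusable (bit 16 of the access rights), while a zero selector loaded in real mode keeps its non-zero attributes and stays usable. A small sketch of just that fix-up, with a hypothetical constant name:

#include <stdint.h>
#include <stdio.h>

#define SEL_ATTR_UNUSABLE UINT32_C(0x00010000)   /* "segment unusable" bit in the VMCS access rights */

/* Compute the access-rights dword to write for a segment register (sketch). */
static uint32_t SegAccessRightsForVmcs(uint32_t uAttr)
{
    /* Attribute 0 means a protected-mode null selector; VT-x wants it flagged unusable.
       Any non-zero attribute (including one carried over from real mode) is passed through. */
    return uAttr ? uAttr : SEL_ATTR_UNUSABLE;
}

int main(void)
{
    printf("%#x %#x\n", SegAccessRightsForVmcs(0), SegAccessRightsForVmcs(0x93 /* accessed data segment */));
    return 0;
}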
4391 * The way to differentiate between whether this is really a null selector or was just 4392 * a selector loaded with 0 in real-mode is using the segment attributes. A selector 4393 * loaded in real-mode with the value 0 is valid and usable in protected-mode and we 4394 * should -not- mark it as an unusable segment. Both the recompiler & VT-x ensures 4395 * NULL selectors loaded in protected-mode have their attribute as 0. 4516 4396 */ 4517 4397 if (!u32Access) … … 4530 4410 4531 4411 /** 4532 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)4412 * Exports the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases) 4533 4413 * into the guest-state area in the VMCS. 4534 4414 * … … 4539 4419 * before using them. 4540 4420 * 4541 * @remarks ASSUMES pMixedCtx->cr0 is up to date (strict builds validation). 4421 * @remarks Will import guest CR0 on strict builds during validation of 4422 * segments. 4542 4423 * @remarks No-long-jump zone!!! 4543 4424 */ 4544 static int hmR0Vmx LoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)4425 static int hmR0VmxExportGuestSegmentRegs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4545 4426 { 4546 4427 int rc = VERR_INTERNAL_ERROR_5; … … 4550 4431 * Guest Segment registers: CS, SS, DS, ES, FS, GS. 4551 4432 */ 4552 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS))4433 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SREG_MASK) 4553 4434 { 4554 4435 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */ … … 4574 4455 in real-mode (e.g. OpenBSD 4.0) */ 4575 4456 REMFlushTBs(pVM); 4576 Log4 (("Load[%RU32]: Switch to protected mode detected!\n", pVCpu->idCpu));4457 Log4Func(("Switch to protected mode detected!\n")); 4577 4458 pVCpu->hm.s.vmx.fWasInRealMode = false; 4578 4459 } … … 4603 4484 #endif 4604 4485 4605 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS);4606 Log4(("Load[%RU32]: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pVCpu->idCpu, pMixedCtx->cs.Sel,4607 pMixedCtx->cs.u64Base, pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));4608 4609 4486 /* Update the exit history entry with the correct CS.BASE + RIP. */ 4610 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP))4487 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_RIP) 4611 4488 EMR0HistoryUpdatePC(pVCpu, pMixedCtx->cs.u64Base + pMixedCtx->rip, true); 4489 4490 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SREG_MASK); 4491 Log4Func(("CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base, 4492 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u)); 4612 4493 } 4613 4494 … … 4615 4496 * Guest TR. 4616 4497 */ 4617 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR))4498 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_TR) 4618 4499 { 4619 4500 /* … … 4675 4556 AssertRCReturn(rc, rc); 4676 4557 4677 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_TR);4678 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", pVCpu->idCpu,u64Base));4558 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_TR); 4559 Log4Func(("TR base=%#RX64\n", pMixedCtx->tr.u64Base)); 4679 4560 } 4680 4561 … … 4682 4563 * Guest GDTR. 
4683 4564 */ 4684 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR))4565 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_GDTR) 4685 4566 { 4686 4567 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); … … 4691 4572 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 4692 4573 4693 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_GDTR);4694 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->gdtr.pGdt));4574 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_GDTR); 4575 Log4Func(("GDTR base=%#RX64\n", pMixedCtx->gdtr.pGdt)); 4695 4576 } 4696 4577 … … 4698 4579 * Guest LDTR. 4699 4580 */ 4700 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR))4581 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_LDTR) 4701 4582 { 4702 4583 /* The unusable bit is specific to VT-x, if it's a null selector mark it as an unusable segment. */ … … 4728 4609 } 4729 4610 4730 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_LDTR);4731 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->ldtr.u64Base));4611 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_LDTR); 4612 Log4Func(("LDTR base=%#RX64\n", pMixedCtx->ldtr.u64Base)); 4732 4613 } 4733 4614 … … 4735 4616 * Guest IDTR. 4736 4617 */ 4737 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR))4618 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_IDTR) 4738 4619 { 4739 4620 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); … … 4744 4625 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */ 4745 4626 4746 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_IDTR);4747 Log4 (("Load[%RU32]: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pVCpu->idCpu, pMixedCtx->idtr.pIdt));4627 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_IDTR); 4628 Log4Func(("IDTR base=%#RX64\n", pMixedCtx->idtr.pIdt)); 4748 4629 } 4749 4630 … … 4753 4634 4754 4635 /** 4755 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store4636 * Exports certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store 4756 4637 * areas. 4757 4638 * … … 4759 4640 * VM-entry and stored from the host CPU on every successful VM-exit. This also 4760 4641 * creates/updates MSR slots for the host MSRs. The actual host MSR values are 4761 * -not- updated here for performance reasons. See hmR0Vmx SaveHostMsrs().4762 * 4763 * Also loads thesysenter MSRs into the guest-state area in the VMCS.4642 * -not- updated here for performance reasons. See hmR0VmxExportHostMsrs(). 4643 * 4644 * Also exports the guest sysenter MSRs into the guest-state area in the VMCS. 4764 4645 * 4765 4646 * @returns VBox status code. … … 4771 4652 * @remarks No-long-jump zone!!! 4772 4653 */ 4773 static int hmR0Vmx LoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)4654 static int hmR0VmxExportGuestMsrs(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4774 4655 { 4775 4656 AssertPtr(pVCpu); … … 4778 4659 /* 4779 4660 * MSRs that we use the auto-load/store MSR area in the VMCS. 4661 * For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). 4780 4662 */ 4781 4663 PVM pVM = pVCpu->CTX_SUFF(pVM); 4782 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS)) 4783 { 4784 /* For 64-bit hosts, we load/restore them lazily, see hmR0VmxLazyLoadGuestMsrs(). 
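For MSRs that have no dedicated VMCS field and are not restored lazily, VT-x provides auto-load/store areas: arrays of 16-byte {index, value} entries the CPU loads on VM-entry and stores on VM-exit. A minimal sketch of maintaining such an array; the structure and helper below are illustrative stand-ins, not the real VMXAUTOMSR/hmR0VmxAddAutoLoadStoreMsr code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Layout of one entry in a VMX auto-load/store MSR area (16 bytes each). */
typedef struct AUTOMSR
{
    uint32_t u32Msr;        /* MSR index */
    uint32_t u32Reserved;
    uint64_t u64Value;      /* value loaded on VM-entry / written on VM-exit */
} AUTOMSR;

/* Add a new MSR slot or update an existing one; returns the slot count. */
static uint32_t AddOrUpdateAutoMsr(AUTOMSR *paMsrs, uint32_t cMsrs, uint32_t idMsr, uint64_t uValue)
{
    for (uint32_t i = 0; i < cMsrs; i++)
        if (paMsrs[i].u32Msr == idMsr)
        {
            paMsrs[i].u64Value = uValue;        /* already present: just refresh the value */
            return cMsrs;
        }
    paMsrs[cMsrs].u32Msr      = idMsr;          /* append a new slot (caller guarantees room) */
    paMsrs[cMsrs].u32Reserved = 0;
    paMsrs[cMsrs].u64Value    = uValue;
    return cMsrs + 1;
}

int main(void)
{
    AUTOMSR aMsrs[8];
    memset(aMsrs, 0, sizeof(aMsrs));
    uint32_t cMsrs = 0;
    cMsrs = AddOrUpdateAutoMsr(aMsrs, cMsrs, 0xC0000081 /* STAR */, 0);
    cMsrs = AddOrUpdateAutoMsr(aMsrs, cMsrs, 0xC0000082 /* LSTAR */, UINT64_C(0xffffffff81000000));
    printf("%u MSR slots\n", cMsrs);
    return 0;
}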
*/ 4664 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_VMX_GUEST_AUTO_MSRS) 4665 { 4666 if (pVM->hm.s.fAllow64BitGuests) 4667 { 4785 4668 #if HC_ARCH_BITS == 32 4786 if (pVM->hm.s.fAllow64BitGuests)4787 {4788 4669 int rc = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_LSTAR, pMixedCtx->msrLSTAR, false, NULL); 4789 4670 rc |= hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K6_STAR, pMixedCtx->msrSTAR, false, NULL); … … 4792 4673 AssertRCReturn(rc, rc); 4793 4674 # ifdef LOG_ENABLED 4794 P VMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;4675 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)CpVCpu->hm.s.vmx.pvGuestMsr; 4795 4676 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cMsrs; i++, pMsr++) 4796 { 4797 Log4(("Load[%RU32]: MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", pVCpu->idCpu, i, pMsr->u32Msr, 4798 pMsr->u64Value)); 4799 } 4677 Log4Func(("MSR[%RU32]: u32Msr=%#RX32 u64Value=%#RX64\n", i, pMsr->u32Msr, pMsr->u64Value)); 4800 4678 # endif 4801 }4802 4679 #endif 4803 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 4680 } 4681 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_VMX_GUEST_AUTO_MSRS); 4804 4682 } 4805 4683 … … 4809 4687 * VM-exits on WRMSRs for these MSRs. 4810 4688 */ 4811 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR)) 4812 { 4813 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc); 4814 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 4815 } 4816 4817 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR)) 4818 { 4819 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc); 4820 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 4821 } 4822 4823 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR)) 4824 { 4825 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc); 4826 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 4827 } 4828 4829 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_EFER_MSR)) 4689 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_MSR_MASK) 4690 { 4691 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_CS_MSR) 4692 { 4693 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); 4694 AssertRCReturn(rc, rc); 4695 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_CS_MSR); 4696 } 4697 4698 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_EIP_MSR) 4699 { 4700 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); 4701 AssertRCReturn(rc, rc); 4702 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 4703 } 4704 4705 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_SYSENTER_ESP_MSR) 4706 { 4707 int rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); 4708 AssertRCReturn(rc, rc); 4709 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 4710 } 4711 } 4712 4713 if (ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged) & HM_CHANGED_GUEST_EFER_MSR) 4830 4714 { 4831 4715 if (hmR0VmxShouldSwapEferMsr(pVCpu, pMixedCtx)) … … 4839 4723 int rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_EFER_FULL, pMixedCtx->msrEFER); 4840 4724 AssertRCReturn(rc,rc); 4841 Log4 (("Load[%RU32]: VMX_VMCS64_GUEST_EFER_FULL=%#RX64\n", pVCpu->idCpu, pMixedCtx->msrEFER));4725 Log4Func(("EFER=%#RX64\n", pMixedCtx->msrEFER)); 4842 4726 } 4843 4727 else … … 4850 
4734 if (pVM->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS) 4851 4735 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_EFER, VMXMSREXIT_INTERCEPT_READ, VMXMSREXIT_INTERCEPT_WRITE); 4852 Log4 (("Load[%RU32]: MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", pVCpu->idCpu, MSR_K6_EFER,4853 pMixedCtx->msrEFER,pVCpu->hm.s.vmx.cMsrs));4736 Log4Func(("MSR[--]: u32Msr=%#RX32 u64Value=%#RX64 cMsrs=%u\n", MSR_K6_EFER, pMixedCtx->msrEFER, 4737 pVCpu->hm.s.vmx.cMsrs)); 4854 4738 } 4855 4739 } 4856 4740 else if (!pVM->hm.s.vmx.fSupportsVmcsEfer) 4857 4741 hmR0VmxRemoveAutoLoadStoreMsr(pVCpu, MSR_K6_EFER); 4858 HMCPU_CF_CLEAR(pVCpu,HM_CHANGED_GUEST_EFER_MSR);4742 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~HM_CHANGED_GUEST_EFER_MSR); 4859 4743 } 4860 4744 … … 4863 4747 4864 4748 4865 /** 4866 * Loads the guest activity state into the guest-state area in the VMCS. 4749 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 4750 /** 4751 * Check if guest state allows safe use of 32-bit switcher again. 4752 * 4753 * Segment bases and protected mode structures must be 32-bit addressable 4754 * because the 32-bit switcher will ignore high dword when writing these VMCS 4755 * fields. See @bugref{8432} for details. 4756 * 4757 * @returns true if safe, false if must continue to use the 64-bit switcher. 4758 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 4759 * out-of-sync. Make sure to update the required fields 4760 * before using them. 4761 * 4762 * @remarks No-long-jump zone!!! 4763 */ 4764 static bool hmR0VmxIs32BitSwitcherSafe(PCCPUMCTX pMixedCtx) 4765 { 4766 if (pMixedCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) return false; 4767 if (pMixedCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) return false; 4768 if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) return false; 4769 if (pMixedCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) return false; 4770 if (pMixedCtx->es.u64Base & UINT64_C(0xffffffff00000000)) return false; 4771 if (pMixedCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4772 if (pMixedCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) return false; 4773 if (pMixedCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) return false; 4774 if (pMixedCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4775 if (pMixedCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) return false; 4776 4777 /* All good, bases are 32-bit. */ 4778 return true; 4779 } 4780 #endif 4781 4782 4783 /** 4784 * Selects up the appropriate function to run guest code. 4867 4785 * 4868 4786 * @returns VBox status code. … … 4874 4792 * @remarks No-long-jump zone!!! 4875 4793 */ 4876 static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4877 { 4878 NOREF(pMixedCtx); 4879 /** @todo See if we can make use of other states, e.g. 4880 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */ 4881 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)) 4882 { 4883 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE); 4884 AssertRCReturn(rc, rc); 4885 4886 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMX_GUEST_ACTIVITY_STATE); 4887 } 4888 return VINF_SUCCESS; 4889 } 4890 4891 4892 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 4893 /** 4894 * Check if guest state allows safe use of 32-bit switcher again. 4895 * 4896 * Segment bases and protected mode structures must be 32-bit addressable 4897 * because the 32-bit switcher will ignore high dword when writing these VMCS 4898 * fields. 
See @bugref{8432} for details. 4899 * 4900 * @returns true if safe, false if must continue to use the 64-bit switcher. 4901 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 4902 * out-of-sync. Make sure to update the required fields 4903 * before using them. 4904 * 4905 * @remarks No-long-jump zone!!! 4906 */ 4907 static bool hmR0VmxIs32BitSwitcherSafe(PCPUMCTX pMixedCtx) 4908 { 4909 if (pMixedCtx->gdtr.pGdt & UINT64_C(0xffffffff00000000)) 4910 return false; 4911 if (pMixedCtx->idtr.pIdt & UINT64_C(0xffffffff00000000)) 4912 return false; 4913 if (pMixedCtx->ldtr.u64Base & UINT64_C(0xffffffff00000000)) 4914 return false; 4915 if (pMixedCtx->tr.u64Base & UINT64_C(0xffffffff00000000)) 4916 return false; 4917 if (pMixedCtx->es.u64Base & UINT64_C(0xffffffff00000000)) 4918 return false; 4919 if (pMixedCtx->cs.u64Base & UINT64_C(0xffffffff00000000)) 4920 return false; 4921 if (pMixedCtx->ss.u64Base & UINT64_C(0xffffffff00000000)) 4922 return false; 4923 if (pMixedCtx->ds.u64Base & UINT64_C(0xffffffff00000000)) 4924 return false; 4925 if (pMixedCtx->fs.u64Base & UINT64_C(0xffffffff00000000)) 4926 return false; 4927 if (pMixedCtx->gs.u64Base & UINT64_C(0xffffffff00000000)) 4928 return false; 4929 /* All good, bases are 32-bit. */ 4930 return true; 4931 } 4932 #endif 4933 4934 4935 /** 4936 * Sets up the appropriate function to run guest code. 4937 * 4938 * @returns VBox status code. 4939 * @param pVCpu The cross context virtual CPU structure. 4940 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 4941 * out-of-sync. Make sure to update the required fields 4942 * before using them. 4943 * 4944 * @remarks No-long-jump zone!!! 4945 */ 4946 static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 4794 static int hmR0VmxSelectVMRunHandler(PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 4947 4795 { 4948 4796 if (CPUMIsGuestInLongModeEx(pMixedCtx)) … … 4956 4804 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64) 4957 4805 { 4806 #ifdef VBOX_STRICT 4958 4807 if (pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */ 4959 4808 { 4960 4809 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. */ 4961 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS 4962 | HM_CHANGED_VMX_ENTRY_CTLS 4963 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu))); 4810 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 4811 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS 4812 | HM_CHANGED_VMX_ENTRY_CTLS 4813 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged)); 4964 4814 } 4815 #endif 4965 4816 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64; 4966 4817 … … 4968 4819 the rest of the VM run (until VM reset). See @bugref{8432#c7}. */ 4969 4820 pVCpu->hm.s.vmx.fSwitchedTo64on32 = true; 4970 Log4 (("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 64-bit switcher\n", pVCpu->idCpu));4821 Log4Func(("Selected 64-bit switcher\n")); 4971 4822 } 4972 4823 #else … … 4983 4834 && pVCpu->hm.s.vmx.pfnStartVM != NULL) /* Very first entry would have saved host-state already, ignore it. */ 4984 4835 { 4836 # ifdef VBOX_STRICT 4985 4837 /* Currently, all mode changes sends us back to ring-3, so these should be set. See @bugref{6944}. 
*/ 4986 AssertMsg(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_VMX_EXIT_CTLS 4987 | HM_CHANGED_VMX_ENTRY_CTLS 4988 | HM_CHANGED_GUEST_EFER_MSR), ("flags=%#x\n", HMCPU_CF_VALUE(pVCpu))); 4838 uint64_t const fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 4839 AssertMsg(fCtxChanged & ( HM_CHANGED_VMX_EXIT_CTLS 4840 | HM_CHANGED_VMX_ENTRY_CTLS 4841 | HM_CHANGED_GUEST_EFER_MSR), ("fCtxChanged=%#RX64\n", fCtxChanged)); 4842 # endif 4989 4843 } 4990 4844 # ifdef VBOX_ENABLE_64_BITS_GUESTS 4991 4845 /* 4992 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel design, see @bugref{8432#c7}. 4993 * If real-on-v86 mode is active, clear the 64-bit switcher flag because now we know the guest is in a sane 4994 * state where it's safe to use the 32-bit switcher. Otherwise check the guest state if it's safe to use 4846 * Keep using the 64-bit switcher even though we're in 32-bit because of bad Intel 4847 * design, see @bugref{8432#c7}. If real-on-v86 mode is active, clear the 64-bit 4848 * switcher flag because now we know the guest is in a sane state where it's safe 4849 * to use the 32-bit switcher. Otherwise check the guest state if it's safe to use 4995 4850 * the much faster 32-bit switcher again. 4996 4851 */ … … 4998 4853 { 4999 4854 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0StartVM32) 5000 Log4 (("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 32-bit switcher\n", pVCpu->idCpu));4855 Log4Func(("Selected 32-bit switcher\n")); 5001 4856 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; 5002 4857 } … … 5009 4864 pVCpu->hm.s.vmx.fSwitchedTo64on32 = false; 5010 4865 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32; 5011 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR5012 | HM_CHANGED_VMX_ENTRY_CTLS5013 | HM_CHANGED_VMX_EXIT_CTLS5014 | HM_CHANGED_HOST_CONTEXT);5015 Log4 (("Load[%RU32]: hmR0VmxSetupVMRunHandler: selected 32-bit switcher (safe)\n", pVCpu->idCpu));4866 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR 4867 | HM_CHANGED_VMX_ENTRY_CTLS 4868 | HM_CHANGED_VMX_EXIT_CTLS 4869 | HM_CHANGED_HOST_CONTEXT); 4870 Log4Func(("Selected 32-bit switcher (safe)\n")); 5016 4871 } 5017 4872 } … … 5040 4895 DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 5041 4896 { 4897 /* Mark that HM is the keeper of all guest-CPU registers now that we're going to execute guest code. */ 4898 pCtx->fExtrn |= HMVMX_CPUMCTX_EXTRN_ALL | CPUMCTX_EXTRN_KEEPER_HM; 4899 5042 4900 /* 5043 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations 5044 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved and thus the need for this XMM wrapper. 5045 * Refer MSDN docs. "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details. 4901 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses 4902 * floating-point operations using SSE instructions. Some XMM registers (XMM6-XMM15) are 4903 * callee-saved and thus the need for this XMM wrapper. 4904 * 4905 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage". 5046 4906 */ 5047 4907 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED); … … 5075 4935 HMVMX_ASSERT_PREEMPT_SAFE(); 5076 4936 5077 Log4 (("VM-entry failure: %Rrc\n", rcVMRun));4937 Log4Func(("VM-entry failure: %Rrc\n", rcVMRun)); 5078 4938 switch (rcVMRun) 5079 4939 { … … 5322 5182 * 5323 5183 * @returns VBox status code (no informational status codes). 
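hmR0VmxRunGuest above derives fResumeVM from whether the current VMCS has been launched (HMVMX_VMCS_STATE_LAUNCHED), which is what selects VMRESUME over VMLAUNCH in the assembly entry path. A tiny illustration of that state machine, with a boolean flag and a stub standing in for the real entry point:

#include <stdbool.h>
#include <stdio.h>

/* Stub for the assembly VM-entry routine; fResume selects VMRESUME vs VMLAUNCH. */
static int StartVM(bool fResume)
{
    printf("using %s\n", fResume ? "VMRESUME" : "VMLAUNCH");
    return 0;
}

int main(void)
{
    bool fVmcsLaunched = false;              /* cleared whenever the VMCS is (re)loaded with VMPTRLD */
    for (int i = 0; i < 3; i++)
    {
        StartVM(fVmcsLaunched);              /* the first entry must VMLAUNCH ... */
        fVmcsLaunched = true;                /* ... every later entry on this VMCS uses VMRESUME */
    }
    return 0;
}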
5324 * @param pVM The cross context VM structure.5325 5184 * @param pVCpu The cross context virtual CPU structure. 5326 * @param pCtx Pointer to the guest CPU context.5327 5185 * @param enmOp The operation to perform. 5328 5186 * @param cParams Number of parameters. 5329 5187 * @param paParam Array of 32-bit parameters. 5330 5188 */ 5331 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp,5189 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, 5332 5190 uint32_t cParams, uint32_t *paParam) 5333 5191 { 5334 NOREF(pCtx); 5335 5192 PVM pVM = pVCpu->CTX_SUFF(pVM); 5336 5193 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER); 5337 5194 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END); … … 5453 5310 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1; 5454 5311 #endif 5455 int rc = VMXR0Execute64BitsHandler(pV M, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]);5312 int rc = VMXR0Execute64BitsHandler(pVCpu, HM64ON32OP_VMXRCStartVM64, RT_ELEMENTS(aParam), &aParam[0]); 5456 5313 5457 5314 #ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 5537 5394 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP); 5538 5395 5539 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */ 5396 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for 5397 these 64-bit fields (using "FULL" and "HIGH" fields). */ 5540 5398 #if 0 5541 5399 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL); … … 5774 5632 5775 5633 5776 #ifdef HMVMX_USE_IEM_EVENT_REFLECTION5777 5634 /** 5778 5635 * Gets the IEM exception flags for the specified vector and IDT vectoring / … … 5832 5689 } 5833 5690 5834 #else5835 /**5836 * Determines if an exception is a contributory exception.5837 *5838 * Contributory exceptions are ones which can cause double-faults unless the5839 * original exception was a benign exception. 
Page-fault is intentionally not5840 * included here as it's a conditional contributory exception.5841 *5842 * @returns true if the exception is contributory, false otherwise.5843 * @param uVector The exception vector.5844 */5845 DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)5846 {5847 switch (uVector)5848 {5849 case X86_XCPT_GP:5850 case X86_XCPT_SS:5851 case X86_XCPT_NP:5852 case X86_XCPT_TS:5853 case X86_XCPT_DE:5854 return true;5855 default:5856 break;5857 }5858 return false;5859 }5860 #endif /* HMVMX_USE_IEM_EVENT_REFLECTION */5861 5862 5691 5863 5692 /** … … 5930 5759 uint32_t const uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo); 5931 5760 5932 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); AssertRCReturn(rc2, rc2); 5933 rc2 = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); AssertRCReturn(rc2, rc2); 5761 int rc2 = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); 5762 rc2 |= hmR0VmxReadExitIntInfoVmcs(pVmxTransient); 5763 AssertRCReturn(rc2, rc2); 5934 5764 5935 5765 VBOXSTRICTRC rcStrict = VINF_SUCCESS; … … 5938 5768 uint32_t const uIdtVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo); 5939 5769 uint32_t const uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo); 5940 #ifdef HMVMX_USE_IEM_EVENT_REFLECTION 5770 5941 5771 /* 5942 * If the event was a software interrupt (generated with INT n) or a software exception (generated 5943 * by INT3/INTO) or a privileged software exception (generated by INT1), we can handle the VM-exit 5944 * and continue guest execution which will re-execute the instruction rather than re-injecting the 5945 * exception, as that can cause premature trips to ring-3 before injection and involve TRPM which 5946 * currently has no way of storing that these exceptions were caused by these instructions 5947 * (ICEBP's #DB poses the problem). 5772 * If the event was a software interrupt (generated with INT n) or a software exception 5773 * (generated by INT3/INTO) or a privileged software exception (generated by INT1), we 5774 * can handle the VM-exit and continue guest execution which will re-execute the 5775 * instruction rather than re-injecting the exception, as that can cause premature 5776 * trips to ring-3 before injection and involve TRPM which currently has no way of 5777 * storing that these exceptions were caused by these instructions (ICEBP's #DB poses 5778 * the problem). 5948 5779 */ 5949 5780 IEMXCPTRAISE enmRaise; … … 5965 5796 ("hmR0VmxCheckExitDueToEventDelivery: Unexpected VM-exit interruption info. 
%#x!\n", 5966 5797 uExitVectorType), VERR_VMX_IPE_5); 5798 5967 5799 enmRaise = IEMEvaluateRecursiveXcpt(pVCpu, fIdtVectorFlags, uIdtVector, fExitVectorFlags, uExitVector, &fRaiseInfo); 5968 5800 … … 6009 5841 case IEMXCPTRAISE_CURRENT_XCPT: 6010 5842 { 6011 Log4 (("IDT: vcpu[%RU32] Pending secondary xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", pVCpu->idCpu,6012 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo));5843 Log4Func(("IDT: Pending secondary Xcpt: uIdtVectoringInfo=%#RX64 uExitIntInfo=%#RX64\n", 5844 pVmxTransient->uIdtVectoringInfo, pVmxTransient->uExitIntInfo)); 6013 5845 Assert(rcStrict == VINF_SUCCESS); 6014 5846 break; … … 6032 5864 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2); 6033 5865 6034 Log4 (("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,6035 pVCpu->hm.s.Event.u32ErrCode));5866 Log4Func(("IDT: Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->hm.s.Event.u64IntInfo, 5867 pVCpu->hm.s.Event.u32ErrCode)); 6036 5868 Assert(rcStrict == VINF_SUCCESS); 6037 5869 break; … … 6051 5883 { 6052 5884 pVmxTransient->fVectoringDoublePF = true; 6053 Log4 (("IDT: vcpu[%RU32] Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo,5885 Log4Func(("IDT: Vectoring double #PF %#RX64 cr2=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo, 6054 5886 pMixedCtx->cr2)); 6055 5887 rcStrict = VINF_SUCCESS; … … 6059 5891 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect); 6060 5892 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx); 6061 Log4 (("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,6062 pVCpu->hm.s.Event.u64IntInfo,uIdtVector, uExitVector));5893 Log4Func(("IDT: Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->hm.s.Event.u64IntInfo, 5894 uIdtVector, uExitVector)); 6063 5895 rcStrict = VINF_HM_DOUBLE_FAULT; 6064 5896 } … … 6068 5900 case IEMXCPTRAISE_TRIPLE_FAULT: 6069 5901 { 6070 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector, 6071 uExitVector)); 5902 Log4Func(("IDT: Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", uIdtVector, uExitVector)); 6072 5903 rcStrict = VINF_EM_RESET; 6073 5904 break; … … 6076 5907 case IEMXCPTRAISE_CPU_HANG: 6077 5908 { 6078 Log4 (("IDT: vcpu[%RU32] Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", pVCpu->idCpu, fRaiseInfo));5909 Log4Func(("IDT: Bad guest! Entering CPU hang. fRaiseInfo=%#x\n", fRaiseInfo)); 6079 5910 rcStrict = VERR_EM_GUEST_CPU_HANG; 6080 5911 break; … … 6088 5919 } 6089 5920 } 6090 #else6091 typedef enum6092 {6093 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */6094 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */6095 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */6096 VMXREFLECTXCPT_HANG, /* Indicate bad VM trying to deadlock the CPU. */6097 VMXREFLECTXCPT_NONE /* Nothing to reflect. */6098 } VMXREFLECTXCPT;6099 6100 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". 
*/6101 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;6102 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo))6103 {6104 if (uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)6105 {6106 enmReflect = VMXREFLECTXCPT_XCPT;6107 #ifdef VBOX_STRICT6108 if ( hmR0VmxIsContributoryXcpt(uIdtVector)6109 && uExitVector == X86_XCPT_PF)6110 {6111 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));6112 }6113 #endif6114 if ( uExitVector == X86_XCPT_PF6115 && uIdtVector == X86_XCPT_PF)6116 {6117 pVmxTransient->fVectoringDoublePF = true;6118 Log4(("IDT: vcpu[%RU32] Vectoring Double #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));6119 }6120 else if ( uExitVector == X86_XCPT_AC6121 && uIdtVector == X86_XCPT_AC)6122 {6123 enmReflect = VMXREFLECTXCPT_HANG;6124 Log4(("IDT: Nested #AC - Bad guest\n"));6125 }6126 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)6127 && hmR0VmxIsContributoryXcpt(uExitVector)6128 && ( hmR0VmxIsContributoryXcpt(uIdtVector)6129 || uIdtVector == X86_XCPT_PF))6130 {6131 enmReflect = VMXREFLECTXCPT_DF;6132 }6133 else if (uIdtVector == X86_XCPT_DF)6134 enmReflect = VMXREFLECTXCPT_TF;6135 }6136 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT6137 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)6138 {6139 /*6140 * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and6141 * privileged software exception (#DB from ICEBP) as they reoccur when restarting the instruction.6142 */6143 enmReflect = VMXREFLECTXCPT_XCPT;6144 6145 if (uExitVector == X86_XCPT_PF)6146 {6147 pVmxTransient->fVectoringPF = true;6148 Log4(("IDT: vcpu[%RU32] Vectoring #PF due to Ext-Int/NMI. uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));6149 }6150 }6151 }6152 else if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT6153 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT6154 || uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI)6155 {6156 /*6157 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit6158 * interruption-information will not be valid as it's not an exception and we end up here. In such cases,6159 * it is sufficient to reflect the original exception to the guest after handling the VM-exit.6160 */6161 enmReflect = VMXREFLECTXCPT_XCPT;6162 }6163 6164 /*6165 * On CPUs that support Virtual NMIs, if this VM-exit (be it an exception or EPT violation/misconfig etc.) occurred6166 * while delivering the NMI, we need to clear the block-by-NMI field in the guest interruptibility-state before6167 * re-delivering the NMI after handling the VM-exit. Otherwise the subsequent VM-entry would fail.6168 *6169 * See Intel spec. 30.7.1.2 "Resuming Guest Software after Handling an Exception". 
See @bugref{7445}.6170 */6171 if ( uIdtVectorType == VMX_IDT_VECTORING_INFO_TYPE_NMI6172 && enmReflect == VMXREFLECTXCPT_XCPT6173 && (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)6174 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))6175 {6176 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);6177 }6178 6179 switch (enmReflect)6180 {6181 case VMXREFLECTXCPT_XCPT:6182 {6183 Assert( uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT6184 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT6185 && uIdtVectorType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);6186 6187 uint32_t u32ErrCode = 0;6188 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))6189 {6190 rc2 = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);6191 AssertRCReturn(rc2, rc2);6192 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;6193 }6194 6195 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */6196 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);6197 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),6198 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);6199 rcStrict = VINF_SUCCESS;6200 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,6201 pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.u32ErrCode));6202 6203 break;6204 }6205 6206 case VMXREFLECTXCPT_DF:6207 {6208 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);6209 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);6210 rcStrict = VINF_HM_DOUBLE_FAULT;6211 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,6212 pVCpu->hm.s.Event.u64IntInfo, uIdtVector, uExitVector));6213 6214 break;6215 }6216 6217 case VMXREFLECTXCPT_TF:6218 {6219 rcStrict = VINF_EM_RESET;6220 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,6221 uExitVector));6222 break;6223 }6224 6225 case VMXREFLECTXCPT_HANG:6226 {6227 rcStrict = VERR_EM_GUEST_CPU_HANG;6228 break;6229 }6230 6231 default:6232 Assert(rcStrict == VINF_SUCCESS);6233 break;6234 }6235 #endif /* HMVMX_USE_IEM_EVENT_REFLECTION */6236 5921 } 6237 5922 else if ( VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo) … … 6247 5932 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6248 5933 { 6249 Log4 (("hmR0VmxCheckExitDueToEventDelivery: vcpu[%RU32] Setting VMCPU_FF_BLOCK_NMIS.Valid=%RTbool uExitReason=%u\n",6250 pVCpu->idCpu,VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason));5934 Log4Func(("Setting VMCPU_FF_BLOCK_NMIS. fValid=%RTbool uExitReason=%u\n", 5935 VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntInfo), pVmxTransient->uExitReason)); 6251 5936 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6252 5937 } … … 6260 5945 6261 5946 /** 6262 * Saves the guest's CR0 register from the VMCS into the guest-CPU context. 6263 * 6264 * @returns VBox status code. 6265 * @param pVCpu The cross context virtual CPU structure. 6266 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6267 * out-of-sync. Make sure to update the required fields 6268 * before using them. 6269 * 6270 * @remarks No-long-jump zone!!! 6271 */ 6272 static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6273 { 6274 NOREF(pMixedCtx); 6275 6276 /* 6277 * While in the middle of saving guest-CR0, we could get preempted and re-invoked from the preemption hook, 6278 * see hmR0VmxLeave(). 
Safer to just make this code non-preemptible. 6279 */ 6280 VMMRZCallRing3Disable(pVCpu); 6281 HM_DISABLE_PREEMPT(); 6282 6283 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0)) 6284 { 6285 #ifndef DEBUG_bird /** @todo this triggers running bs3-cpu-generated-1.img with --debug-command-line 6286 * and 'dbgc-init' containing: 6287 * sxe "xcpt_de" 6288 * sxe "xcpt_bp" 6289 * sxi "xcpt_gp" 6290 * sxi "xcpt_ss" 6291 * sxi "xcpt_np" 6292 */ 6293 /** @todo r=ramshankar: Should be fixed after r119291. */ 6294 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0)); 6295 #endif 6296 uint32_t uVal = 0; 6297 uint32_t uShadow = 0; 6298 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal); 6299 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow); 6300 AssertRCReturn(rc, rc); 6301 6302 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask); 6303 CPUMSetGuestCR0(pVCpu, uVal); 6304 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0); 6305 } 6306 6307 HM_RESTORE_PREEMPT(); 6308 VMMRZCallRing3Enable(pVCpu); 6309 return VINF_SUCCESS; 6310 } 6311 6312 6313 /** 6314 * Saves the guest's CR4 register from the VMCS into the guest-CPU context. 6315 * 6316 * @returns VBox status code. 6317 * @param pVCpu The cross context virtual CPU structure. 6318 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6319 * out-of-sync. Make sure to update the required fields 6320 * before using them. 6321 * 6322 * @remarks No-long-jump zone!!! 6323 */ 6324 static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6325 { 6326 NOREF(pMixedCtx); 6327 6328 int rc = VINF_SUCCESS; 6329 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4)) 6330 { 6331 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR4)); 6332 uint32_t uVal = 0; 6333 uint32_t uShadow = 0; 6334 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal); 6335 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow); 6336 AssertRCReturn(rc, rc); 6337 6338 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask); 6339 CPUMSetGuestCR4(pVCpu, uVal); 6340 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4); 6341 } 6342 return rc; 6343 } 6344 6345 6346 /** 6347 * Saves the guest's RIP register from the VMCS into the guest-CPU context. 6348 * 6349 * @returns VBox status code. 6350 * @param pVCpu The cross context virtual CPU structure. 6351 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6352 * out-of-sync. Make sure to update the required fields 6353 * before using them. 6354 * 6355 * @remarks No-long-jump zone!!! 6356 */ 6357 static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6358 { 6359 int rc = VINF_SUCCESS; 6360 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP)) 6361 { 6362 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RIP)); 6363 uint64_t u64Val = 0; 6364 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); 6365 AssertRCReturn(rc, rc); 6366 6367 pMixedCtx->rip = u64Val; 6368 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP); 6369 } 6370 return rc; 6371 } 6372 6373 6374 /** 6375 * Saves the guest's RSP register from the VMCS into the guest-CPU context. 6376 * 6377 * @returns VBox status code. 6378 * @param pVCpu The cross context virtual CPU structure. 6379 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6380 * out-of-sync. Make sure to update the required fields 6381 * before using them. 6382 * 6383 * @remarks No-long-jump zone!!! 
6384 */ 6385 static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6386 { 6387 int rc = VINF_SUCCESS; 6388 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP)) 6389 { 6390 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RSP)); 6391 uint64_t u64Val = 0; 6392 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); 6393 AssertRCReturn(rc, rc); 6394 6395 pMixedCtx->rsp = u64Val; 6396 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RSP); 6397 } 6398 return rc; 6399 } 6400 6401 6402 /** 6403 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context. 6404 * 6405 * @returns VBox status code. 6406 * @param pVCpu The cross context virtual CPU structure. 6407 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6408 * out-of-sync. Make sure to update the required fields 6409 * before using them. 6410 * 6411 * @remarks No-long-jump zone!!! 6412 */ 6413 static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6414 { 6415 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)) 6416 { 6417 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS)); 6418 uint32_t uVal = 0; 6419 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal); 6420 AssertRCReturn(rc, rc); 6421 6422 pMixedCtx->eflags.u32 = uVal; 6423 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */ 6424 { 6425 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS); 6426 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32)); 6427 6428 pMixedCtx->eflags.Bits.u1VM = 0; 6429 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL; 6430 } 6431 6432 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS); 6433 } 6434 return VINF_SUCCESS; 6435 } 6436 6437 6438 /** 6439 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the 6440 * guest-CPU context. 6441 */ 6442 DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6443 { 6444 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 6445 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx); 6446 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 6447 return rc; 6448 } 6449 6450 6451 /** 6452 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it) 6453 * from the guest-state area in the VMCS. 6454 * 6455 * @param pVCpu The cross context virtual CPU structure. 6456 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6457 * out-of-sync. Make sure to update the required fields 6458 * before using them. 6459 * 6460 * @remarks No-long-jump zone!!! 6461 */ 6462 static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6463 { 6464 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE)) 6465 { 6466 uint32_t uIntrState = 0; 6467 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState); 6468 AssertRC(rc); 6469 6470 if (!uIntrState) 6471 { 6472 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6473 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6474 6475 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6476 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6477 } 6478 else 6479 { 6480 if (uIntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS 6481 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)) 6482 { 6483 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 6484 AssertRC(rc); 6485 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). 
*/ 6486 AssertRC(rc); 6487 6488 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip); 6489 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 6490 } 6491 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6492 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6493 6494 if (uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) 6495 { 6496 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6497 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6498 } 6499 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6500 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6501 } 6502 6503 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_INTR_STATE); 6504 } 6505 } 6506 6507 6508 /** 6509 * Saves the guest's activity state. 6510 * 6511 * @returns VBox status code. 6512 * @param pVCpu The cross context virtual CPU structure. 6513 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6514 * out-of-sync. Make sure to update the required fields 6515 * before using them. 6516 * 6517 * @remarks No-long-jump zone!!! 6518 */ 6519 static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6520 { 6521 NOREF(pMixedCtx); 6522 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */ 6523 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_ACTIVITY_STATE); 6524 return VINF_SUCCESS; 6525 } 6526 6527 6528 /** 6529 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from 6530 * the current VMCS into the guest-CPU context. 6531 * 6532 * @returns VBox status code. 6533 * @param pVCpu The cross context virtual CPU structure. 6534 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6535 * out-of-sync. Make sure to update the required fields 6536 * before using them. 6537 * 6538 * @remarks No-long-jump zone!!! 6539 */ 6540 static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6541 { 6542 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR)) 6543 { 6544 Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR)); 6545 uint32_t u32Val = 0; 6546 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc); 6547 pMixedCtx->SysEnter.cs = u32Val; 6548 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR); 6549 } 6550 6551 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR)) 6552 { 6553 Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR)); 6554 uint64_t u64Val = 0; 6555 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc); 6556 pMixedCtx->SysEnter.eip = u64Val; 6557 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR); 6558 } 6559 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR)) 6560 { 6561 Assert(!HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR)); 6562 uint64_t u64Val = 0; 6563 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc); 6564 pMixedCtx->SysEnter.esp = u64Val; 6565 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR); 6566 } 6567 return VINF_SUCCESS; 6568 } 6569 6570 6571 /** 6572 * Saves the set of guest MSRs (that we restore lazily while leaving VT-x) from 6573 * the CPU back into the guest-CPU context. 6574 * 6575 * @returns VBox status code. 6576 * @param pVCpu The cross context virtual CPU structure. 6577 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6578 * out-of-sync. 
Make sure to update the required fields 6579 * before using them. 6580 * 6581 * @remarks No-long-jump zone!!! 6582 */ 6583 static int hmR0VmxSaveGuestLazyMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6584 { 6585 /* Since this can be called from our preemption hook it's safer to make the guest-MSRs update non-preemptible. */ 6586 VMMRZCallRing3Disable(pVCpu); 6587 HM_DISABLE_PREEMPT(); 6588 6589 /* Doing the check here ensures we don't overwrite already-saved guest MSRs from a preemption hook. */ 6590 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS)) 6591 { 6592 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS)); 6593 hmR0VmxLazySaveGuestMsrs(pVCpu, pMixedCtx); 6594 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS); 6595 } 6596 6597 HM_RESTORE_PREEMPT(); 6598 VMMRZCallRing3Enable(pVCpu); 6599 6600 return VINF_SUCCESS; 6601 } 6602 6603 6604 /** 6605 * Saves the auto load/store'd guest MSRs from the current VMCS into 5947 * Imports a guest segment register from the current VMCS into 6606 5948 * the guest-CPU context. 6607 *6608 * @returns VBox status code.6609 * @param pVCpu The cross context virtual CPU structure.6610 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe6611 * out-of-sync. Make sure to update the required fields6612 * before using them.6613 *6614 * @remarks No-long-jump zone!!!6615 */6616 static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)6617 {6618 if (HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS))6619 return VINF_SUCCESS;6620 6621 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS));6622 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;6623 uint32_t cMsrs = pVCpu->hm.s.vmx.cMsrs;6624 Log4(("hmR0VmxSaveGuestAutoLoadStoreMsrs: cMsrs=%u\n", cMsrs));6625 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)6626 {6627 switch (pMsr->u32Msr)6628 {6629 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break;6630 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;6631 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;6632 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;6633 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;6634 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;6635 case MSR_K6_EFER: /* Nothing to do here since we intercept writes, see hmR0VmxLoadGuestMsrs(). */6636 break;6637 6638 default:6639 {6640 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));6641 pVCpu->hm.s.u32HMError = pMsr->u32Msr;6642 return VERR_HM_UNEXPECTED_LD_ST_MSR;6643 }6644 }6645 }6646 6647 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS);6648 return VINF_SUCCESS;6649 }6650 6651 6652 /**6653 * Saves the guest control registers from the current VMCS into the guest-CPU6654 * context.6655 *6656 * @returns VBox status code.6657 * @param pVCpu The cross context virtual CPU structure.6658 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe6659 * out-of-sync. Make sure to update the required fields6660 * before using them.6661 *6662 * @remarks No-long-jump zone!!!6663 */6664 static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)6665 {6666 /* Guest CR0. Guest FPU. */6667 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);6668 AssertRCReturn(rc, rc);6669 6670 /* Guest CR4. 
*/6671 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);6672 AssertRCReturn(rc, rc);6673 6674 /* Guest CR2 - updated always during the world-switch or in #PF. */6675 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */6676 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3))6677 {6678 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR3));6679 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));6680 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR4));6681 6682 PVM pVM = pVCpu->CTX_SUFF(pVM);6683 if ( pVM->hm.s.vmx.fUnrestrictedGuest6684 || ( pVM->hm.s.fNestedPaging6685 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))6686 {6687 uint64_t u64Val = 0;6688 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);6689 if (pMixedCtx->cr3 != u64Val)6690 {6691 CPUMSetGuestCR3(pVCpu, u64Val);6692 if (VMMRZCallRing3IsEnabled(pVCpu))6693 {6694 PGMUpdateCR3(pVCpu, u64Val);6695 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));6696 }6697 else6698 {6699 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/6700 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);6701 }6702 }6703 6704 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */6705 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */6706 {6707 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);6708 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);6709 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);6710 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);6711 AssertRCReturn(rc, rc);6712 6713 if (VMMRZCallRing3IsEnabled(pVCpu))6714 {6715 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);6716 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));6717 }6718 else6719 {6720 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */6721 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);6722 }6723 }6724 }6725 6726 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR3);6727 }6728 6729 /*6730 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()6731 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp6732 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.6733 *6734 * The reason for such complicated handling is because VM-exits that call into PGM expect CR3 to be up-to-date and thus6735 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that6736 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should6737 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!6738 *6739 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. 
We cover for it here.6740 */6741 if (VMMRZCallRing3IsEnabled(pVCpu))6742 {6743 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))6744 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));6745 6746 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))6747 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);6748 6749 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));6750 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));6751 }6752 6753 return rc;6754 }6755 6756 6757 /**6758 * Saves a guest segment register from the current VMCS into the guest-CPU6759 * context.6760 5949 * 6761 5950 * @returns VBox status code. … … 6768 5957 * 6769 5958 * @remarks No-long-jump zone!!! 5959 * 6770 5960 * @remarks Never call this function directly!!! Use the 6771 * HMVMX_ SAVE_SREG() macro as that takes care of whether to read6772 * from the VMCS cache or not.6773 */ 6774 static int hmR0Vmx SaveSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,6775 PCPUMSELREG pSelReg)5961 * HMVMX_IMPORT_SREG() macro as that takes care 5962 * of whether to read from the VMCS cache or not. 5963 */ 5964 static int hmR0VmxImportGuestSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess, 5965 PCPUMSELREG pSelReg) 6776 5966 { 6777 5967 NOREF(pVCpu); 6778 5968 6779 uint32_t u32Val = 0; 6780 int rc = VMXReadVmcs32(idxSel, &u32Val); 5969 uint32_t u32Sel; 5970 uint32_t u32Limit; 5971 uint32_t u32Attr; 5972 uint64_t u64Base; 5973 int rc = VMXReadVmcs32(idxSel, &u32Sel); 5974 rc |= VMXReadVmcs32(idxLimit, &u32Limit); 5975 rc |= VMXReadVmcs32(idxAccess, &u32Attr); 5976 rc |= VMXReadVmcsGstNByIdxVal(idxBase, &u64Base); 6781 5977 AssertRCReturn(rc, rc); 6782 pSelReg->Sel = (uint16_t)u32Val; 6783 pSelReg->ValidSel = (uint16_t)u32Val; 5978 5979 pSelReg->Sel = (uint16_t)u32Sel; 5980 pSelReg->ValidSel = (uint16_t)u32Sel; 6784 5981 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID; 6785 6786 rc = VMXReadVmcs32(idxLimit, &u32Val); 6787 AssertRCReturn(rc, rc); 6788 pSelReg->u32Limit = u32Val; 6789 6790 uint64_t u64Val = 0; 6791 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val); 6792 AssertRCReturn(rc, rc); 6793 pSelReg->u64Base = u64Val; 6794 6795 rc = VMXReadVmcs32(idxAccess, &u32Val); 6796 AssertRCReturn(rc, rc); 6797 pSelReg->Attr.u = u32Val; 5982 pSelReg->u32Limit = u32Limit; 5983 pSelReg->u64Base = u64Base; 5984 pSelReg->Attr.u = u32Attr; 6798 5985 6799 5986 /* … … 6820 6007 6821 6008 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. 
*/ 6822 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D| X86DESCATTR_G6823 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;6824 6825 Log4 (("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));6009 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G 6010 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT; 6011 6012 Log4Func(("Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Sel, pSelReg->Attr.u)); 6826 6013 #ifdef DEBUG_bird 6827 AssertMsg((u32 Val& ~X86DESCATTR_P) == pSelReg->Attr.u,6014 AssertMsg((u32Attr & ~X86DESCATTR_P) == pSelReg->Attr.u, 6828 6015 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n", 6829 6016 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit)); … … 6833 6020 } 6834 6021 6835 /** 6836 * Saves the guest segment registers from the current VMCS into the guest-CPU 6837 * context. 6838 * 6839 * @returns VBox status code. 6840 * @param pVCpu The cross context virtual CPU structure. 6841 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6842 * out-of-sync. Make sure to update the required fields 6843 * before using them. 6844 * 6845 * @remarks No-long-jump zone!!! 6846 */ 6847 static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6848 { 6849 /* Guest segment registers. */ 6850 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS)) 6851 { 6852 /** @todo r=ramshankar: Why do we save CR0 here? */ 6853 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS)); 6854 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 6855 AssertRCReturn(rc, rc); 6856 6857 rc = HMVMX_SAVE_SREG(CS, &pMixedCtx->cs); 6858 rc |= HMVMX_SAVE_SREG(SS, &pMixedCtx->ss); 6859 rc |= HMVMX_SAVE_SREG(DS, &pMixedCtx->ds); 6860 rc |= HMVMX_SAVE_SREG(ES, &pMixedCtx->es); 6861 rc |= HMVMX_SAVE_SREG(FS, &pMixedCtx->fs); 6862 rc |= HMVMX_SAVE_SREG(GS, &pMixedCtx->gs); 6863 AssertRCReturn(rc, rc); 6864 6865 /* Restore segment attributes for real-on-v86 mode hack. */ 6866 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6867 { 6868 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u; 6869 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u; 6870 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u; 6871 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u; 6872 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u; 6873 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u; 6874 } 6875 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SEGMENT_REGS); 6876 } 6877 6878 return VINF_SUCCESS; 6879 } 6880 6881 6882 /** 6883 * Saves the guest SS register from the current VMCS into the guest-CPU context. 6884 * 6885 * @returns VBox status code. 6886 * @param pVCpu The cross context virtual CPU structure. 6887 * @remarks No-long-jump zone!!! 6888 */ 6889 static int hmR0VmxSaveGuestCs(PVMCPU pVCpu) 6890 { 6891 /** @todo optimize this? */ 6892 return hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx); 6893 } 6894 6895 6896 /** 6897 * Saves the guest descriptor table registers and task register from the current 6898 * VMCS into the guest-CPU context. 6899 * 6900 * @returns VBox status code. 6901 * @param pVCpu The cross context virtual CPU structure. 6902 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6903 * out-of-sync. Make sure to update the required fields 6904 * before using them. 6905 * 6906 * @remarks No-long-jump zone!!! 
6907 */ 6908 static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6909 { 6910 int rc = VINF_SUCCESS; 6911 6912 /* Guest LDTR. */ 6913 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR)) 6914 { 6915 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_LDTR)); 6916 rc = HMVMX_SAVE_SREG(LDTR, &pMixedCtx->ldtr); 6917 AssertRCReturn(rc, rc); 6918 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LDTR); 6919 } 6920 6921 /* Guest GDTR. */ 6922 uint64_t u64Val = 0; 6923 uint32_t u32Val = 0; 6924 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR)) 6925 { 6926 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_GDTR)); 6927 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 6928 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc); 6929 pMixedCtx->gdtr.pGdt = u64Val; 6930 pMixedCtx->gdtr.cbGdt = u32Val; 6931 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_GDTR); 6932 } 6933 6934 /* Guest IDTR. */ 6935 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR)) 6936 { 6937 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_IDTR)); 6938 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 6939 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc); 6940 pMixedCtx->idtr.pIdt = u64Val; 6941 pMixedCtx->idtr.cbIdt = u32Val; 6942 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_IDTR); 6943 } 6944 6945 /* Guest TR. */ 6946 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR)) 6947 { 6948 Assert(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_TR)); 6949 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 6950 AssertRCReturn(rc, rc); 6951 6952 /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR, don't save the fake one. */ 6953 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6954 { 6955 rc = HMVMX_SAVE_SREG(TR, &pMixedCtx->tr); 6956 AssertRCReturn(rc, rc); 6957 } 6958 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_TR); 6959 } 6960 return rc; 6961 } 6962 6963 6964 /** 6965 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU 6966 * context. 6967 * 6968 * @returns VBox status code. 6969 * @param pVCpu The cross context virtual CPU structure. 6970 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 6971 * out-of-sync. Make sure to update the required fields 6972 * before using them. 6973 * 6974 * @remarks No-long-jump zone!!! 6975 */ 6976 static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 6977 { 6978 if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DR7)) 6979 { 6980 if (!pVCpu->hm.s.fUsingHyperDR7) 6981 { 6982 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */ 6983 uint32_t u32Val; 6984 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc); 6985 pMixedCtx->dr[7] = u32Val; 6986 } 6987 6988 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_DR7); 6989 } 6990 return VINF_SUCCESS; 6991 } 6992 6993 6994 /** 6995 * Saves the guest APIC state from the current VMCS into the guest-CPU context. 6996 * 6997 * @returns VBox status code. 6998 * @param pVCpu The cross context virtual CPU structure. 6999 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe 7000 * out-of-sync. Make sure to update the required fields 7001 * before using them. 7002 * 7003 * @remarks No-long-jump zone!!! 
7004 */ 7005 static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7006 { 7007 NOREF(pMixedCtx); 7008 7009 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */ 7010 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_APIC_STATE); 7011 return VINF_SUCCESS; 7012 } 7013 7014 7015 /** 7016 * Worker for VMXR0ImportStateOnDemand. 6022 6023 /** 6024 * Imports the guest RIP from the VMCS back into the guest-CPU context. 7017 6025 * 7018 6026 * @returns VBox status code. 7019 6027 * @param pVCpu The cross context virtual CPU structure. 7020 * @param pCtx Pointer to the guest-CPU context. 6028 * 6029 * @remarks Called with interrupts and/or preemption disabled, should not assert! 6030 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState() 6031 * instead!!! 6032 */ 6033 DECLINLINE(int) hmR0VmxImportGuestRip(PVMCPU pVCpu) 6034 { 6035 uint64_t u64Val; 6036 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6037 if (pCtx->fExtrn & CPUMCTX_EXTRN_RIP) 6038 { 6039 int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); 6040 if (RT_SUCCESS(rc)) 6041 { 6042 pCtx->rip = u64Val; 6043 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RIP; 6044 } 6045 return rc; 6046 } 6047 return VINF_SUCCESS; 6048 } 6049 6050 6051 /** 6052 * Imports the guest RFLAGS from the VMCS back into the guest-CPU context. 6053 * 6054 * @returns VBox status code. 6055 * @param pVCpu The cross context virtual CPU structure. 6056 * 6057 * @remarks Called with interrupts and/or preemption disabled, should not assert! 6058 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState() 6059 * instead!!! 6060 */ 6061 DECLINLINE(int) hmR0VmxImportGuestRFlags(PVMCPU pVCpu) 6062 { 6063 uint32_t u32Val; 6064 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6065 if (pCtx->fExtrn & CPUMCTX_EXTRN_RFLAGS) 6066 { 6067 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); 6068 if (RT_SUCCESS(rc)) 6069 { 6070 pCtx->eflags.u32 = u32Val; 6071 6072 /* Restore eflags for real-on-v86-mode hack. */ 6073 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6074 { 6075 pCtx->eflags.Bits.u1VM = 0; 6076 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL; 6077 } 6078 } 6079 pCtx->fExtrn &= ~CPUMCTX_EXTRN_RFLAGS; 6080 return rc; 6081 } 6082 return VINF_SUCCESS; 6083 } 6084 6085 6086 /** 6087 * Imports the guest interruptibility-state from the VMCS back into the guest-CPU 6088 * context. 6089 * 6090 * @returns VBox status code. 6091 * @param pVCpu The cross context virtual CPU structure. 6092 * 6093 * @remarks Called with interrupts and/or preemption disabled, should not assert! 6094 * @remarks Do -not- call this function directly, use hmR0VmxImportGuestState() 6095 * instead!!! 6096 */ 6097 DECLINLINE(int) hmR0VmxImportGuestIntrState(PVMCPU pVCpu) 6098 { 6099 uint32_t u32Val; 6100 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 6101 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val); 6102 if (RT_SUCCESS(rc)) 6103 { 6104 /* 6105 * We additionally have a requirement to import RIP, RFLAGS depending on whether we 6106 * might need them in hmR0VmxEvaluatePendingEvent(). 
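The new import helpers above share a single check/read/clear shape against pCtx->fExtrn: test the CPUMCTX_EXTRN_XXX bit, VMREAD the VMCS field only while the register is still marked external, then clear the bit so later callers skip the read entirely. A rough sketch of that shape using RSP as the example (the standalone helper name below is hypothetical; in this changeset RSP is imported inline by the worker further down):

DECLINLINE(int) hmR0VmxImportGuestRspSketch(PVMCPU pVCpu)
{
    /* Illustrative sketch of the check/VMREAD/clear-fExtrn pattern; the helper name is hypothetical. */
    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    if (pCtx->fExtrn & CPUMCTX_EXTRN_RSP)        /* Only touch the VMCS while the register is still external. */
    {
        uint64_t u64Val;
        int rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
        if (RT_SUCCESS(rc))
        {
            pCtx->rsp     = u64Val;              /* Update the guest-CPU context... */
            pCtx->fExtrn &= ~CPUMCTX_EXTRN_RSP;  /* ...and mark RSP as imported. */
        }
        return rc;
    }
    return VINF_SUCCESS;                         /* Already imported, nothing to read. */
}

Clearing the fExtrn bit is what allows a second import request for the same register to return without doing another VMREAD.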
6107 */ 6108 if (!u32Val) 6109 { 6110 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6111 { 6112 rc = hmR0VmxImportGuestRip(pVCpu); 6113 rc |= hmR0VmxImportGuestRFlags(pVCpu); 6114 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6115 } 6116 6117 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6118 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6119 } 6120 else 6121 { 6122 rc = hmR0VmxImportGuestRip(pVCpu); 6123 rc |= hmR0VmxImportGuestRFlags(pVCpu); 6124 6125 if (u32Val & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS 6126 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)) 6127 { 6128 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip); 6129 } 6130 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) 6131 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS); 6132 6133 if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) 6134 { 6135 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6136 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS); 6137 } 6138 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS)) 6139 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS); 6140 } 6141 } 6142 return rc; 6143 } 6144 6145 6146 /** 6147 * Worker for VMXR0ImportStateOnDemand. 6148 * 6149 * @returns VBox status code. 6150 * @param pVCpu The cross context virtual CPU structure. 7021 6151 * @param fWhat What to import, CPUMCTX_EXTRN_XXX. 7022 6152 */ 7023 static int hmR0VmxImportGuestState(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat) 7024 { 7025 int rc = VINF_SUCCESS; 7026 PVM pVM = pVCpu->CTX_SUFF(pVM); 6153 static int hmR0VmxImportGuestState(PVMCPU pVCpu, uint64_t fWhat) 6154 { 6155 #define VMXLOCAL_BREAK_RC(a_rc) \ 6156 if (RT_FAILURE(a_rc)) \ 6157 break 6158 6159 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatImportGuestState, x); 6160 6161 int rc = VINF_SUCCESS; 6162 PVM pVM = pVCpu->CTX_SUFF(pVM); 6163 PCPUMCTX pCtx = &pVCpu->cpum.GstCtx; 7027 6164 uint64_t u64Val; 7028 6165 uint32_t u32Val; 7029 uint32_t u32Shadow; 6166 6167 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 7030 6168 7031 6169 /* 7032 * Though we can longjmp to ring-3 due to log-flushes here and get re-invoked7033 * on the ring-3 callback path, there is no real need to.6170 * We disable interrupts to make the updating of the state and in particular 6171 * the fExtrn modification atomic wrt to preemption hooks. 
7034 6172 */ 7035 if (VMMRZCallRing3IsEnabled(pVCpu)) 7036 VMMR0LogFlushDisable(pVCpu); 6173 RTCCUINTREG const fEFlags = ASMIntDisableFlags(); 6174 6175 fWhat &= pCtx->fExtrn; 6176 if (fWhat & pCtx->fExtrn) 6177 { 6178 do 6179 { 6180 if (fWhat & CPUMCTX_EXTRN_RIP) 6181 { 6182 rc = hmR0VmxImportGuestRip(pVCpu); 6183 VMXLOCAL_BREAK_RC(rc); 6184 } 6185 6186 if (fWhat & CPUMCTX_EXTRN_RFLAGS) 6187 { 6188 rc = hmR0VmxImportGuestRFlags(pVCpu); 6189 VMXLOCAL_BREAK_RC(rc); 6190 } 6191 6192 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE) 6193 { 6194 rc = hmR0VmxImportGuestIntrState(pVCpu); 6195 VMXLOCAL_BREAK_RC(rc); 6196 } 6197 6198 if (fWhat & CPUMCTX_EXTRN_RSP) 6199 { 6200 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); 6201 VMXLOCAL_BREAK_RC(rc); 6202 pCtx->rsp = u64Val; 6203 } 6204 6205 if (fWhat & CPUMCTX_EXTRN_SREG_MASK) 6206 { 6207 if (fWhat & CPUMCTX_EXTRN_CS) 6208 { 6209 rc = HMVMX_IMPORT_SREG(CS, &pCtx->cs); 6210 VMXLOCAL_BREAK_RC(rc); 6211 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6212 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u; 6213 } 6214 if (fWhat & CPUMCTX_EXTRN_SS) 6215 { 6216 rc = HMVMX_IMPORT_SREG(SS, &pCtx->ss); 6217 VMXLOCAL_BREAK_RC(rc); 6218 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6219 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u; 6220 } 6221 if (fWhat & CPUMCTX_EXTRN_DS) 6222 { 6223 rc = HMVMX_IMPORT_SREG(DS, &pCtx->ds); 6224 VMXLOCAL_BREAK_RC(rc); 6225 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6226 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u; 6227 } 6228 if (fWhat & CPUMCTX_EXTRN_ES) 6229 { 6230 rc = HMVMX_IMPORT_SREG(ES, &pCtx->es); 6231 VMXLOCAL_BREAK_RC(rc); 6232 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6233 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u; 6234 } 6235 if (fWhat & CPUMCTX_EXTRN_FS) 6236 { 6237 rc = HMVMX_IMPORT_SREG(FS, &pCtx->fs); 6238 VMXLOCAL_BREAK_RC(rc); 6239 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6240 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u; 6241 } 6242 if (fWhat & CPUMCTX_EXTRN_GS) 6243 { 6244 rc = HMVMX_IMPORT_SREG(GS, &pCtx->gs); 6245 VMXLOCAL_BREAK_RC(rc); 6246 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6247 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u; 6248 } 6249 } 6250 6251 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK) 6252 { 6253 if (fWhat & CPUMCTX_EXTRN_LDTR) 6254 { 6255 rc = HMVMX_IMPORT_SREG(LDTR, &pCtx->ldtr); 6256 VMXLOCAL_BREAK_RC(rc); 6257 } 6258 6259 if (fWhat & CPUMCTX_EXTRN_GDTR) 6260 { 6261 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); 6262 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); 6263 VMXLOCAL_BREAK_RC(rc); 6264 pCtx->gdtr.pGdt = u64Val; 6265 pCtx->gdtr.cbGdt = u32Val; 6266 } 6267 6268 /* Guest IDTR. */ 6269 if (fWhat & CPUMCTX_EXTRN_IDTR) 6270 { 6271 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); 6272 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); 6273 VMXLOCAL_BREAK_RC(rc); 6274 pCtx->idtr.pIdt = u64Val; 6275 pCtx->idtr.cbIdt = u32Val; 6276 } 6277 6278 /* Guest TR. */ 6279 if (fWhat & CPUMCTX_EXTRN_TR) 6280 { 6281 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. 
*/ 6282 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) 6283 { 6284 rc = HMVMX_IMPORT_SREG(TR, &pCtx->tr); 6285 VMXLOCAL_BREAK_RC(rc); 6286 } 6287 } 6288 } 6289 6290 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS) 6291 { 6292 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip); 6293 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp); 6294 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); 6295 pCtx->SysEnter.cs = u32Val; 6296 VMXLOCAL_BREAK_RC(rc); 6297 } 6298 6299 #if HC_ARCH_BITS == 64 6300 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE) 6301 { 6302 if ( pVM->hm.s.fAllow64BitGuests 6303 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)) 6304 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); 6305 } 6306 6307 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS) 6308 { 6309 if ( pVM->hm.s.fAllow64BitGuests 6310 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST)) 6311 { 6312 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR); 6313 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR); 6314 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK); 6315 } 6316 } 6317 #endif 6318 6319 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS)) 6320 #if HC_ARCH_BITS == 32 6321 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS)) 6322 #endif 6323 ) 6324 { 6325 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 6326 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 6327 for (uint32_t i = 0; i < cMsrs; i++, pMsr++) 6328 { 6329 switch (pMsr->u32Msr) 6330 { 6331 #if HC_ARCH_BITS == 32 6332 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break; 6333 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break; 6334 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break; 6335 case MSR_K8_KERNEL_GS_BASE: pCtx->msrKERNELGSBASE = pMsr->u64Value; break; 6336 #endif 6337 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break; 6338 case MSR_K8_TSC_AUX: CPUMSetGuestTscAux(pVCpu, pMsr->u64Value); break; 6339 default: 6340 { 6341 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, 6342 cMsrs)); 6343 pVCpu->hm.s.u32HMError = pMsr->u32Msr; 6344 return VERR_HM_UNEXPECTED_LD_ST_MSR; 6345 } 6346 } 6347 } 6348 } 6349 6350 if (fWhat & CPUMCTX_EXTRN_DR7) 6351 { 6352 if (!pVCpu->hm.s.fUsingHyperDR7) 6353 { 6354 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */ 6355 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); 6356 VMXLOCAL_BREAK_RC(rc); 6357 pCtx->dr[7] = u32Val; 6358 } 6359 } 6360 6361 if (fWhat & CPUMCTX_EXTRN_CR_MASK) 6362 { 6363 uint32_t u32Shadow; 6364 /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */ 6365 if (fWhat & CPUMCTX_EXTRN_CR0) 6366 { 6367 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); 6368 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow); 6369 VMXLOCAL_BREAK_RC(rc); 6370 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask) 6371 | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask); 6372 VMMRZCallRing3Disable(pVCpu); /* Calls into PGM which has Log statements. */ 6373 CPUMSetGuestCR0(pVCpu, u32Val); 6374 VMMRZCallRing3Enable(pVCpu); 6375 } 6376 6377 /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). 
*/ 6378 if (fWhat & CPUMCTX_EXTRN_CR4) 6379 { 6380 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val); 6381 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow); 6382 VMXLOCAL_BREAK_RC(rc); 6383 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask) 6384 | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask); 6385 CPUMSetGuestCR4(pVCpu, u32Val); 6386 } 6387 6388 if (fWhat & CPUMCTX_EXTRN_CR3) 6389 { 6390 if ( pVM->hm.s.vmx.fUnrestrictedGuest 6391 || ( pVM->hm.s.fNestedPaging 6392 && CPUMIsGuestPagingEnabledEx(pCtx))) /* PG bit changes are always intercepted, so it's up to date. */ 6393 { 6394 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val); 6395 if (pCtx->cr3 != u64Val) 6396 { 6397 CPUMSetGuestCR3(pVCpu, u64Val); 6398 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3); 6399 } 6400 6401 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */ 6402 if (CPUMIsGuestInPAEModeEx(pCtx)) 6403 { 6404 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); 6405 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); 6406 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); 6407 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); 6408 VMXLOCAL_BREAK_RC(rc); 6409 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES); 6410 } 6411 } 6412 } 6413 } 6414 } while (0); 6415 6416 if (RT_SUCCESS(rc)) 6417 { 6418 /* Update fExtrn. */ 6419 pCtx->fExtrn &= ~fWhat; 6420 6421 /* If everything has been imported, clear the HM keeper bit. */ 6422 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL)) 6423 { 6424 pCtx->fExtrn &= ~CPUMCTX_EXTRN_KEEPER_HM; 6425 Assert(!pCtx->fExtrn); 6426 } 6427 } 6428 } 7037 6429 else 7038 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 7039 Log4Func(("fExtrn=%#RX64 fWhat=%#RX64\n", pCtx->fExtrn, fWhat)); 6430 AssertMsg(!pCtx->fExtrn || (pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL), ("%#RX64\n", pCtx->fExtrn)); 6431 6432 ASMSetFlags(fEFlags); 6433 6434 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatImportGuestState, x); 7040 6435 7041 6436 /* 6437 * Honor any pending CR3 updates. 6438 * 7042 6439 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback() 7043 6440 * -> VMMRZCallRing3Disable() -> hmR0VmxImportGuestState() -> Sets VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp … … 7047 6444 * if any CR3-saves -before- the VM-exit (longjmp) postponed the CR3 update via the force-flag, any VM-exit handler that 7048 6445 * calls into PGM when it re-saves CR3 will end up here and we call PGMUpdateCR3(). This is why the code below should 7049 * -NOT- check if HMVMX_UPDATED_GUEST_CR3 is already set or not!6446 * -NOT- check if CPUMCTX_EXTRN_CR3 is set! 7050 6447 * 7051 6448 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here. … … 7053 6450 if (VMMRZCallRing3IsEnabled(pVCpu)) 7054 6451 { 6452 VMMR0LogFlushDisable(pVCpu); 6453 7055 6454 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3)) 7056 6455 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu)); … … 7065 6464 } 7066 6465 7067 Assert(!(fWhat & CPUMCTX_EXTRN_KEEPER_HM));7068 fWhat &= pCtx->fExtrn;7069 7070 /* If there is nothing more to import, bail early. */7071 if (!(fWhat & HMVMX_CPUMCTX_EXTRN_ALL))7072 return VINF_SUCCESS;7073 7074 /* RIP required while saving interruptibility-state below, see EMSetInhibitInterruptsPC(). 
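For context, the worker above is intended to be driven by VM-exit handlers that import only the state they are about to read, leaving everything else marked external. A hedged caller-side sketch follows; the handler name and the particular CPUMCTX_EXTRN mask are illustrative, only the APIs named in the surrounding code are taken as given:

static VBOXSTRICTRC hmR0VmxExitExampleSketch(PVMCPU pVCpu)   /* Hypothetical exit handler, for illustration only. */
{
    /* Pull in just RIP, RFLAGS and CR0; anything already imported is skipped via fExtrn. */
    int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_CR0);
    AssertRCReturn(rc, rc);

    PCPUMCTX pCtx = &pVCpu->cpum.GstCtx;
    /* These fields are now valid; other registers may still be marked external in pCtx->fExtrn. */
    Log4Func(("rip=%#RX64 IF=%u\n", pCtx->rip, pCtx->eflags.Bits.u1IF));
    return VINF_SUCCESS;
}

A handler that genuinely needs the whole guest state would pass HMVMX_CPUMCTX_EXTRN_ALL instead and pay the full import cost once.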
*/7075 if (fWhat & (CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_HM_VMX_INT_STATE))7076 {7077 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);7078 AssertRCReturn(rc, rc);7079 pCtx->rip = u64Val;7080 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RIP);7081 }7082 7083 /* RFLAGS and interruptibility-state required while re-evaluating interrupt injection, see hmR0VmxGetGuestIntrState(). */7084 if (fWhat & (CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_HM_VMX_INT_STATE))7085 {7086 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val);7087 AssertRCReturn(rc, rc);7088 pCtx->eflags.u32 = u32Val;7089 /* Restore eflags for real-on-v86-mode hack. */7090 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7091 {7092 Assert(pVM->hm.s.vmx.pRealModeTSS);7093 pCtx->eflags.Bits.u1VM = 0;7094 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;7095 }7096 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RFLAGS);7097 }7098 7099 if (fWhat & CPUMCTX_EXTRN_HM_VMX_INT_STATE)7100 {7101 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32Val);7102 AssertRCReturn(rc, rc);7103 if (!u32Val)7104 {7105 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))7106 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);7107 7108 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))7109 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);7110 }7111 else7112 {7113 if (u32Val & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS7114 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI))7115 {7116 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);7117 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));7118 }7119 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))7120 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);7121 7122 if (u32Val & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI)7123 {7124 if (!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))7125 VMCPU_FF_SET(pVCpu, VMCPU_FF_BLOCK_NMIS);7126 }7127 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_BLOCK_NMIS))7128 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_BLOCK_NMIS);7129 }7130 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_HM_VMX_INT_STATE);7131 }7132 7133 if (fWhat & CPUMCTX_EXTRN_RSP)7134 {7135 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);7136 AssertRCReturn(rc, rc);7137 pCtx->rsp = u64Val;7138 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_RSP);7139 }7140 7141 if (fWhat & CPUMCTX_EXTRN_SREG_MASK)7142 {7143 if (fWhat & CPUMCTX_EXTRN_CS)7144 {7145 rc = HMVMX_SAVE_SREG(CS, &pCtx->cs);7146 AssertRCReturn(rc, rc);7147 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7148 pCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;7149 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CS);7150 }7151 if (fWhat & CPUMCTX_EXTRN_SS)7152 {7153 rc = HMVMX_SAVE_SREG(SS, &pCtx->ss);7154 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7155 pCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;7156 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SS);7157 }7158 if (fWhat & CPUMCTX_EXTRN_DS)7159 {7160 rc = HMVMX_SAVE_SREG(DS, &pCtx->ds);7161 AssertRCReturn(rc, rc);7162 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7163 pCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;7164 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DS);7165 }7166 if (fWhat & CPUMCTX_EXTRN_ES)7167 {7168 rc = HMVMX_SAVE_SREG(ES, &pCtx->es);7169 AssertRCReturn(rc, rc);7170 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7171 pCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;7172 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_ES);7173 }7174 if (fWhat & CPUMCTX_EXTRN_FS)7175 {7176 rc = HMVMX_SAVE_SREG(FS, &pCtx->fs);7177 
AssertRCReturn(rc, rc);7178 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7179 pCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;7180 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_FS);7181 }7182 if (fWhat & CPUMCTX_EXTRN_GS)7183 {7184 rc = HMVMX_SAVE_SREG(GS, &pCtx->gs);7185 AssertRCReturn(rc, rc);7186 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7187 pCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;7188 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GS);7189 }7190 }7191 7192 if (fWhat & CPUMCTX_EXTRN_TABLE_MASK)7193 {7194 if (fWhat & CPUMCTX_EXTRN_LDTR)7195 {7196 rc = HMVMX_SAVE_SREG(LDTR, &pCtx->ldtr);7197 AssertRCReturn(rc, rc);7198 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_LDTR);7199 }7200 7201 if (fWhat & CPUMCTX_EXTRN_GDTR)7202 {7203 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);7204 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);7205 AssertRCReturn(rc, rc);7206 pCtx->gdtr.pGdt = u64Val;7207 pCtx->gdtr.cbGdt = u32Val;7208 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_GDTR);7209 }7210 7211 /* Guest IDTR. */7212 if (fWhat & CPUMCTX_EXTRN_IDTR)7213 {7214 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);7215 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);7216 AssertRCReturn(rc, rc);7217 pCtx->idtr.pIdt = u64Val;7218 pCtx->idtr.cbIdt = u32Val;7219 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_IDTR);7220 }7221 7222 /* Guest TR. */7223 if (fWhat & CPUMCTX_EXTRN_TR)7224 {7225 /* Real-mode emulation using virtual-8086 mode has the fake TSS (pRealModeTSS) in TR, don't save that one. */7226 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)7227 {7228 rc = HMVMX_SAVE_SREG(TR, &pCtx->tr);7229 AssertRCReturn(rc, rc);7230 }7231 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_TR);7232 }7233 }7234 7235 if (fWhat & CPUMCTX_EXTRN_SYSENTER_MSRS)7236 {7237 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &pCtx->SysEnter.eip);7238 rc |= VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &pCtx->SysEnter.esp);7239 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val);7240 pCtx->SysEnter.cs = u32Val;7241 AssertRCReturn(rc, rc);7242 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSENTER_MSRS);7243 }7244 7245 #if HC_ARCH_BITS == 647246 if (fWhat & CPUMCTX_EXTRN_KERNEL_GS_BASE)7247 {7248 if ( pVM->hm.s.fAllow64BitGuests7249 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))7250 pCtx->msrKERNELGSBASE = ASMRdMsr(MSR_K8_KERNEL_GS_BASE);7251 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KERNEL_GS_BASE);7252 }7253 7254 if (fWhat & CPUMCTX_EXTRN_SYSCALL_MSRS)7255 {7256 if ( pVM->hm.s.fAllow64BitGuests7257 && (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST))7258 {7259 pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);7260 pCtx->msrSTAR = ASMRdMsr(MSR_K6_STAR);7261 pCtx->msrSFMASK = ASMRdMsr(MSR_K8_SF_MASK);7262 }7263 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_SYSCALL_MSRS);7264 }7265 #endif7266 7267 if ( (fWhat & (CPUMCTX_EXTRN_TSC_AUX | CPUMCTX_EXTRN_OTHER_MSRS))7268 #if HC_ARCH_BITS == 327269 || (fWhat & (CPUMCTX_EXTRN_KERNEL_GS_BASE | CPUMCTX_EXTRN_SYSCALL_MSRS))7270 #endif7271 )7272 {7273 PCVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;7274 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs;7275 for (uint32_t i = 0; i < cMsrs; i++, pMsr++)7276 {7277 switch (pMsr->u32Msr)7278 {7279 #if HC_ARCH_BITS == 327280 case MSR_K8_LSTAR: pCtx->msrLSTAR = pMsr->u64Value; break;7281 case MSR_K6_STAR: pCtx->msrSTAR = pMsr->u64Value; break;7282 case MSR_K8_SF_MASK: pCtx->msrSFMASK = pMsr->u64Value; break;7283 case MSR_K8_KERNEL_GS_BASE: 
pCtx->msrKERNELGSBASE = pMsr->u64Value; break;
7284 #endif
7285 case MSR_IA32_SPEC_CTRL: CPUMSetGuestSpecCtrl(pVCpu, pMsr->u64Value); break;
7286 case MSR_K8_TSC_AUX:
7287 {
7288 /* CPUMSetGuestTscAux alters fExtrn without using atomics, so disable preemption temporarily. */
7289 HM_DISABLE_PREEMPT();
7290 CPUMSetGuestTscAux(pVCpu, pMsr->u64Value);
7291 HM_RESTORE_PREEMPT();
7292 break;
7293 }
7294 default:
7295 {
7296 AssertMsgFailed(("Unexpected MSR in auto-load/store area. uMsr=%#RX32 cMsrs=%u\n", pMsr->u32Msr, cMsrs));
7297 pVCpu->hm.s.u32HMError = pMsr->u32Msr;
7298 return VERR_HM_UNEXPECTED_LD_ST_MSR;
7299 }
7300 }
7301 }
7302 ASMAtomicUoAndU64(&pCtx->fExtrn, ~( CPUMCTX_EXTRN_TSC_AUX
7303 | CPUMCTX_EXTRN_OTHER_MSRS
7304 #if HC_ARCH_BITS == 32
7305 | CPUMCTX_EXTRN_KERNEL_GS_BASE
7306 | CPUMCTX_EXTRN_SYSCALL_MSRS
7307 #endif
7308 ));
7309 }
7310
7311 if (fWhat & CPUMCTX_EXTRN_DR7)
7312 {
7313 if (!pVCpu->hm.s.fUsingHyperDR7)
7314 {
7315 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
7316 rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val);
7317 AssertRCReturn(rc, rc);
7318 pCtx->dr[7] = u32Val;
7319 }
7320 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_DR7);
7321 }
7322
7323 if (fWhat & CPUMCTX_EXTRN_CR_MASK)
7324 {
7325 /* CR0 required for saving CR3 below, see CPUMIsGuestPagingEnabledEx(). */
7326 if (fWhat & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3))
7327 {
7328 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
7329 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &u32Shadow);
7330 AssertRCReturn(rc, rc);
7331 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR0Mask)
7332 | (u32Shadow & pVCpu->hm.s.vmx.u32CR0Mask);
7333 CPUMSetGuestCR0(pVCpu, u32Val);
7334 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR0);
7335 }
7336
7337 /* CR4 required for saving CR3 below, see CPUMIsGuestInPAEModeEx(). */
7338 if (fWhat & (CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_CR3))
7339 {
7340 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
7341 rc |= VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &u32Shadow);
7342 AssertRCReturn(rc, rc);
7343 u32Val = (u32Val & ~pVCpu->hm.s.vmx.u32CR4Mask)
7344 | (u32Shadow & pVCpu->hm.s.vmx.u32CR4Mask);
7345 CPUMSetGuestCR4(pVCpu, u32Val);
7346 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR4);
7347 }
7348
7349 if (fWhat & CPUMCTX_EXTRN_CR3)
7350 {
7351 if ( pVM->hm.s.vmx.fUnrestrictedGuest
7352 || ( pVM->hm.s.fNestedPaging
7353 && CPUMIsGuestPagingEnabledEx(pCtx)))
7354 {
7355 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);
7356 if (pCtx->cr3 != u64Val)
7357 {
7358 CPUMSetGuestCR3(pVCpu, u64Val);
7359 if (VMMRZCallRing3IsEnabled(pVCpu))
7360 {
7361 PGMUpdateCR3(pVCpu, u64Val);
7362 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
7363 }
7364 else
7365 {
7366 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
7367 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
7368 }
7369 }
7370
7371 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
7372 if (CPUMIsGuestInPAEModeEx(pCtx))
7373 {
7374 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u);
7375 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u);
7376 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u);
7377 rc |= VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u);
7378 AssertRCReturn(rc, rc);
7379
7380 if (VMMRZCallRing3IsEnabled(pVCpu))
7381 {
7382 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
7383 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
7384 }
7385 else
7386 {
7387 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
7388 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
7389 }
7390 }
7391 }
7392 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_CR3);
7393 }
7394 }
7395
7396 /* If everything has been imported, clear the HM keeper bit. */
7397 if (!(pCtx->fExtrn & HMVMX_CPUMCTX_EXTRN_ALL))
7398 {
7399 ASMAtomicUoAndU64(&pCtx->fExtrn, ~CPUMCTX_EXTRN_KEEPER_HM);
7400 Assert(!pCtx->fExtrn);
7401 }
7402
7403 6466 return VINF_SUCCESS;
6467 #undef VMXLOCAL_BREAK_RC
7404 6468 }
7405 6469
… …
7410 6474 * @returns VBox status code.
7411 6475 * @param pVCpu The cross context virtual CPU structure.
7412 * @param pCtx Pointer to the guest-CPU or nested-guest-CPU context.
7413 6476 * @param fWhat What to import, CPUMCTX_EXTRN_XXX.
7414 6477 */
7415 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat)
7416 {
7417 return hmR0VmxImportGuestState(pVCpu, pCtx, fWhat);
7418 }
7419
7420
7421 /**
7422 * Saves the entire guest state from the currently active VMCS into the
7423 * guest-CPU context.
7424 *
7425 * This essentially VMREADs all guest-data.
7426 *
7427 * @returns VBox status code.
7428 * @param pVCpu The cross context virtual CPU structure.
7429 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7430 * out-of-sync. Make sure to update the required fields
7431 * before using them.
7432 */
7433 static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7434 {
7435 Assert(pVCpu);
7436 Assert(pMixedCtx);
7437
7438 if (HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL)
7439 return VINF_SUCCESS;
7440
7441 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled
7442 again on the ring-3 callback path, there is no real need to. */
7443 if (VMMRZCallRing3IsEnabled(pVCpu))
7444 VMMR0LogFlushDisable(pVCpu);
7445 else
7446 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7447 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
7448
7449 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
7450 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7451
7452 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7453 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7454
7455 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
7456 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7457
7458 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
7459 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7460
7461 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
7462 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7463
7464 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
7465 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7466
7467 rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx);
7468 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestLazyMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7469
7470 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
7471 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7472
7473 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
7474 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7475
7476 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
7477 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
7478
7479 AssertMsg(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL,
7480 ("Missed guest state bits while saving state; missing %RX32 (got %RX32, want %RX32) - check log for any previous errors!\n",
7481 HMVMX_UPDATED_GUEST_ALL ^ HMVMXCPU_GST_VALUE(pVCpu), HMVMXCPU_GST_VALUE(pVCpu), HMVMX_UPDATED_GUEST_ALL));
7482
7483 if (VMMRZCallRing3IsEnabled(pVCpu))
7484 VMMR0LogFlushEnable(pVCpu);
7485
7486 return VINF_SUCCESS;
7487 }
7488
7489
7490 /**
7491 * Saves basic guest registers needed for IEM instruction execution.
7492 *
7493 * @returns VBox status code (OR-able).
7494 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
7495 * @param pMixedCtx Pointer to the CPU context of the guest.
7496 * @param fMemory Whether the instruction being executed operates on
7497 * memory or not. Only CR0 is synced up if clear.
7498 * @param fNeedRsp Need RSP (any instruction working on GPRs or stack).
7499 */
7500 static int hmR0VmxSaveGuestRegsForIemExec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fMemory, bool fNeedRsp)
7501 {
7502 /*
7503 * We assume all general purpose registers other than RSP are available.
7504 *
7505 * - RIP is a must, as it will be incremented or otherwise changed.
7506 * - RFLAGS are always required to figure the CPL.
7507 * - RSP isn't always required, however it's a GPR, so frequently required.
7508 * - SS and CS are the only segment register needed if IEM doesn't do memory
7509 * access (CPL + 16/32/64-bit mode), but we can only get all segment registers.
7510 * - CR0 is always required by IEM for the CPL, while CR3 and CR4 will only
7511 * be required for memory accesses.
7512 *
7513 * Note! Before IEM dispatches an exception, it will call us to sync in everything.
7514 */
7515 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7516 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
7517 if (fNeedRsp)
7518 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
7519 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /** @todo Only CS and SS are required here. */
7520 if (!fMemory)
7521 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
7522 else
7523 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
7524 AssertRCReturn(rc, rc);
7525 return rc;
7526 }
7527
7528
7529 /**
7530 * Saves guest registers needed for IEM instruction interpretation.
7531 *
7532 * @returns VBox status code (OR-able).
7533 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
7534 */
7535 static int hmR0VmxSaveGuestRegsForIemInterpreting(PVMCPU pVCpu)
7536 {
7537 /*
7538 * Our goal here is IEM_CPUMCTX_EXTRN_MUST_MASK.
7539 *
7540 * Note!
Before IEM dispatches an exception, it will call us to sync in everything. 7541 */ 7542 #if 0 /* later with CPUMCTX_EXTRN_XXX */ 7543 int rc = hmR0VmxSaveGuestRip(pVCpu, &pVCpu->cpum.GstCtx); 7544 rc |= hmR0VmxSaveGuestRflags(pVCpu, &pVCpu->cpum.GstCtx); 7545 rc |= hmR0VmxSaveGuestRsp(pVCpu, &pVCpu->cpum.GstCtx); 7546 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo Only CS and SS are strictly required here. */ 7547 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, &pVCpu->cpum.GstCtx); /** @todo We don't need CR2 here. */ 7548 rc |= hmR0VmxSaveGuestApicState(pVCpu, &pVCpu->cpum.GstCtx); /** @todo Only TPR is needed here. */ 7549 rc |= hmR0VmxSaveGuestDR7(pVCpu, &pVCpu->cpum.GstCtx); 7550 /* EFER is always up to date. */ 7551 AssertRCReturn(rc, rc); 7552 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST - fixme); /** @todo fix me */ 7553 #else 7554 int rc = hmR0VmxSaveGuestState(pVCpu, &pVCpu->cpum.GstCtx); 7555 AssertRCReturn(rc, rc); 7556 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 7557 #endif 7558 7559 return rc; 7560 } 7561 7562 7563 /** 7564 * Ensures that we've got a complete basic guest-context. 7565 * 7566 * This excludes the FPU, SSE, AVX, and similar extended state. The interface 7567 * is for the interpreter. 7568 * 7569 * @returns VBox status code. 7570 * @param pVCpu The cross context virtual CPU structure of the calling EMT. 7571 * @param pMixedCtx Pointer to the guest-CPU context which may have data 7572 * needing to be synced in. 7573 * @thread EMT(pVCpu) 7574 */ 7575 VMMR0_INT_DECL(int) HMR0EnsureCompleteBasicContext(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7576 { 7577 /* Note! Since this is only applicable to VT-x, the implementation is placed 7578 in the VT-x part of the sources instead of the generic stuff. */ 7579 int rc; 7580 PVM pVM = pVCpu->CTX_SUFF(pVM); 7581 if ( pVM->hm.s.vmx.fSupported 7582 && VM_IS_HM_ENABLED(pVM)) 7583 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 7584 else 7585 rc = VINF_SUCCESS; 7586 7587 /* 7588 * For now, imply that the caller might change everything too. Do this after 7589 * saving the guest state so as to not trigger assertions. 7590 * 7591 * This is required for AMD-V too as it too only selectively re-loads changed 7592 * guest state back in to the VMCB. 7593 */ 7594 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 7595 return rc; 6478 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat) 6479 { 6480 return hmR0VmxImportGuestState(pVCpu, fWhat); 7596 6481 } 7597 6482 … … 7634 6519 return VINF_SUCCESS; 7635 6520 6521 #if 0 7636 6522 /* We need the control registers now, make sure the guest-CPU context is updated. */ 7637 int rc3 = hmR0Vmx SaveGuestControlRegs(pVCpu, pMixedCtx);6523 int rc3 = hmR0VmxImportGuestStatae(pVCpu, CPUMCTX_EXTRN_CR0); 7638 6524 AssertRCReturn(rc3, rc3); 7639 6525 … … 7657 6543 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES)); 7658 6544 } 6545 #endif 7659 6546 7660 6547 /* Pending PGM C3 sync. */ 7661 6548 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)) 7662 6549 { 6550 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & (CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4))); 7663 6551 VBOXSTRICTRC rcStrict2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, 7664 6552 VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)); … … 7666 6554 { 7667 6555 AssertRC(VBOXSTRICTRC_VAL(rcStrict2)); 7668 Log4 (("hmR0VmxCheckForceFlags:PGMSyncCR3 forcing us back to ring-3. 
rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));6556 Log4Func(("PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", VBOXSTRICTRC_VAL(rcStrict2))); 7669 6557 return rcStrict2; 7670 6558 } … … 7677 6565 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF); 7678 6566 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3; 7679 Log4 (("hmR0VmxCheckForceFlags:HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));6567 Log4Func(("HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2)); 7680 6568 return rc2; 7681 6569 } … … 7685 6573 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST)) 7686 6574 { 7687 Log4 (("hmR0VmxCheckForceFlags:Pending VM request forcing us back to ring-3\n"));6575 Log4Func(("Pending VM request forcing us back to ring-3\n")); 7688 6576 return VINF_EM_PENDING_REQUEST; 7689 6577 } … … 7692 6580 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING)) 7693 6581 { 7694 Log4 (("hmR0VmxCheckForceFlags:PGM pool flush pending forcing us back to ring-3\n"));6582 Log4Func(("PGM pool flush pending forcing us back to ring-3\n")); 7695 6583 return VINF_PGM_POOL_FLUSH_PENDING; 7696 6584 } … … 7699 6587 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA)) 7700 6588 { 7701 Log4 (("hmR0VmxCheckForceFlags:Pending DMA request forcing us back to ring-3\n"));6589 Log4Func(("Pending DMA request forcing us back to ring-3\n")); 7702 6590 return VINF_EM_RAW_TO_R3; 7703 6591 } … … 7766 6654 AssertRC(rc); 7767 6655 Log4(("TRPM->HM event: u32IntInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n", 7768 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));6656 u32IntInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress)); 7769 6657 7770 6658 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, cbInstr, uErrCode, GCPtrFaultAddress); … … 7850 6738 * 7851 6739 * @returns VBox status code. 7852 * @param pVCpu The cross context virtual CPU structure. 7853 * @param pMixedCtx Pointer to the guest-CPU context. The data may 7854 * be out-of-sync. Make sure to update the required 7855 * fields before using them. 7856 * @param fSaveGuestState Whether to save the guest state or not. 6740 * @param pVCpu The cross context virtual CPU structure. 6741 * @param fImportState Whether to import the guest state from the VMCS back 6742 * to the guest-CPU context. 7857 6743 * 7858 6744 * @remarks No-long-jmp zone!!! 7859 6745 */ 7860 static int hmR0VmxLeave(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fSaveGuestState)6746 static int hmR0VmxLeave(PVMCPU pVCpu, bool fImportState) 7861 6747 { 7862 6748 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 7872 6758 7873 6759 /* Save the guest state if necessary. */ 7874 if ( fSaveGuestState 7875 && HMVMXCPU_GST_VALUE(pVCpu) != HMVMX_UPDATED_GUEST_ALL) 7876 { 7877 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 6760 if (fImportState) 6761 { 6762 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 7878 6763 AssertRCReturn(rc, rc); 7879 Assert(HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL); 7880 } 7881 7882 /* Restore host FPU state if necessary and resync on next R0 reentry .*/ 7883 if (CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu)) 7884 { 7885 /* We shouldn't reload CR0 without saving it first. */ 7886 if (!fSaveGuestState) 7887 { 7888 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 7889 AssertRCReturn(rc, rc); 7890 } 7891 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 7892 } 7893 7894 /* Restore host debug registers if necessary and resync on next R0 reentry. 
*/ 6764 } 6765 6766 /* Restore host FPU state if necessary. We will resync on next R0 reentry. */ 6767 CPUMR0FpuStateMaybeSaveGuestAndRestoreHost(pVCpu); 6768 Assert(!CPUMIsGuestFPUStateActive(pVCpu)); 6769 6770 /* Restore host debug registers if necessary. We will resync on next R0 reentry. */ 7895 6771 #ifdef VBOX_STRICT 7896 6772 if (CPUMIsHyperDebugStateActive(pVCpu)) 7897 6773 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT); 7898 6774 #endif 7899 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */)) 7900 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG); 6775 CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */); 7901 6776 Assert(!CPUMIsGuestDebugStateActive(pVCpu) && !CPUMIsGuestDebugStateActivePending(pVCpu)); 7902 6777 Assert(!CPUMIsHyperDebugStateActive(pVCpu) && !CPUMIsHyperDebugStateActivePending(pVCpu)); … … 7914 6789 7915 6790 /* Restore the lazy host MSRs as we're leaving VT-x context. */ 7916 if (pVCpu->hm.s.vmx.fLazyMsrs) 7917 { 7918 /* We shouldn't reload the guest MSRs without saving it first. */ 7919 if (!fSaveGuestState) 7920 { 7921 int rc = hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx); 6791 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST) 6792 { 6793 /* We shouldn't restore the host MSRs without saving the guest MSRs first. */ 6794 if (!fImportState) 6795 { 6796 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_KERNEL_GS_BASE 6797 | CPUMCTX_EXTRN_SYSCALL_MSRS); 7922 6798 AssertRCReturn(rc, rc); 7923 6799 } 7924 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_LAZY_MSRS));7925 6800 hmR0VmxLazyRestoreHostMsrs(pVCpu); 7926 6801 Assert(!pVCpu->hm.s.vmx.fLazyMsrs); 7927 6802 } 6803 else 6804 pVCpu->hm.s.vmx.fLazyMsrs = 0; 7928 6805 7929 6806 /* Update auto-load/store host MSRs values when we re-enter VT-x (as we could be on a different CPU). */ … … 7931 6808 7932 6809 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry); 7933 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState); 6810 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatImportGuestState); 6811 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExportGuestState); 7934 6812 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1); 7935 6813 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2); … … 7972 6850 * @remarks No-long-jmp zone!!! 7973 6851 */ 7974 DECLINLINE(int)hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx)6852 static int hmR0VmxLeaveSession(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7975 6853 { 7976 6854 HM_DISABLE_PREEMPT(); … … 7983 6861 if (!pVCpu->hm.s.fLeaveDone) 7984 6862 { 7985 int rc2 = hmR0VmxLeave(pVCpu, pMixedCtx, true /* fSaveGuestState */);6863 int rc2 = hmR0VmxLeave(pVCpu, true /* fImportState */); 7986 6864 AssertRCReturnStmt(rc2, HM_RESTORE_PREEMPT(), rc2); 7987 6865 pVCpu->hm.s.fLeaveDone = true; 7988 6866 } 7989 Assert( HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);6867 Assert(!pMixedCtx->fExtrn); 7990 6868 7991 6869 /* … … 8059 6937 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */ 8060 6938 VMMRZCallRing3Disable(pVCpu); 8061 Log4 (("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, VBOXSTRICTRC_VAL(rcExit)));6939 Log4Func(("rcExit=%d\n", VBOXSTRICTRC_VAL(rcExit))); 8062 6940 8063 6941 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring3. */ … … 8074 6952 and if we're injecting an event we should have a TRPM trap pending. 
*/ 8075 6953 AssertMsg(rcExit != VINF_EM_RAW_INJECT_TRPM_EVENT || TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit))); 8076 #ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a trip ple fault in progress. */6954 #ifndef DEBUG_bird /* Triggered after firing an NMI against NT4SP1, possibly a triple fault in progress. */ 8077 6955 AssertMsg(rcExit != VINF_EM_RAW_EMULATE_INSTR || !TRPMHasTrap(pVCpu), ("%Rrc\n", VBOXSTRICTRC_VAL(rcExit))); 8078 6956 #endif … … 8092 6970 | CPUM_CHANGED_TR 8093 6971 | CPUM_CHANGED_HIDDEN_SEL_REGS); 8094 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));8095 6972 if ( pVM->hm.s.fNestedPaging 8096 6973 && CPUMIsGuestPagingEnabledEx(pMixedCtx)) … … 8101 6978 Assert(!pVCpu->hm.s.fClearTrapFlag); 8102 6979 6980 /* Update the exit-to-ring 3 reason. */ 6981 pVCpu->hm.s.rcLastExitToR3 = VBOXSTRICTRC_VAL(rcExit); 6982 8103 6983 /* On our way back from ring-3 reload the guest state if there is a possibility of it being changed. */ 8104 6984 if (rcExit != VINF_EM_RAW_INTERRUPT) 8105 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);6985 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 8106 6986 8107 6987 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3); … … 8150 7030 pVCpu->hm.s.vmx.fRestoreHostFlags = 0; 8151 7031 #endif 7032 8152 7033 /* Restore the lazy host MSRs as we're leaving VT-x context. */ 8153 if (pVCpu->hm.s.vmx.fLazyMsrs )7034 if (pVCpu->hm.s.vmx.fLazyMsrs & VMX_LAZY_MSRS_LOADED_GUEST) 8154 7035 hmR0VmxLazyRestoreHostMsrs(pVCpu); 8155 7036 … … 8178 7059 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 8179 7060 8180 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32 enmOperation=%d\n", pVCpu, pVCpu->idCpu, 8181 enmOperation)); 7061 Log4Func((" -> hmR0VmxLongJmpToRing3 enmOperation=%d\n", enmOperation)); 8182 7062 8183 7063 int rc = hmR0VmxLongJmpToRing3(pVCpu, (PCPUMCTX)pvUser); … … 8204 7084 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8205 7085 AssertRC(rc); 8206 Log4 (("Setup interrupt-window exiting\n"));7086 Log4Func(("Setup interrupt-window exiting\n")); 8207 7087 } 8208 7088 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */ … … 8221 7101 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8222 7102 AssertRC(rc); 8223 Log4 (("Cleared interrupt-window exiting\n"));7103 Log4Func(("Cleared interrupt-window exiting\n")); 8224 7104 } 8225 7105 … … 8240 7120 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8241 7121 AssertRC(rc); 8242 Log4 (("Setup NMI-window exiting\n"));7122 Log4Func(("Setup NMI-window exiting\n")); 8243 7123 } 8244 7124 } /* else we will deliver NMIs whenever we VM-exit next, even possibly nesting NMIs. Can't be helped on ancient CPUs. */ … … 8257 7137 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); 8258 7138 AssertRC(rc); 8259 Log4 (("Cleared NMI-window exiting\n"));7139 Log4Func(("Cleared NMI-window exiting\n")); 8260 7140 } 8261 7141 … … 8274 7154 { 8275 7155 /* Get the current interruptibility-state of the guest and then figure out what can be injected. 
*/ 8276 uint32_t const uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);8277 bool const fBlockMovSS = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);8278 bool const fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);8279 bool const fBlockNmi = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);8280 8281 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));8282 Assert(!( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/7156 uint32_t const fIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx); 7157 bool const fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS); 7158 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 7159 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI); 7160 7161 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7162 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 8283 7163 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */ 8284 7164 Assert(!TRPMHasTrap(pVCpu)); … … 8300 7180 && !fBlockMovSS) 8301 7181 { 8302 Log4 (("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));7182 Log4Func(("Pending NMI\n")); 8303 7183 uint32_t u32IntInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID; 8304 7184 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); … … 8318 7198 { 8319 7199 Assert(!DBGFIsStepping(pVCpu)); 8320 int rc = hmR0Vmx SaveGuestRflags(pVCpu, pMixedCtx);8321 AssertRC (rc);7200 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 7201 AssertRCReturn(rc, 0); 8322 7202 bool const fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF); 8323 7203 if ( !pVCpu->hm.s.Event.fPending … … 8330 7210 if (RT_SUCCESS(rc)) 8331 7211 { 8332 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt)); 8333 uint32_t u32IntInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID; 8334 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 7212 Log4Func(("Pending external interrupt u8Interrupt=%#x\n", u8Interrupt)); 7213 uint32_t u32IntInfo = u8Interrupt 7214 | VMX_EXIT_INTERRUPTION_INFO_VALID 7215 | (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 8335 7216 8336 7217 hmR0VmxSetPendingEvent(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrfaultAddress */); … … 8355 7236 } 8356 7237 8357 return uIntrState;7238 return fIntrState; 8358 7239 } 8359 7240 … … 8364 7245 * 8365 7246 * @param pVCpu The cross context virtual CPU structure. 8366 */ 8367 DECLINLINE(void) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu) 8368 { 8369 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS)); NOREF(pVCpu); 8370 int rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); 8371 AssertRC(rc); 7247 * @param pMixedCtx Pointer to the guest-CPU context. The data may be 7248 * out-of-sync. Make sure to update the required fields 7249 * before using them. 
7250 */ 7251 DECLINLINE(int) hmR0VmxSetPendingDebugXcptVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx) 7252 { 7253 RT_NOREF(pVCpu); 7254 Assert(!(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7255 return VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS); 8372 7256 } 8373 7257 … … 8382 7266 * out-of-sync. Make sure to update the required fields 8383 7267 * before using them. 8384 * @param uIntrState The VT-x guest-interruptibility state.7268 * @param fIntrState The VT-x guest-interruptibility state. 8385 7269 * @param fStepping Running in hmR0VmxRunGuestCodeStep() and we should 8386 7270 * return VINF_EM_DBG_STEPPED if the event was 8387 7271 * dispatched directly. 8388 7272 */ 8389 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t uIntrState, bool fStepping)7273 static VBOXSTRICTRC hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t fIntrState, bool fStepping) 8390 7274 { 8391 7275 HMVMX_ASSERT_PREEMPT_SAFE(); 8392 7276 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 8393 7277 8394 bool fBlockMovSS = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);8395 bool fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);8396 8397 Assert(!fBlockSti || HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RFLAGS));8398 Assert(!( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/7278 bool fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS); 7279 bool fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 7280 7281 Assert(!fBlockSti || !(ASMAtomicUoReadU64(&pMixedCtx->fExtrn) & CPUMCTX_EXTRN_RFLAGS)); 7282 Assert(!(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI)); /* We don't support block-by-SMI yet.*/ 8399 7283 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */ 8400 7284 Assert(!TRPMHasTrap(pVCpu)); … … 8421 7305 else if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI) 8422 7306 { 8423 bool const fBlockNmi = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI);7307 bool const fBlockNmi = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI); 8424 7308 Assert(!fBlockSti); 8425 7309 Assert(!fBlockMovSS); … … 8429 7313 Log4(("Injecting pending event vcpu[%RU32] u64IntInfo=%#RX64 Type=%#x\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntInfo, 8430 7314 (uint8_t)uIntType)); 8431 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, p MixedCtx, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr,8432 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, 8433 fStepping, &uIntrState);7315 rcStrict = hmR0VmxInjectEventVmcs(pVCpu, pVCpu->hm.s.Event.u64IntInfo, pVCpu->hm.s.Event.cbInstr, 7316 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, fStepping, 7317 &fIntrState); 8434 7318 AssertRCReturn(VBOXSTRICTRC_VAL(rcStrict), rcStrict); 8435 7319 8436 7320 /* Update the interruptibility-state as it could have been changed by 8437 7321 hmR0VmxInjectEventVmcs() (e.g. 
real-on-v86 guest injecting software interrupts) */ 8438 fBlockMovSS = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);8439 fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);7322 fBlockMovSS = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS); 7323 fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 8440 7324 8441 7325 if (uIntType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT) … … 8457 7341 */ 8458 7342 Assert(!DBGFIsStepping(pVCpu)); 8459 int rc 2 = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);8460 AssertRCReturn(rc 2, rc2);7343 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 7344 AssertRCReturn(rc, rc); 8461 7345 if (pMixedCtx->eflags.Bits.u1TF) 8462 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 7346 { 7347 int rc2 = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 7348 AssertRCReturn(rc2, rc2); 7349 } 8463 7350 } 8464 7351 else if (pMixedCtx->eflags.Bits.u1TF) … … 8469 7356 */ 8470 7357 Assert(!(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.Msrs.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)); 8471 uIntrState = 0;7358 fIntrState = 0; 8472 7359 } 8473 7360 } … … 8477 7364 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". 8478 7365 */ 8479 int rc 2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);8480 AssertRC (rc2);7366 int rc3 = hmR0VmxExportGuestIntrState(pVCpu, fIntrState); 7367 AssertRCReturn(rc3, rc3); 8481 7368 8482 7369 Assert(rcStrict == VINF_SUCCESS || rcStrict == VINF_EM_RESET || (rcStrict == VINF_EM_DBG_STEPPED && fStepping)); … … 8514 7401 * is injected directly (register modified by us, not 8515 7402 * by hardware on VM-entry). 8516 * @param p uIntrState Pointer to the current guest interruptibility-state.7403 * @param pfIntrState Pointer to the current guest interruptibility-state. 8517 7404 * This interruptibility-state will be updated if 8518 7405 * necessary. This cannot not be NULL. 8519 7406 */ 8520 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fStepping, uint32_t *puIntrState) 8521 { 7407 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fStepping, uint32_t *pfIntrState) 7408 { 7409 NOREF(pMixedCtx); 8522 7410 uint32_t u32IntInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID; 8523 7411 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 8524 7412 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 8525 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,8526 fStepping, puIntrState);7413 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */, fStepping, 7414 pfIntrState); 8527 7415 } 8528 7416 … … 8580 7468 * directly (register modified by us, not by 8581 7469 * hardware on VM-entry). 8582 * @param p uIntrState Pointer to the current guest interruptibility-state.7470 * @param pfIntrState Pointer to the current guest interruptibility-state. 8583 7471 * This interruptibility-state will be updated if 8584 7472 * necessary. This cannot not be NULL. 
8585 7473 */ 8586 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode, 8587 bool fStepping, uint32_t *puIntrState) 8588 { 7474 DECLINLINE(VBOXSTRICTRC) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode, 7475 bool fStepping, uint32_t *pfIntrState) 7476 { 7477 NOREF(pMixedCtx); 8589 7478 uint32_t u32IntInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID; 8590 7479 u32IntInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT); 8591 7480 if (fErrorCodeValid) 8592 7481 u32IntInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 8593 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,8594 fStepping, puIntrState);7482 return hmR0VmxInjectEventVmcs(pVCpu, u32IntInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */, fStepping, 7483 pfIntrState); 8595 7484 } 8596 7485 … … 8677 7566 * 8678 7567 * @param pVCpu The cross context virtual CPU structure. 8679 * @param pMixedCtx Pointer to the guest-CPU context. The data may8680 * be out-of-sync. Make sure to update the required8681 * fields before using them.8682 7568 * @param u64IntInfo The VM-entry interruption-information field. 8683 7569 * @param cbInstr The VM-entry instruction length in bytes (for … … 8686 7572 * @param u32ErrCode The VM-entry exception error code. 8687 7573 * @param GCPtrFaultAddress The page-fault address for \#PF exceptions. 8688 * @param p uIntrState Pointer to the current guest interruptibility-state.7574 * @param pfIntrState Pointer to the current guest interruptibility-state. 8689 7575 * This interruptibility-state will be updated if 8690 7576 * necessary. This cannot not be NULL. … … 8697 7583 * @remarks Requires CR0! 8698 7584 */ 8699 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntInfo, uint32_t cbInstr, 8700 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, bool fStepping, 8701 uint32_t *puIntrState) 7585 static VBOXSTRICTRC hmR0VmxInjectEventVmcs(PVMCPU pVCpu, uint64_t u64IntInfo, uint32_t cbInstr, uint32_t u32ErrCode, 7586 RTGCUINTREG GCPtrFaultAddress, bool fStepping, uint32_t *pfIntrState) 8702 7587 { 8703 7588 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */ 8704 7589 AssertMsg(u64IntInfo >> 32 == 0, ("%#RX64\n", u64IntInfo)); 8705 Assert(puIntrState); 8706 uint32_t u32IntInfo = (uint32_t)u64IntInfo; 8707 8708 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo); 8709 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo); 7590 Assert(pfIntrState); 7591 7592 PCPUMCTX pMixedCtx = &pVCpu->cpum.GstCtx; 7593 uint32_t u32IntInfo = (uint32_t)u64IntInfo; 7594 uint32_t const uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntInfo); 7595 uint32_t const uIntType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo); 8710 7596 8711 7597 #ifdef VBOX_STRICT … … 8737 7623 /* Cannot inject an NMI when block-by-MOV SS is in effect. */ 8738 7624 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI 8739 || !(*p uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));7625 || !(*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)); 8740 7626 8741 7627 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]); 8742 7628 8743 /* We require CR0 to check if the guest is in real-mode. 
*/ 8744 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 8745 AssertRCReturn(rc, rc); 8746 8747 /* 8748 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real 8749 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest. 8750 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes. 8751 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling. 8752 */ 8753 if (CPUMIsGuestInRealModeEx(pMixedCtx)) 8754 { 8755 PVM pVM = pVCpu->CTX_SUFF(pVM); 8756 if (!pVM->hm.s.vmx.fUnrestrictedGuest) 8757 { 7629 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest) 7630 { 7631 /* 7632 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit. 7633 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields". 7634 */ 7635 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID; 7636 } 7637 else 7638 { 7639 /* We require CR0 to check if the guest is in real-mode. */ 7640 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 7641 AssertRCReturn(rc, rc); 7642 7643 /* 7644 * Hardware interrupts & exceptions cannot be delivered through the software interrupt 7645 * redirection bitmap to the real mode task in virtual-8086 mode. We must jump to the 7646 * interrupt handler in the (real-mode) guest. 7647 * 7648 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode". 7649 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling. 7650 */ 7651 if (CPUMIsGuestInRealModeEx(pMixedCtx)) 7652 { 7653 PVM pVM = pVCpu->CTX_SUFF(pVM); 8758 7654 Assert(PDMVmmDevHeapIsEnabled(pVM)); 8759 7655 Assert(pVM->hm.s.vmx.pRealModeTSS); 8760 7656 8761 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */ 8762 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 8763 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx); 8764 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 7657 /* We require RIP, RSP, RFLAGS, CS, IDTR, import them. */ 7658 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK 7659 | CPUMCTX_EXTRN_TABLE_MASK 7660 | CPUMCTX_EXTRN_RIP 7661 | CPUMCTX_EXTRN_RSP 7662 | CPUMCTX_EXTRN_RFLAGS); 8765 7663 AssertRCReturn(rc, rc); 8766 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP));8767 7664 8768 7665 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */ … … 8776 7673 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */ 8777 7674 if (uVector == X86_XCPT_GP) 8778 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, p uIntrState);7675 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, fStepping, pfIntrState); 8779 7676 8780 7677 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */ 8781 7678 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */ 8782 7679 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, 8783 fStepping, p uIntrState);7680 fStepping, pfIntrState); 8784 7681 } 8785 7682 … … 8803 7700 /* Construct the stack frame for the interrupt/exception handler. 
*/ 8804 7701 VBOXSTRICTRC rcStrict; 8805 rcStrict 7702 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32); 8806 7703 if (rcStrict == VINF_SUCCESS) 8807 7704 rcStrict = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel); … … 8823 7720 /* If any other guest-state bits are changed here, make sure to update 8824 7721 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */ 8825 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS 8826 | HM_CHANGED_GUEST_RIP 8827 | HM_CHANGED_GUEST_RFLAGS 8828 | HM_CHANGED_GUEST_RSP); 7722 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CS 7723 | HM_CHANGED_GUEST_CR2 7724 | HM_CHANGED_GUEST_RIP 7725 | HM_CHANGED_GUEST_RFLAGS 7726 | HM_CHANGED_GUEST_RSP); 8829 7727 8830 7728 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */ 8831 if (*p uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)7729 if (*pfIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI) 8832 7730 { 8833 7731 Assert( uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI 8834 7732 && uIntType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT); 8835 Log4 (("Clearing inhibition due to STI.\n"));8836 *p uIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;7733 Log4Func(("Clearing inhibition due to STI\n")); 7734 *pfIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI; 8837 7735 } 8838 7736 Log4(("Injecting real-mode: u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x Eflags=%#x CS:EIP=%04x:%04x\n", … … 8843 7741 pVCpu->hm.s.Event.fPending = false; 8844 7742 8845 /* Make hmR0VmxPreRunGuest return if we're stepping since we've changed cs:rip. */7743 /* Make hmR0VmxPreRunGuest() return if we're stepping since we've changed cs:rip. */ 8846 7744 if (fStepping) 8847 7745 rcStrict = VINF_EM_DBG_STEPPED; … … 8851 7749 return rcStrict; 8852 7750 } 8853 8854 /*8855 * For unrestricted execution enabled CPUs running real-mode guests, we must not set the deliver-error-code bit.8856 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".8857 */8858 u32IntInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;8859 7751 } 8860 7752 … … 8865 7757 8866 7758 /* Inject. 
*/ 8867 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo);7759 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntInfo); 8868 7760 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntInfo)) 8869 7761 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode); 8870 7762 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr); 7763 AssertRCReturn(rc, rc); 8871 7764 8872 7765 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT … … 8874 7767 pMixedCtx->cr2 = GCPtrFaultAddress; 8875 7768 8876 Log4(("Injecting vcpu[%RU32] u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu, 8877 u32IntInfo, u32ErrCode, cbInstr, pMixedCtx->cr2)); 8878 8879 AssertRCReturn(rc, rc); 7769 Log4(("Injecting u32IntInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", u32IntInfo, u32ErrCode, cbInstr, 7770 pMixedCtx->cr2)); 7771 8880 7772 return VINF_SUCCESS; 8881 7773 } … … 8895 7787 static void hmR0VmxClearIntNmiWindowsVmcs(PVMCPU pVCpu) 8896 7788 { 8897 Log4Func(("vcpu[%d]\n", pVCpu->idCpu));8898 8899 7789 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT) 7790 { 8900 7791 hmR0VmxClearIntWindowExitVmcs(pVCpu); 7792 Log4Func(("Cleared interrupt widow\n")); 7793 } 8901 7794 8902 7795 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT) 7796 { 8903 7797 hmR0VmxClearNmiWindowExitVmcs(pVCpu); 7798 Log4Func(("Cleared interrupt widow\n")); 7799 } 8904 7800 } 8905 7801 … … 8922 7818 8923 7819 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 8924 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7820 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 7821 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 8925 7822 8926 7823 #ifdef VBOX_STRICT … … 8970 7867 VMCPU_ASSERT_EMT(pVCpu); 8971 7868 8972 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);8973 8974 7869 /* No longjmps (logger flushes, locks) in this fragile context. */ 8975 7870 VMMRZCallRing3Disable(pVCpu); … … 8981 7876 if (!pVCpu->hm.s.fLeaveDone) 8982 7877 { 8983 /* Do -not- save guest-state here as we might already be in the middle of saving it (esp. bad if we are 8984 holding the PGM lock while saving the guest state (see hmR0VmxSaveGuestControlRegs()). */ 8985 hmR0VmxLeave(pVCpu, pMixedCtx, false /* fSaveGuestState */); 7878 /* 7879 * Do -not- import the guest-state here as we might already be in the middle of importing 7880 * it, esp. bad if we're holding the PGM lock, see comment in hmR0VmxImportGuestState(). 7881 */ 7882 hmR0VmxLeave(pVCpu, false /* fImportState */); 8986 7883 pVCpu->hm.s.fLeaveDone = true; 8987 7884 } … … 9011 7908 int rc = HMR0EnterCpu(pVCpu); 9012 7909 AssertRC(rc); 9013 Assert(HMCPU_CF_IS_SET(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)); 7910 Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 7911 == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)); 9014 7912 9015 7913 /* Load the active VMCS as the current one. */ … … 9035 7933 9036 7934 /** 7935 * Exports the host state into the VMCS host-state area. 7936 * Sets up the VM-exit MSR-load area. 7937 * 7938 * The CPU state will be loaded from these fields on every successful VM-exit. 7939 * 7940 * @returns VBox status code. 7941 * @param pVCpu The cross context virtual CPU structure. 
7942 *
7943 * @remarks No-long-jump zone!!!
7944 */
7945 static int hmR0VmxExportHostState(PVMCPU pVCpu)
7946 {
7947 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7948
7949 int rc = VINF_SUCCESS;
7950 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)
7951 {
7952 rc = hmR0VmxExportHostControlRegs();
7953 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
7954
7955 rc = hmR0VmxExportHostSegmentRegs(pVCpu);
7956 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
7957
7958 rc = hmR0VmxExportHostMsrs(pVCpu);
7959 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc);
7960
7961 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_HOST_CONTEXT;
7962 }
7963 return rc;
7964 }
7965
7966
7967 /**
9037 7968 * Saves the host state in the VMCS host-state.
9038 * Sets up the VM-exit MSR-load area.
9039 *
9040 * The CPU state will be loaded from these fields on every successful VM-exit.
9041 7969 *
9042 7970 * @returns VBox status code.
… …
9046 7974 * @remarks No-long-jump zone!!!
9047 7975 */
9048 static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
9049 {
7976 VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu)
7977 {
7978 AssertPtr(pVCpu);
9050 7979 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9051 7980
9052 int rc = VINF_SUCCESS;
9053 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))
9054 {
9055 rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
9056 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
9057
9058 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
9059 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
9060
9061 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
9062 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
9063
9064 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_HOST_CONTEXT);
9065 }
9066 return rc;
9067 }
9068
9069
9070 /**
9071 * Saves the host state in the VMCS host-state.
9072 *
9073 * @returns VBox status code.
9074 * @param pVM The cross context VM structure.
9075 * @param pVCpu The cross context virtual CPU structure.
9076 *
9077 * @remarks No-long-jump zone!!!
9078 */
9079 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
9080 {
9081 AssertPtr(pVM);
9082 AssertPtr(pVCpu);
9083
9084 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
9085
9086 /* Save the host state here while entering HM context. When thread-context hooks are used, we might get preempted
9087 and have to resave the host state but most of the time we won't be, so do it here before we disable interrupts. */
9088 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
9089 return hmR0VmxSaveHostState(pVM, pVCpu);
9090 }
9091
9092
9093 /**
9094 * Saves the host state in the VMCS host-state.
7981 /*
7982 * Export the host state here while entering HM context.
7983 * When thread-context hooks are used, we might get preempted and have to re-save the host
7984 * state but most of the time we won't be, so do it here before we disable interrupts.
7985 */
7986 return hmR0VmxExportHostState(pVCpu);
7987 }
7988
7989
7990 /**
7991 * Exports the guest state into the VMCS guest-state area.
9095 7992 *
9096 7993 * The will typically be done before VM-entry when the guest-CPU state and the
… …
9115 8012 * @remarks No-long-jump zone!!!
9116 8013 */ 9117 static VBOXSTRICTRC hmR0Vmx LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)8014 static VBOXSTRICTRC hmR0VmxExportGuestState(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 9118 8015 { 9119 8016 AssertPtr(pVM); … … 9124 8021 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu)); 9125 8022 9126 STAM_PROFILE_ADV_START(&pVCpu->hm.s.Stat LoadGuestState, x);8023 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExportGuestState, x); 9127 8024 9128 8025 /* Determine real-on-v86 mode. */ … … 9135 8032 9136 8033 /* 9137 * Load the guest-state into the VMCS.9138 8034 * Any ordering dependency among the sub-functions below must be explicitly stated using comments. 9139 8035 * Ideally, assert that the cross-dependent bits are up-to-date at the point of using it. 9140 8036 */ 9141 int rc = hmR0VmxSe tupVMRunHandler(pVCpu, pMixedCtx);9142 AssertLogRelMsgRCReturn(rc, (" hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9143 9144 /* This needs to be done after hmR0VmxSe tupVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */9145 rc = hmR0Vmx LoadGuestEntryCtls(pVCpu, pMixedCtx);9146 AssertLogRelMsgRCReturn(rc, (" hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9147 9148 /* This needs to be done after hmR0VmxSe tupVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */9149 rc = hmR0Vmx LoadGuestExitCtls(pVCpu, pMixedCtx);9150 AssertLogRelMsgRCReturn(rc, (" hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9151 9152 rc = hmR0Vmx LoadGuestActivityState(pVCpu, pMixedCtx);9153 AssertLogRelMsgRCReturn(rc, (" hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);9154 9155 VBOXSTRICTRC rcStrict = hmR0Vmx LoadGuestCR3AndCR4(pVCpu, pMixedCtx);8037 int rc = hmR0VmxSelectVMRunHandler(pVCpu, pMixedCtx); 8038 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8039 8040 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-entry control updates. */ 8041 rc = hmR0VmxExportGuestEntryCtls(pVCpu, pMixedCtx); 8042 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8043 8044 /* This needs to be done after hmR0VmxSelectVMRunHandler() as changing pfnStartVM may require VM-exit control updates. */ 8045 rc = hmR0VmxExportGuestExitCtls(pVCpu, pMixedCtx); 8046 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8047 8048 rc = hmR0VmxExportGuestCR0(pVCpu, pMixedCtx); 8049 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8050 8051 VBOXSTRICTRC rcStrict = hmR0VmxExportGuestCR3AndCR4(pVCpu, pMixedCtx); 9156 8052 if (rcStrict == VINF_SUCCESS) 9157 8053 { /* likely */ } … … 9162 8058 } 9163 8059 9164 /* Assumes pMixedCtx->cr0 is up-to-date (strict builds require CR0 for segment register validation checks). */ 9165 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx); 9166 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9167 9168 /* This needs to be done after hmR0VmxLoadGuestEntryCtls() and hmR0VmxLoadGuestExitCtls() as it may alter controls if we 9169 determine we don't have to swap EFER after all. */ 9170 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx); 9171 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9172 9173 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx); 9174 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! 
rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9175 9176 rc = hmR0VmxLoadGuestXcptIntercepts(pVCpu, pMixedCtx); 9177 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestXcptIntercepts! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9178 9179 /* 9180 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here). 9181 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState(). 9182 */ 9183 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx); 9184 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc); 9185 9186 /* Clear any unused and reserved bits. */ 9187 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_GUEST_CR2 9188 | HM_CHANGED_GUEST_HWVIRT); 9189 9190 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x); 8060 rc = hmR0VmxExportGuestSegmentRegs(pVCpu, pMixedCtx); 8061 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8062 8063 /* This needs to be done after hmR0VmxExportGuestEntryCtls() and hmR0VmxExportGuestExitCtls() as it 8064 may alter controls if we determine we don't have to swap EFER after all. */ 8065 rc = hmR0VmxExportGuestMsrs(pVCpu, pMixedCtx); 8066 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8067 8068 rc = hmR0VmxExportGuestApicTpr(pVCpu); 8069 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8070 8071 /* This needs to be done after hmR0VmxExportGuestCR0() as it may alter intercepted exceptions. */ 8072 rc = hmR0VmxExportGuestXcptIntercepts(pVCpu); 8073 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8074 8075 /* Exporting RFLAGS here is fine, even though RFLAGS.TF might depend on guest debug state which is 8076 not exported here. It is re-evaluated and updated if necessary in hmR0VmxExportSharedState(). */ 8077 rc = hmR0VmxExportGuestRip(pVCpu, pMixedCtx); 8078 rc |= hmR0VmxExportGuestRsp(pVCpu, pMixedCtx); 8079 rc |= hmR0VmxExportGuestRflags(pVCpu, pMixedCtx); 8080 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc\n", rc), rc); 8081 8082 /* Clear any bits that may be set but exported unconditionally or unused/reserved bits. */ 8083 ASMAtomicUoAndU64(&pVCpu->hm.s.fCtxChanged, ~( (HM_CHANGED_GUEST_GPRS_MASK & ~HM_CHANGED_GUEST_RSP) 8084 | HM_CHANGED_GUEST_CR2 8085 | (HM_CHANGED_GUEST_DR_MASK & ~HM_CHANGED_GUEST_DR7) 8086 | HM_CHANGED_GUEST_X87 8087 | HM_CHANGED_GUEST_SSE_AVX 8088 | HM_CHANGED_GUEST_OTHER_XSAVE 8089 | HM_CHANGED_GUEST_XCRx 8090 | HM_CHANGED_GUEST_KERNEL_GS_BASE /* Part of lazy or auto load-store MSRs. */ 8091 | HM_CHANGED_GUEST_SYSCALL_MSRS /* Part of lazy or auto load-store MSRs. */ 8092 | HM_CHANGED_GUEST_TSC_AUX 8093 | HM_CHANGED_GUEST_OTHER_MSRS 8094 | HM_CHANGED_GUEST_HWVIRT 8095 | (HM_CHANGED_KEEPER_STATE_MASK & ~HM_CHANGED_VMX_MASK))); 8096 8097 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExportGuestState, x); 9191 8098 return rc; 9192 8099 } … … 9194 8101 9195 8102 /** 9196 * Loads the state shared between the host and guest into the VMCS.8103 * Exports the state shared between the host and guest into the VMCS. 9197 8104 * 9198 8105 * @param pVM The cross context VM structure. … … 9202 8109 * @remarks No-long-jump zone!!! 
9203 8110 */ 9204 static void hmR0Vmx LoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)8111 static void hmR0VmxExportSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx) 9205 8112 { 9206 8113 NOREF(pVM); … … 9209 8116 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 9210 8117 9211 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_CR0))9212 { 9213 int rc = hmR0Vmx LoadSharedCR0(pVCpu, pCtx);8118 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_DR_MASK) 8119 { 8120 int rc = hmR0VmxExportSharedDebugState(pVCpu, pCtx); 9214 8121 AssertRC(rc); 9215 } 9216 9217 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_DEBUG)) 9218 { 9219 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx); 9220 AssertRC(rc); 8122 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_GUEST_DR_MASK; 9221 8123 9222 8124 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */ 9223 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_GUEST_RFLAGS))9224 { 9225 rc = hmR0Vmx LoadGuestRflags(pVCpu, pCtx);8125 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_GUEST_RFLAGS) 8126 { 8127 rc = hmR0VmxExportGuestRflags(pVCpu, pCtx); 9226 8128 AssertRC(rc); 9227 8129 } 9228 8130 } 9229 8131 9230 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS))8132 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_GUEST_LAZY_MSRS) 9231 8133 { 9232 8134 hmR0VmxLazyLoadGuestMsrs(pVCpu, pCtx); 9233 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS); 9234 } 9235 9236 /* Loading CR0, debug state might have changed intercepts, update VMCS. */ 9237 if (HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS)) 9238 { 9239 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_AC)); 9240 Assert(pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT_32(X86_XCPT_DB)); 9241 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap); 9242 AssertRC(rc); 9243 HMCPU_CF_CLEAR(pVCpu, HM_CHANGED_VMM_GUEST_XCPT_INTERCEPTS); 9244 } 9245 9246 AssertMsg(!HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE), 9247 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu))); 8135 pVCpu->hm.s.fCtxChanged &= ~HM_CHANGED_VMX_GUEST_LAZY_MSRS; 8136 } 8137 8138 AssertMsg(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE), 8139 ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 9248 8140 } 9249 8141 … … 9265 8157 * @remarks No-long-jump zone!!! 9266 8158 */ 9267 static VBOXSTRICTRC hmR0Vmx LoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)8159 static VBOXSTRICTRC hmR0VmxExportGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCCPUMCTX pMixedCtx) 9268 8160 { 9269 8161 HMVMX_ASSERT_PREEMPT_SAFE(); … … 9271 8163 Assert(VMMR0IsLogFlushDisabled(pVCpu)); 9272 8164 9273 Log5(("LoadFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));9274 8165 #ifdef HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE 9275 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);8166 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_ALL_GUEST; 9276 8167 #endif 9277 8168 9278 8169 /* 9279 * RIP is what changes the most often and hence if it's the only bit needing to be9280 * updated, we shall handle it early for performance reasons.8170 * For many exits it's only RIP that changes and hence try to export it first 8171 * without going through a lot of change flag checks. 
9281 8172 */ 9282 VBOXSTRICTRC rcStrict = VINF_SUCCESS; 9283 if (HMCPU_CF_IS_SET_ONLY(pVCpu, HM_CHANGED_GUEST_RIP)) 9284 { 9285 rcStrict = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx); 8173 VBOXSTRICTRC rcStrict; 8174 uint64_t fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 8175 if ((fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) == HM_CHANGED_GUEST_RIP) 8176 { 8177 rcStrict = hmR0VmxExportGuestRip(pVCpu, pMixedCtx); 9286 8178 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9287 8179 { /* likely */} 9288 8180 else 9289 { 9290 AssertMsgFailedReturn(("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestRip failed! rc=%Rrc\n", 9291 VBOXSTRICTRC_VAL(rcStrict)), rcStrict); 9292 } 9293 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal); 9294 } 9295 else if (HMCPU_CF_VALUE(pVCpu)) 9296 { 9297 rcStrict = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx); 8181 AssertMsgFailedReturn(("hmR0VmxExportGuestRip failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)), rcStrict); 8182 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportMinimal); 8183 } 8184 else if (fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)) 8185 { 8186 rcStrict = hmR0VmxExportGuestState(pVM, pVCpu, pMixedCtx); 9298 8187 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9299 8188 { /* likely */} 9300 8189 else 9301 8190 { 9302 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, 9303 ("hmR0VmxLoadGuestStateOptimal: hmR0VmxLoadGuestState failed! rc=%Rrc\n",VBOXSTRICTRC_VAL(rcStrict)));8191 AssertMsg(rcStrict == VINF_EM_RESCHEDULE_REM, ("hmR0VmxExportGuestState failed! rc=%Rrc\n", 8192 VBOXSTRICTRC_VAL(rcStrict))); 9304 8193 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 9305 8194 return rcStrict; 9306 8195 } 9307 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull); 9308 } 9309 8196 STAM_COUNTER_INC(&pVCpu->hm.s.StatExportFull); 8197 } 8198 else 8199 rcStrict = VINF_SUCCESS; 8200 8201 #ifdef VBOX_STRICT 9310 8202 /* All the guest state bits should be loaded except maybe the host context and/or the shared host/guest bits. */ 9311 AssertMsg( !HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_ALL_GUEST) 9312 || HMCPU_CF_IS_PENDING_ONLY(pVCpu, HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE), 9313 ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu))); 8203 fCtxChanged = ASMAtomicUoReadU64(&pVCpu->hm.s.fCtxChanged); 8204 AssertMsg(!(fCtxChanged & (HM_CHANGED_ALL_GUEST & ~HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE)), 8205 ("fCtxChanged=%#RX64\n", fCtxChanged)); 8206 #endif 9314 8207 return rcStrict; 9315 8208 } … … 9370 8263 * in both PGMHandlerPhysicalReset() and IOMMMIOMapMMIOHCPage(), see @bugref{8721}. 9371 8264 * 9372 * This is the reason we do it here and not in hmR0Vmx LoadGuestState().8265 * This is the reason we do it here and not in hmR0VmxExportGuestState(). 9373 8266 */ 9374 8267 if ( !pVCpu->hm.s.vmx.u64MsrApicBase … … 9387 8280 9388 8281 /* Map the HC APIC-access page in place of the MMIO page, also updates the shadow page tables if necessary. 
*/ 9389 Log4 (("hmR0VmxPreRunGuest: VCPU%u: Mapped HC APIC-access page at %#RGp\n", pVCpu->idCpu, GCPhysApicBase));8282 Log4Func(("Mapped HC APIC-access page at %#RGp\n", GCPhysApicBase)); 9390 8283 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P); 9391 8284 AssertRCReturn(rc, rc); … … 9397 8290 if (TRPMHasTrap(pVCpu)) 9398 8291 hmR0VmxTrpmTrapToPendingEvent(pVCpu); 9399 uint32_t uIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);8292 uint32_t fIntrState = hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx); 9400 8293 9401 8294 /* … … 9403 8296 * longjmps or interrupts + preemption enabled. Event injection might also result in triple-faulting the VM. 9404 8297 */ 9405 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, uIntrState, fStepping);8298 rcStrict = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx, fIntrState, fStepping); 9406 8299 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9407 8300 { /* likely */ } … … 9421 8314 9422 8315 /* 9423 * Loadthe guest state bits.8316 * Export the guest state bits. 9424 8317 * 9425 8318 * We cannot perform longjmps while loading the guest state because we do not preserve the … … 9431 8324 * Hence, loading of the guest state needs to be done -after- injection of events. 9432 8325 */ 9433 rcStrict = hmR0Vmx LoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);8326 rcStrict = hmR0VmxExportGuestStateOptimal(pVM, pVCpu, pMixedCtx); 9434 8327 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 9435 8328 { /* likely */ } … … 9520 8413 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestFpuState, x); 9521 8414 if (CPUMR0LoadGuestFPU(pVM, pVCpu) == VINF_CPUM_HOST_CR0_MODIFIED) 9522 HMCPU_CF_SET(pVCpu, HM_CHANGED_HOST_CONTEXT);8415 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_HOST_CONTEXT; 9523 8416 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestFpuState, x); 9524 8417 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadGuestFpu); 9525 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_CR0));9526 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);9527 8418 } 9528 8419 … … 9532 8423 if ( !pVCpu->hm.s.vmx.fUpdatedHostMsrs 9533 8424 && pVCpu->hm.s.vmx.cMsrs > 0) 9534 {9535 8425 hmR0VmxUpdateAutoLoadStoreHostMsrs(pVCpu); 9536 }9537 8426 9538 8427 /* 9539 * Loadthe host state bits as we may've been preempted (only happens when8428 * Re-save the host state bits as we may've been preempted (only happens when 9540 8429 * thread-context hooks are used or when hmR0VmxSetupVMRunHandler() changes pfnStartVM). 9541 8430 * Note that the 64-on-32 switcher saves the (64-bit) host state into the VMCS and … … 9543 8432 * See @bugref{8432}. 9544 8433 */ 9545 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_CONTEXT))9546 { 9547 int rc = hmR0Vmx SaveHostState(pVM,pVCpu);8434 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT) 8435 { 8436 int rc = hmR0VmxExportHostState(pVCpu); 9548 8437 AssertRC(rc); 9549 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreempt SaveHostState);9550 } 9551 Assert(! HMCPU_CF_IS_PENDING(pVCpu,HM_CHANGED_HOST_CONTEXT));8438 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchPreemptExportHostState); 8439 } 8440 Assert(!(pVCpu->hm.s.fCtxChanged & HM_CHANGED_HOST_CONTEXT)); 9552 8441 9553 8442 /* 9554 * Loadthe state shared between host and guest (FPU, debug, lazy MSRs).8443 * Export the state shared between host and guest (FPU, debug, lazy MSRs). 9555 8444 */ 9556 if ( HMCPU_CF_IS_PENDING(pVCpu, HM_CHANGED_HOST_GUEST_SHARED_STATE))9557 hmR0Vmx LoadSharedState(pVM, pVCpu, pMixedCtx);9558 AssertMsg(! 
HMCPU_CF_VALUE(pVCpu), ("fContextUseFlags=%#RX32\n", HMCPU_CF_VALUE(pVCpu)));8445 if (pVCpu->hm.s.fCtxChanged & HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE) 8446 hmR0VmxExportSharedState(pVM, pVCpu, pMixedCtx); 8447 AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged)); 9559 8448 9560 8449 /* Store status of the shared guest-host state at the time of VM-entry. */ … … 9605 8494 { 9606 8495 bool fMsrUpdated; 9607 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9608 AssertRC(rc2); 9609 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)); 9610 9611 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */, 8496 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_TSC_AUX); 8497 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_K8_TSC_AUX, CPUMGetGuestTscAux(pVCpu), true /* fUpdateHostMsr */, 9612 8498 &fMsrUpdated); 9613 8499 AssertRC(rc2); 9614 8500 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs); 9615 9616 8501 /* Finally, mark that all host MSR values are updated so we don't redo it without leaving VT-x. See @bugref{6956}. */ 9617 8502 pVCpu->hm.s.vmx.fUpdatedHostMsrs = true; … … 9627 8512 { 9628 8513 bool fMsrUpdated; 9629 int rc2 = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 9630 AssertRC(rc2); 9631 Assert(HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)); 9632 9633 rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */, 9634 &fMsrUpdated); 8514 hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_OTHER_MSRS); 8515 int rc2 = hmR0VmxAddAutoLoadStoreMsr(pVCpu, MSR_IA32_SPEC_CTRL, CPUMGetGuestSpecCtrl(pVCpu), true /* fUpdateHostMsr */, 8516 &fMsrUpdated); 9635 8517 AssertRC(rc2); 9636 8518 Assert(fMsrUpdated || pVCpu->hm.s.vmx.fUpdatedHostMsrs); … … 9658 8540 * @param pVM The cross context VM structure. 9659 8541 * @param pVCpu The cross context virtual CPU structure. 9660 * @param pMixedCtx Pointer to the guest-CPU context. The data maybe9661 * out-of-sync. Make sure to update the required fields9662 * before using them.9663 8542 * @param pVmxTransient Pointer to the VMX transient structure. 9664 8543 * @param rcVMRun Return code of VMLAUNCH/VMRESUME. … … 9669 8548 * unconditionally when it is safe to do so. 9670 8549 */ 9671 static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun) 9672 { 9673 NOREF(pVM); 9674 uint64_t uHostTsc = ASMReadTSC(); 9675 8550 static void hmR0VmxPostRunGuest(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient, int rcVMRun) 8551 { 8552 uint64_t const uHostTsc = ASMReadTSC(); 9676 8553 Assert(!VMMRZCallRing3IsEnabled(pVCpu)); 9677 8554 9678 8555 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB flushing. */ 9679 8556 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for EMT poking. */ 9680 HMVMXCPU_GST_RESET_TO(pVCpu, 0);/* Exits/longjmps to ring-3 requires saving the guest state. */8557 pVCpu->hm.s.fCtxChanged = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */ 9681 8558 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */ 9682 8559 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. 
*/ … … 9735 8612 if (!pVmxTransient->fVMEntryFailed) 9736 8613 { 9737 /** @todo We can optimize this by only syncing with our force-flags when 9738 * really needed and keeping the VMCS state as it is for most 9739 * VM-exits. */ 9740 /* Update the guest interruptibility-state from the VMCS. */ 9741 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx); 8614 VMMRZCallRing3Enable(pVCpu); 9742 8615 9743 8616 /* 9744 * Allow longjmps to ring-3 -after- saving the guest-interruptibility state 9745 * as it's not part of hmR0VmxSaveGuestState() and thus would trigger an assertion 9746 * on the longjmp path to ring-3 while saving the (rest of) the guest state, 9747 * see @bugref{6208#c63}. 8617 * Import the guest-interruptibility state always as we need it while evaluating 8618 * injecting events on re-entry. 8619 * 8620 * We don't import CR0 (when Unrestricted guest execution is unavailable) despite 8621 * checking for real-mode while exporting the state because all bits that cause 8622 * mode changes wrt CR0 are intercepted. 9748 8623 */ 9749 VMMRZCallRing3Enable(pVCpu); 8624 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_HM_VMX_INT_STATE); 8625 AssertRC(rc); 9750 8626 9751 8627 #if defined(HMVMX_ALWAYS_SYNC_FULL_GUEST_STATE) || defined(HMVMX_ALWAYS_SAVE_FULL_GUEST_STATE) 9752 rc = hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);8628 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 9753 8629 AssertRC(rc); 9754 8630 #elif defined(HMVMX_ALWAYS_SAVE_GUEST_RFLAGS) 9755 rc = hmR0Vmx SaveGuestRflags(pVCpu, pMixedCtx);8631 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_RFLAGS); 9756 8632 AssertRC(rc); 9757 8633 #endif … … 9765 8641 rc = APICSetTpr(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]); 9766 8642 AssertRC(rc); 9767 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);8643 ASMAtomicOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 9768 8644 } 9769 8645 … … 9773 8649 else 9774 8650 { 9775 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun, 9776 pVmxTransient->fVMEntryFailed)); 8651 Log4Func(("VM-entry failure: rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", rcVMRun, pVmxTransient->fVMEntryFailed)); 9777 8652 } 9778 8653 … … 9816 8691 /* Restore any residual host-state and save any bits shared between host 9817 8692 and guest into the guest-CPU state. Re-enables interrupts! */ 9818 hmR0VmxPostRunGuest(pV M, pVCpu, pCtx, &VmxTransient, rcRun);8693 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun); 9819 8694 9820 8695 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */ … … 9931 8806 * @param pDbgState The structure to initialize. 9932 8807 */ 9933 DECLINLINE(void)hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState)8808 static void hmR0VmxRunDebugStateInit(PVMCPU pVCpu, PCCPUMCTX pCtx, PVMXRUNDBGSTATE pDbgState) 9934 8809 { 9935 8810 pDbgState->uRipStart = pCtx->rip; … … 9962 8837 * @param pDbgState The debug state. 9963 8838 */ 9964 DECLINLINE(void)hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState)8839 static void hmR0VmxPreRunGuestDebugStateApply(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState) 9965 8840 { 9966 8841 /* … … 10013 8888 10014 8889 10015 DECLINLINE(VBOXSTRICTRC)hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict)8890 static VBOXSTRICTRC hmR0VmxRunDebugStateRevert(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, VBOXSTRICTRC rcStrict) 10016 8891 { 10017 8892 /* … … 10055 8930 * the necessary VM-exits demanded by DBGF and DTrace. 
10056 8931 * 10057 * @param pVM The cross context VM structure.10058 8932 * @param pVCpu The cross context virtual CPU structure. 10059 * @param pCtx Pointer to the guest-CPU context.10060 8933 * @param pDbgState The debug state. 10061 8934 * @param pVmxTransient Pointer to the VMX transient structure. May update 10062 8935 * fUpdateTscOffsettingAndPreemptTimer. 10063 8936 */ 10064 static void hmR0VmxPreRunGuestDebugStateUpdate(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, 10065 PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient) 8937 static void hmR0VmxPreRunGuestDebugStateUpdate(PVMCPU pVCpu, PVMXRUNDBGSTATE pDbgState, PVMXTRANSIENT pVmxTransient) 10066 8938 { 10067 8939 /* … … 10085 8957 * Software interrupts (INT XXh) - no idea how to trigger these... 10086 8958 */ 8959 PVM pVM = pVCpu->CTX_SUFF(pVM); 10087 8960 if ( DBGF_IS_EVENT_ENABLED(pVM, DBGFEVENT_INTERRUPT_SOFTWARE) 10088 8961 || VBOXVMM_INT_SOFTWARE_ENABLED()) … … 10129 9002 * Process events and probes for VM-exits, making sure we get the wanted VM-exits. 10130 9003 * 10131 * Note! This is the reverse of w aft hmR0VmxHandleExitDtraceEvents does.9004 * Note! This is the reverse of what hmR0VmxHandleExitDtraceEvents does. 10132 9005 * So, when adding/changing/removing please don't forget to update it. 10133 9006 * … … 10211 9084 || IS_EITHER_ENABLED(pVM, INSTR_CRX_WRITE)) 10212 9085 { 10213 int rc 2 = hmR0VmxSaveGuestCR0(pVCpu, pCtx);10214 rc2 |= hmR0VmxSaveGuestCR4(pVCpu, pCtx);10215 rc2 |= hmR0VmxSaveGuestApicState(pVCpu, pCtx);10216 AssertRC(rc 2);9086 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 9087 | CPUMCTX_EXTRN_CR4 9088 | CPUMCTX_EXTRN_APIC_TPR); 9089 AssertRC(rc); 10217 9090 10218 9091 #if 0 /** @todo fix me */ … … 10234 9107 { 10235 9108 pDbgState->fClearCr0Mask = false; 10236 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0);9109 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 10237 9110 } 10238 9111 if (pDbgState->fClearCr4Mask) 10239 9112 { 10240 9113 pDbgState->fClearCr4Mask = false; 10241 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4);9114 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4); 10242 9115 } 10243 9116 } … … 10479 9352 case VMX_EXIT_MOV_CRX: 10480 9353 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10481 /** @todo r=bird: I feel these macros aren't very descriptive and needs to be at least 30 chars longer! ;-) 10482 * Sensible abbreviations strongly recommended here because even with 130 columns this stuff get too wide! 
*/ 10483 if ( VMX_EXIT_QUALIFICATION_CRX_ACCESS(pVmxTransient->uExitQualification) 10484 == VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ) 9354 if (VMX_EXIT_QUAL_CRX_ACCESS(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_CRX_ACCESS_READ) 10485 9355 SET_BOTH(CRX_READ); 10486 9356 else 10487 9357 SET_BOTH(CRX_WRITE); 10488 uEventArg = VMX_EXIT_QUAL IFICATION_CRX_REGISTER(pVmxTransient->uExitQualification);9358 uEventArg = VMX_EXIT_QUAL_CRX_REGISTER(pVmxTransient->uExitQualification); 10489 9359 break; 10490 9360 case VMX_EXIT_MOV_DRX: 10491 9361 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10492 if ( VMX_EXIT_QUAL IFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification)10493 == VMX_EXIT_QUAL IFICATION_DRX_DIRECTION_READ)9362 if ( VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) 9363 == VMX_EXIT_QUAL_DRX_DIRECTION_READ) 10494 9364 SET_BOTH(DRX_READ); 10495 9365 else 10496 9366 SET_BOTH(DRX_WRITE); 10497 uEventArg = VMX_EXIT_QUAL IFICATION_DRX_REGISTER(pVmxTransient->uExitQualification);9367 uEventArg = VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification); 10498 9368 break; 10499 9369 case VMX_EXIT_RDMSR: SET_BOTH(RDMSR); break; … … 10571 9441 { 10572 9442 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10573 hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);9443 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 10574 9444 switch (enmEvent1) 10575 9445 { … … 10759 9629 { 10760 9630 hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 10761 hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 9631 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 9632 AssertRC(rc); 10762 9633 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pMixedCtx, pVmxTransient->uExitReason, pVmxTransient->uExitQualification); 10763 9634 } … … 10841 9712 case VMX_EXIT_XRSTORS: 10842 9713 { 10843 int rc 2 = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);10844 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);10845 AssertRCReturn(rc 2, rc2);9714 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 9715 | CPUMCTX_EXTRN_CS); 9716 AssertRCReturn(rc, rc); 10846 9717 if ( pMixedCtx->rip != pDbgState->uRipStart 10847 9718 || pMixedCtx->cs.Sel != pDbgState->uCsStart) … … 10905 9776 10906 9777 /* Set HMCPU indicators. */ 10907 bool const fSavedSingleInstruction 10908 pVCpu->hm.s.fSingleInstruction 10909 pVCpu->hm.s.fDebugWantRdTscExit 10910 pVCpu->hm.s.fUsingDebugLoop 9778 bool const fSavedSingleInstruction = pVCpu->hm.s.fSingleInstruction; 9779 pVCpu->hm.s.fSingleInstruction = pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu); 9780 pVCpu->hm.s.fDebugWantRdTscExit = false; 9781 pVCpu->hm.s.fUsingDebugLoop = true; 10911 9782 10912 9783 /* State we keep to help modify and later restore the VMCS fields we alter, and for detecting steps. */ 10913 9784 VMXRUNDBGSTATE DbgState; 10914 9785 hmR0VmxRunDebugStateInit(pVCpu, pCtx, &DbgState); 10915 hmR0VmxPreRunGuestDebugStateUpdate(pV M, pVCpu, pCtx, &DbgState, &VmxTransient);9786 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient); 10916 9787 10917 9788 /* … … 10949 9820 * and guest into the guest-CPU state. Re-enables interrupts! 10950 9821 */ 10951 hmR0VmxPostRunGuest(pV M, pVCpu, pCtx, &VmxTransient, rcRun);9822 hmR0VmxPostRunGuest(pVCpu, &VmxTransient, rcRun); 10952 9823 10953 9824 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). 
*/ … … 10990 9861 if (fStepping) 10991 9862 { 10992 int rc 2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);10993 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);10994 AssertRC Return(rc2, rc2);9863 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 9864 | CPUMCTX_EXTRN_CS); 9865 AssertRC(rc); 10995 9866 if ( pCtx->rip != DbgState.uRipStart 10996 9867 || pCtx->cs.Sel != DbgState.uCsStart) … … 10999 9870 break; 11000 9871 } 11001 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);9872 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7); 11002 9873 } 11003 9874 … … 11006 9877 */ 11007 9878 if (VBOXVMM_GET_SETTINGS_SEQ_NO() != DbgState.uDtraceSettingsSeqNo) 11008 hmR0VmxPreRunGuestDebugStateUpdate(pV M, pVCpu, pCtx, &DbgState, &VmxTransient);9879 hmR0VmxPreRunGuestDebugStateUpdate(pVCpu, &DbgState, &VmxTransient); 11009 9880 } 11010 9881 … … 11014 9885 if (pVCpu->hm.s.fClearTrapFlag) 11015 9886 { 11016 int rc 2 = hmR0VmxSaveGuestRflags(pVCpu, pCtx);11017 AssertRC Return(rc2, rc2);9887 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RFLAGS); 9888 AssertRC(rc); 11018 9889 pVCpu->hm.s.fClearTrapFlag = false; 11019 9890 pCtx->eflags.Bits.u1TF = 0; … … 11190 10061 { 11191 10062 Assert(VMMRZCallRing3IsEnabled(pVCpu)); 11192 Assert( HMVMXCPU_GST_VALUE(pVCpu) == HMVMX_UPDATED_GUEST_ALL);10063 Assert(!ASMAtomicUoReadU64(&pCtx->fExtrn)); 11193 10064 HMVMX_ASSERT_PREEMPT_SAFE(); 11194 10065 … … 11223 10094 DECLINLINE(VBOXSTRICTRC) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason) 11224 10095 { 11225 # 11226 # define VMEXIT_CALL_RET(a_CallExpr) \10096 #ifdef DEBUG_ramshankar 10097 #define VMEXIT_CALL_RET(a_fSave, a_CallExpr) \ 11227 10098 do { \ 11228 int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); AssertRC(rc2); \ 10099 if (a_fSave != 0) \ 10100 hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); \ 11229 10101 VBOXSTRICTRC rcStrict = a_CallExpr; \ 11230 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); \ 10102 if (a_fSave != 0) \ 10103 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); \ 11231 10104 return rcStrict; \ 11232 10105 } while (0) 11233 # 11234 # define VMEXIT_CALL_RET(a_CallExpr) return a_CallExpr11235 # 10106 #else 10107 # define VMEXIT_CALL_RET(a_fSave, a_CallExpr) return a_CallExpr 10108 #endif 11236 10109 switch (rcReason) 11237 10110 { 11238 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET( hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient));11239 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET( hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient));11240 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET( hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient));11241 case VMX_EXIT_CPUID: VMEXIT_CALL_RET( hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient));11242 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET( hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient));11243 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET( hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient));11244 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET( hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient));11245 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET( hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient));11246 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET( hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient));11247 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET( hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient));11248 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET( hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient));11249 case VMX_EXIT_ MWAIT: VMEXIT_CALL_RET(hmR0VmxExitMwait(pVCpu, pMixedCtx, 
pVmxTransient));11250 case VMX_EXIT_M ONITOR: VMEXIT_CALL_RET(hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient));11251 case VMX_EXIT_ TASK_SWITCH: VMEXIT_CALL_RET(hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient));11252 case VMX_EXIT_ PREEMPT_TIMER: VMEXIT_CALL_RET(hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient));11253 case VMX_EXIT_ RDMSR: VMEXIT_CALL_RET(hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient));11254 case VMX_EXIT_ WRMSR: VMEXIT_CALL_RET(hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient));11255 case VMX_EXIT_ MOV_DRX: VMEXIT_CALL_RET(hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient));11256 case VMX_EXIT_ TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient));11257 case VMX_EXIT_ HLT: VMEXIT_CALL_RET(hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient));11258 case VMX_EXIT_ INVD: VMEXIT_CALL_RET(hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient));11259 case VMX_EXIT_INV LPG: VMEXIT_CALL_RET(hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient));11260 case VMX_EXIT_ RSM: VMEXIT_CALL_RET(hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient));11261 case VMX_EXIT_ MTF: VMEXIT_CALL_RET(hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient));11262 case VMX_EXIT_ PAUSE: VMEXIT_CALL_RET(hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient));11263 case VMX_EXIT_ XDTR_ACCESS: VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));11264 case VMX_EXIT_ TR_ACCESS: VMEXIT_CALL_RET(hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient));11265 case VMX_EXIT_ WBINVD: VMEXIT_CALL_RET(hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient));11266 case VMX_EXIT_ XSETBV: VMEXIT_CALL_RET(hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient));11267 case VMX_EXIT_ RDRAND: VMEXIT_CALL_RET(hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient));11268 case VMX_EXIT_ INVPCID: VMEXIT_CALL_RET(hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient));11269 case VMX_EXIT_ GETSEC: VMEXIT_CALL_RET(hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient));11270 case VMX_EXIT_ RDPMC: VMEXIT_CALL_RET(hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient));11271 case VMX_EXIT_ VMCALL: VMEXIT_CALL_RET(hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient));10111 case VMX_EXIT_EPT_MISCONFIG: VMEXIT_CALL_RET(0, hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient)); 10112 case VMX_EXIT_EPT_VIOLATION: VMEXIT_CALL_RET(0, hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient)); 10113 case VMX_EXIT_IO_INSTR: VMEXIT_CALL_RET(0, hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient)); 10114 case VMX_EXIT_CPUID: VMEXIT_CALL_RET(0, hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient)); 10115 case VMX_EXIT_RDTSC: VMEXIT_CALL_RET(0, hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient)); 10116 case VMX_EXIT_RDTSCP: VMEXIT_CALL_RET(0, hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient)); 10117 case VMX_EXIT_APIC_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient)); 10118 case VMX_EXIT_XCPT_OR_NMI: VMEXIT_CALL_RET(0, hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient)); 10119 case VMX_EXIT_MOV_CRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient)); 10120 case VMX_EXIT_EXT_INT: VMEXIT_CALL_RET(0, hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient)); 10121 case VMX_EXIT_INT_WINDOW: VMEXIT_CALL_RET(0, hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient)); 10122 case VMX_EXIT_TPR_BELOW_THRESHOLD: VMEXIT_CALL_RET(0, hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient)); 10123 case VMX_EXIT_MWAIT: VMEXIT_CALL_RET(0, hmR0VmxExitMwait(pVCpu, pMixedCtx, 
pVmxTransient)); 10124 case VMX_EXIT_MONITOR: VMEXIT_CALL_RET(0, hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient)); 10125 case VMX_EXIT_TASK_SWITCH: VMEXIT_CALL_RET(0, hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient)); 10126 case VMX_EXIT_PREEMPT_TIMER: VMEXIT_CALL_RET(0, hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient)); 10127 case VMX_EXIT_RDMSR: VMEXIT_CALL_RET(0, hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient)); 10128 case VMX_EXIT_WRMSR: VMEXIT_CALL_RET(0, hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient)); 10129 case VMX_EXIT_VMCALL: VMEXIT_CALL_RET(0, hmR0VmxExitVmcall(pVCpu, pMixedCtx, pVmxTransient)); 10130 case VMX_EXIT_MOV_DRX: VMEXIT_CALL_RET(0, hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient)); 10131 case VMX_EXIT_HLT: VMEXIT_CALL_RET(0, hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient)); 10132 case VMX_EXIT_INVD: VMEXIT_CALL_RET(0, hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient)); 10133 case VMX_EXIT_INVLPG: VMEXIT_CALL_RET(0, hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient)); 10134 case VMX_EXIT_RSM: VMEXIT_CALL_RET(0, hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient)); 10135 case VMX_EXIT_MTF: VMEXIT_CALL_RET(0, hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient)); 10136 case VMX_EXIT_PAUSE: VMEXIT_CALL_RET(0, hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient)); 10137 case VMX_EXIT_XDTR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient)); 10138 case VMX_EXIT_TR_ACCESS: VMEXIT_CALL_RET(0, hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient)); 10139 case VMX_EXIT_WBINVD: VMEXIT_CALL_RET(0, hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient)); 10140 case VMX_EXIT_XSETBV: VMEXIT_CALL_RET(0, hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient)); 10141 case VMX_EXIT_RDRAND: VMEXIT_CALL_RET(0, hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient)); 10142 case VMX_EXIT_INVPCID: VMEXIT_CALL_RET(0, hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient)); 10143 case VMX_EXIT_GETSEC: VMEXIT_CALL_RET(0, hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient)); 10144 case VMX_EXIT_RDPMC: VMEXIT_CALL_RET(0, hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient)); 11272 10145 11273 10146 case VMX_EXIT_TRIPLE_FAULT: return hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); … … 11296 10169 case VMX_EXIT_XRSTORS: 11297 10170 return hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient); 10171 11298 10172 case VMX_EXIT_ENCLS: 11299 10173 case VMX_EXIT_RDSEED: /* only spurious VM-exits, so undefined */ … … 11363 10237 /* Advance the RIP. */ 11364 10238 pMixedCtx->rip += cbInstr; 11365 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);10239 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 11366 10240 11367 10241 /* Update interrupt inhibition. */ … … 11387 10261 { 11388 10262 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11389 rc |= hmR0Vmx SaveGuestRip(pVCpu, pMixedCtx);11390 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);10263 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 10264 | CPUMCTX_EXTRN_RFLAGS); 11391 10265 AssertRCReturn(rc, rc); 11392 10266 … … 11401 10275 if ( !pVCpu->hm.s.fSingleInstruction 11402 10276 && pMixedCtx->eflags.Bits.u1TF) 11403 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 10277 { 10278 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 10279 AssertRCReturn(rc, rc); 10280 } 11404 10281 11405 10282 return VINF_SUCCESS; … … 11440 10317 * CR0. 
11441 10318 */ 11442 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);11443 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1);10319 uint32_t fSetCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 10320 uint32_t fZapCR0 = (uint32_t)(pVM->hm.s.vmx.Msrs.u64Cr0Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr0Fixed1); 11444 10321 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). 11445 10322 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */ 11446 10323 if (fUnrestrictedGuest) 11447 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);11448 11449 uint32_t u 32GuestCR0;11450 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u 32GuestCR0);10324 fSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG); 10325 10326 uint32_t uGuestCR0; 10327 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uGuestCR0); 11451 10328 AssertRCBreak(rc); 11452 HMVMX_CHECK_BREAK((u 32GuestCR0 & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);11453 HMVMX_CHECK_BREAK(!(u 32GuestCR0 & ~uZapCR0), VMX_IGS_CR0_FIXED0);10329 HMVMX_CHECK_BREAK((uGuestCR0 & fSetCR0) == fSetCR0, VMX_IGS_CR0_FIXED1); 10330 HMVMX_CHECK_BREAK(!(uGuestCR0 & ~fZapCR0), VMX_IGS_CR0_FIXED0); 11454 10331 if ( !fUnrestrictedGuest 11455 && (u32GuestCR0 & X86_CR0_PG)11456 && !(u 32GuestCR0 & X86_CR0_PE))10332 && (uGuestCR0 & X86_CR0_PG) 10333 && !(uGuestCR0 & X86_CR0_PE)) 11457 10334 { 11458 10335 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO); … … 11462 10339 * CR4. 11463 10340 */ 11464 uint64_t uSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);11465 uint64_t uZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1);11466 11467 uint32_t u 32GuestCR4;11468 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u 32GuestCR4);10341 uint64_t fSetCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 & pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 10342 uint64_t fZapCR4 = (pVM->hm.s.vmx.Msrs.u64Cr4Fixed0 | pVM->hm.s.vmx.Msrs.u64Cr4Fixed1); 10343 10344 uint32_t uGuestCR4; 10345 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uGuestCR4); 11469 10346 AssertRCBreak(rc); 11470 HMVMX_CHECK_BREAK((u 32GuestCR4 & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);11471 HMVMX_CHECK_BREAK(!(u 32GuestCR4 & ~uZapCR4), VMX_IGS_CR4_FIXED0);10347 HMVMX_CHECK_BREAK((uGuestCR4 & fSetCR4) == fSetCR4, VMX_IGS_CR4_FIXED1); 10348 HMVMX_CHECK_BREAK(!(uGuestCR4 & ~fZapCR4), VMX_IGS_CR4_FIXED0); 11472 10349 11473 10350 /* … … 11525 10402 if ( fLongModeGuest 11526 10403 || ( fUnrestrictedGuest 11527 && !(u 32GuestCR0 & X86_CR0_PE)))10404 && !(uGuestCR0 & X86_CR0_PE))) 11528 10405 { 11529 10406 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID); … … 11545 10422 if (fLongModeGuest) 11546 10423 { 11547 HMVMX_CHECK_BREAK(u 32GuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE);11548 HMVMX_CHECK_BREAK(u 32GuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE);10424 HMVMX_CHECK_BREAK(uGuestCR0 & X86_CR0_PG, VMX_IGS_CR0_PG_LONGMODE); 10425 HMVMX_CHECK_BREAK(uGuestCR4 & X86_CR4_PAE, VMX_IGS_CR4_PAE_LONGMODE); 11549 10426 } 11550 10427 11551 10428 if ( !fLongModeGuest 11552 && (u 32GuestCR4 & X86_CR4_PCIDE))10429 && (uGuestCR4 & X86_CR4_PCIDE)) 11553 10430 { 11554 10431 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE); … … 11622 10499 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH); 11623 10500 HMVMX_CHECK_BREAK( fUnrestrictedGuest 11624 || !(u 32GuestCR0 & X86_CR0_PG)10501 || !(uGuestCR0 & X86_CR0_PG) 11625 10502 || RT_BOOL(u64Val & MSR_K6_EFER_LMA) == RT_BOOL(u64Val & MSR_K6_EFER_LME), 11626 10503 
VMX_IGS_EFER_LMA_LME_MISMATCH); … … 12048 10925 { 12049 10926 /* 12050 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves and12051 * anything we inject is not going to cause a VM-exit directly for the event being injected.12052 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".10927 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we 10928 * injected it ourselves and anything we inject is not going to cause a VM-exit directly 10929 * for the event being injected[1]. Go ahead and dispatch the NMI to the host[2]. 12053 10930 * 12054 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State". 10931 * [1] -- See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery". 10932 * [2] -- See Intel spec. 27.5.5 "Updating Non-Register State". 12055 10933 */ 12056 10934 VMXDispatchHostNmi(); … … 12123 11001 default: 12124 11002 { 12125 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);12126 AssertRCReturn(rc, rc);12127 12128 11003 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk); 12129 11004 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) … … 12133 11008 Assert(CPUMIsGuestInRealModeEx(pMixedCtx)); 12134 11009 12135 rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11010 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 11011 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12136 11012 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 12137 11013 AssertRCReturn(rc, rc); … … 12139 11015 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 12140 11016 0 /* GCPtrFaultAddress */); 12141 AssertRCReturn(rc, rc);12142 11017 } 12143 11018 else … … 12200 11075 * It is therefore safe to unblock STI and deliver the NMI ourselves. See @bugref{7445}. 
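/*
 * The fSetCR0/fZapCR0 (and fSetCR4/fZapCR4) masks in the guest-state checks
 * above come from the IA32_VMX_CR0_FIXED0/FIXED1 MSRs: a bit set in both MSRs
 * must be 1 in guest CR0, and a bit clear in both must be 0 (the code above
 * additionally relaxes PE/PG for unrestricted guests).  A minimal standalone
 * recreation of that check; the MSR values below are examples only, real ones
 * come from RDMSR on the host.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Returns true if uCr0 satisfies the VMX fixed-bit constraints. */
static bool sketchIsCr0ValidForVmx(uint64_t uCr0, uint64_t uFixed0, uint64_t uFixed1)
{
    uint64_t const fMustBeSet = uFixed0 & uFixed1;   /* "fSetCR0" above. */
    uint64_t const fMayBeSet  = uFixed0 | uFixed1;   /* "fZapCR0" above. */
    return (uCr0 & fMustBeSet) == fMustBeSet         /* all required bits present... */
        && (uCr0 & ~fMayBeSet) == 0;                 /* ...and no forbidden bits set. */
}

int main(void)
{
    uint64_t const uFixed0 = UINT64_C(0x80000021);   /* example: PG, NE, PE required. */
    uint64_t const uFixed1 = UINT64_C(0xFFFFFFFF);   /* example: all low 32 bits allowed. */
    printf("CR0=0x80000031 valid: %d\n", sketchIsCr0ValidForVmx(UINT64_C(0x80000031), uFixed0, uFixed1));
    printf("CR0=0x00000010 valid: %d\n", sketchIsCr0ValidForVmx(UINT64_C(0x00000010), uFixed0, uFixed1));
    return 0;
}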
12201 11076 */ 12202 uint32_t uIntrState = 0;12203 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, & uIntrState);11077 uint32_t fIntrState = 0; 11078 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState); 12204 11079 AssertRCReturn(rc, rc); 12205 11080 12206 bool const fBlockSti = RT_BOOL( uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);11081 bool const fBlockSti = RT_BOOL(fIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI); 12207 11082 if ( fBlockSti 12208 11083 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)) … … 12225 11100 { 12226 11101 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12227 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);12228 11102 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 12229 11103 } … … 12236 11110 { 12237 11111 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12238 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);12239 11112 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient); 12240 11113 } … … 12247 11120 { 12248 11121 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12249 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);12250 11122 Assert(pMixedCtx == &pVCpu->cpum.GstCtx); 12251 11123 … … 12254 11126 */ 12255 11127 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12256 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 12257 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 12258 rc |= hmR0VmxSaveGuestCs(pVCpu); 11128 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 11129 | CPUMCTX_EXTRN_CS); 12259 11130 AssertRCReturn(rc, rc); 12260 11131 … … 12280 11151 rcStrict = VERR_EM_INTERPRETER; 12281 11152 } 12282 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);12283 11153 } 12284 11154 else … … 12288 11158 */ 12289 11159 Assert(pMixedCtx == &pVCpu->cpum.GstCtx); 12290 int rc2 = hmR0Vmx SaveGuestRegsForIemInterpreting(pVCpu);11160 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 12291 11161 AssertRCReturn(rc2, rc2); 12292 11162 … … 12295 11165 12296 11166 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 12297 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);11167 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 12298 11168 12299 11169 Log4(("CpuIdExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 12311 11181 { 12312 11182 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12313 int rc = hmR0Vmx SaveGuestCR4(pVCpu, pMixedCtx);11183 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4); 12314 11184 AssertRCReturn(rc, rc); 12315 11185 … … 12328 11198 { 12329 11199 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12330 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /* Needed for CPL < 0 only, really. */ 12331 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/); 11200 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 12332 11201 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12333 11202 AssertRCReturn(rc, rc); 11203 12334 11204 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtsc(pVCpu, pVmxTransient->cbInstr); 12335 11205 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 12336 11206 { 12337 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */ 11207 /* If we get a spurious VM-exit when offsetting is enabled, 11208 we must reset offsetting on VM-reentry. See @bugref{6634}. 
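/*
 * Context for the TSC-offsetting remark above: with the "use TSC offsetting"
 * control active and RDTSC exiting disabled, a guest RDTSC returns host TSC
 * plus the VMCS TSC offset (Intel SDM, TSC offsetting), so the offset is
 * chosen such that the guest's TSC resumes where it last stopped.  Minimal
 * sketch of that calculation; rdHostTsc() is a made-up stand-in for reading
 * the hardware TSC.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t g_uFakeHostTsc = UINT64_C(5000000000);

static uint64_t rdHostTsc(void)            /* stand-in for RDTSC on the host. */
{
    return g_uFakeHostTsc += 1000;         /* pretend time advances on every read. */
}

/* Offset such that (host TSC + offset) equals the TSC value the guest last saw. */
static uint64_t sketchComputeTscOffset(uint64_t uLastGuestTsc)
{
    return uLastGuestTsc - rdHostTsc();    /* wraps modulo 2^64, which is what we want. */
}

int main(void)
{
    uint64_t const uLastGuestTsc = UINT64_C(123456789);
    uint64_t const uOffset       = sketchComputeTscOffset(uLastGuestTsc);
    /* What the guest would observe on its next RDTSC under offsetting: */
    printf("guest TSC resumes at %llu\n", (unsigned long long)(rdHostTsc() + uOffset));
    return 0;
}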
*/ 12338 11209 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING) 12339 11210 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true; 11211 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 11212 | HM_CHANGED_GUEST_RFLAGS); 12340 11213 } 12341 11214 else if (rcStrict == VINF_IEM_RAISED_XCPT) 11215 { 12342 11216 rcStrict = VINF_SUCCESS; 12343 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);12344 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);11217 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK); 11218 } 12345 11219 return rcStrict; 12346 11220 } … … 12353 11227 { 12354 11228 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12355 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /* Needed for CPL < 0 only, really. */ 12356 rc |= hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/); 12357 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */ 11229 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 11230 | CPUMCTX_EXTRN_TSC_AUX); 12358 11231 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12359 11232 AssertRCReturn(rc, rc); 11233 12360 11234 VBOXSTRICTRC rcStrict = IEMExecDecodedRdtscp(pVCpu, pVmxTransient->cbInstr); 12361 11235 if (RT_LIKELY(rcStrict == VINF_SUCCESS)) 12362 11236 { 12363 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */ 11237 /* If we get a spurious VM-exit when offsetting is enabled, 11238 we must reset offsetting on VM-reentry. See @bugref{6634}. */ 12364 11239 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING) 12365 11240 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true; 11241 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 11242 | HM_CHANGED_GUEST_RFLAGS); 12366 11243 } 12367 11244 else if (rcStrict == VINF_IEM_RAISED_XCPT) 11245 { 12368 11246 rcStrict = VINF_SUCCESS; 12369 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST);12370 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);11247 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_XCPT_RAISED_MASK); 11248 } 12371 11249 return rcStrict; 12372 11250 } … … 12379 11257 { 12380 11258 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12381 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); 12382 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); 11259 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR4 11260 | CPUMCTX_EXTRN_CR0 11261 | CPUMCTX_EXTRN_RFLAGS 11262 | CPUMCTX_EXTRN_SS); 12383 11263 AssertRCReturn(rc, rc); 12384 11264 … … 12395 11275 rc = VERR_EM_INTERPRETER; 12396 11276 } 12397 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);12398 11277 return rc; 12399 11278 } … … 12406 11285 { 12407 11286 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12408 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitVmcall);12409 11287 12410 11288 VBOXSTRICTRC rcStrict = VERR_VMX_IPE_3; 12411 11289 if (EMAreHypercallInstructionsEnabled(pVCpu)) 12412 11290 { 12413 #if 0 12414 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 12415 #else 12416 /* Aggressive state sync. for now. */ 12417 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 12418 rc |= hmR0VmxSaveGuestRflags(pVCpu,pMixedCtx); /* For CPL checks in gimHvHypercall() & gimKvmHypercall() */ 12419 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* For long-mode checks in gimKvmHypercall(). 
*/ 11291 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_RIP 11292 | CPUMCTX_EXTRN_RFLAGS 11293 | CPUMCTX_EXTRN_CR0 11294 | CPUMCTX_EXTRN_SS 11295 | CPUMCTX_EXTRN_CS 11296 | CPUMCTX_EXTRN_EFER); 12420 11297 AssertRCReturn(rc, rc); 12421 #endif12422 11298 12423 11299 /* Perform the hypercall. */ … … 12437 11313 } 12438 11314 else 12439 Log4 (("hmR0VmxExitVmcall:Hypercalls not enabled\n"));11315 Log4Func(("Hypercalls not enabled\n")); 12440 11316 12441 11317 /* If hypercalls are disabled or the hypercall failed for some reason, raise #UD and continue. */ … … 12460 11336 12461 11337 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 12462 rc |= hmR0Vmx SaveGuestControlRegs(pVCpu, pMixedCtx);11338 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK); 12463 11339 AssertRCReturn(rc, rc); 12464 11340 … … 12469 11345 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n", 12470 11346 pVmxTransient->uExitQualification, VBOXSTRICTRC_VAL(rcStrict))); 12471 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);12472 11347 return rcStrict; 12473 11348 } … … 12480 11355 { 12481 11356 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12482 int rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12483 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12484 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11357 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11358 | CPUMCTX_EXTRN_RFLAGS 11359 | CPUMCTX_EXTRN_SS); 12485 11360 AssertRCReturn(rc, rc); 12486 11361 … … 12505 11380 { 12506 11381 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 12507 int rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12508 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12509 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11382 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11383 | CPUMCTX_EXTRN_RFLAGS 11384 | CPUMCTX_EXTRN_SS); 12510 11385 AssertRCReturn(rc, rc); 12511 11386 … … 12521 11396 if ( rc == VINF_EM_HALT 12522 11397 && EMMonitorWaitShouldContinue(pVCpu, pMixedCtx)) 12523 {12524 11398 rc = VINF_SUCCESS; 12525 }12526 11399 } 12527 11400 else … … 12543 11416 { 12544 11417 /* 12545 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never 12546 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by 12547 * executing VMCALL in VMX root operation. If we get here, something funny is going on. 12548 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment". 11418 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root 11419 * mode. In theory, we should never get this VM-exit. This can happen only if dual-monitor 11420 * treatment of SMI and VMX is enabled, which can (only?) be done by executing VMCALL in 11421 * VMX root operation. If we get here, something funny is going on. 11422 * 11423 * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment". 12549 11424 */ 12550 11425 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12560 11435 { 12561 11436 /* 12562 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX 12563 * root operation. Only an STM (SMM transfer monitor) would get this VM-exit when we (the executive monitor) execute a VMCALL 12564 * in VMX root mode or receive an SMI. If we get here, something funny is going on. 12565 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 
25.3 "Other Causes of VM-Exits" 11437 * This can only happen if we support dual-monitor treatment of SMI, which can be activated 11438 * by executing VMCALL in VMX root operation. Only an STM (SMM transfer monitor) would get 11439 * this VM-exit when we (the executive monitor) execute a VMCALL in VMX root mode or receive 11440 * an SMI. If we get here, something funny is going on. 11441 * 11442 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment" 11443 * See Intel spec. 25.3 "Other Causes of VM-Exits" 12566 11444 */ 12567 11445 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); … … 12589 11467 { 12590 11468 /* 12591 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently12592 * don't make use of it (see hmR0VmxLoadGuestActivityState())as our guests don't have direct access to the host LAPIC.11469 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. 11470 * We don't make use of it as our guests don't have direct access to the host LAPIC. 12593 11471 * See Intel spec. 25.3 "Other Causes of VM-exits". 12594 11472 */ … … 12689 11567 12690 11568 int rc = hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 12691 rc |= hmR0Vmx SaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, false /*fNeedRsp*/);12692 rc |= hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);11569 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 11570 | CPUMCTX_EXTRN_CR4); 12693 11571 AssertRCReturn(rc, rc); 12694 11572 12695 11573 VBOXSTRICTRC rcStrict = IEMExecDecodedXsetbv(pVCpu, pVmxTransient->cbInstr); 12696 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST); 11574 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS 11575 : HM_CHANGED_XCPT_RAISED_MASK); 12697 11576 12698 11577 pVCpu->hm.s.fLoadSaveGuestXcr0 = (pMixedCtx->cr4 & X86_CR4_OSXSAVE) && pMixedCtx->aXcr[0] != ASMGetXcr0(); … … 12719 11598 HMVMX_EXIT_NSRC_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient) 12720 11599 { 12721 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 12722 AssertRCReturn(rc, rc); 12723 12724 rc = hmR0VmxCheckVmcsCtls(pVCpu); 11600 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 11601 rc |= hmR0VmxCheckVmcsCtls(pVCpu); 12725 11602 AssertRCReturn(rc, rc); 12726 11603 … … 12729 11606 12730 11607 #ifdef VBOX_STRICT 12731 uint32_t uIntrState;11608 uint32_t fIntrState; 12732 11609 RTHCUINTREG uHCReg; 12733 11610 uint64_t u64Val; … … 12737 11614 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient); 12738 11615 rc |= hmR0VmxReadEntryInstrLenVmcs(pVmxTransient); 12739 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, & uIntrState);11616 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &fIntrState); 12740 11617 AssertRCReturn(rc, rc); 12741 11618 … … 12744 11621 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode)); 12745 11622 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr)); 12746 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));11623 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", fIntrState)); 12747 11624 12748 11625 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc); … … 12829 11706 12830 11707 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. 
*/ 12831 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);12832 11708 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT) 12833 11709 return VERR_EM_INTERPRETER; … … 12845 11721 12846 11722 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */ 12847 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);12848 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12849 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11723 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11724 | CPUMCTX_EXTRN_RFLAGS 11725 | CPUMCTX_EXTRN_SS); 12850 11726 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 12851 { 12852 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx); 12853 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 12854 } 11727 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS); 12855 11728 AssertRCReturn(rc, rc); 12856 Log4 (("ecx=%#RX32\n", pMixedCtx->ecx));11729 Log4Func(("ecx=%#RX32\n", pMixedCtx->ecx)); 12857 11730 12858 11731 #ifdef VBOX_STRICT … … 12905 11778 12906 11779 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */ 12907 rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12908 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);12909 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);11780 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0 11781 | CPUMCTX_EXTRN_RFLAGS 11782 | CPUMCTX_EXTRN_SS); 12910 11783 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)) 12911 { 12912 rc |= hmR0VmxSaveGuestLazyMsrs(pVCpu, pMixedCtx); 12913 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); 12914 } 11784 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_ALL_MSRS); 12915 11785 AssertRCReturn(rc, rc); 12916 Log4 (("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax));11786 Log4Func(("ecx=%#RX32 edx:eax=%#RX32:%#RX32\n", pMixedCtx->ecx, pMixedCtx->edx, pMixedCtx->eax)); 12917 11787 12918 11788 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx)); … … 12934 11804 * EMInterpretWrmsr() changes it. 12935 11805 */ 12936 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE);11806 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 12937 11807 } 12938 11808 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */ … … 12945 11815 * the other bits as well, SCE and NXE. See @bugref{7368}. 12946 11816 */ 12947 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_EFER_MSR | HM_CHANGED_VMX_ENTRY_CTLS | HM_CHANGED_VMX_EXIT_CTLS); 11817 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_EFER_MSR 11818 | HM_CHANGED_VMX_ENTRY_CTLS 11819 | HM_CHANGED_VMX_EXIT_CTLS); 12948 11820 } 12949 11821 … … 12953 11825 switch (pMixedCtx->ecx) 12954 11826 { 12955 /* 12956 * For SYSENTER CS, EIP, ESP MSRs, we set both the flags here so we don't accidentally 12957 * overwrite the changed guest-CPU context value while going to ring-3, see @bufref{8745}. 
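/*
 * Background for hmR0VmxAddAutoLoadStoreMsr() and hmR0VmxIsAutoLoadStoreGuestMsr()
 * used around the RDMSR/WRMSR handling here (TSC_AUX, SPEC_CTRL, EFER): the VMX
 * auto-load/store MSR areas are, per the Intel SDM, arrays of 16-byte entries --
 * a 32-bit MSR index, 32 reserved bits and the 64-bit value -- which the CPU
 * loads on VM-entry and stores on VM-exit.  Hypothetical sketch of an
 * "add or update" helper over such an array; not the VirtualBox code itself.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct SKETCHAUTOMSR
{
    uint32_t u32Msr;       /* MSR index. */
    uint32_t u32Reserved;  /* must be zero. */
    uint64_t u64Value;     /* value loaded on entry / slot the CPU stores into on exit. */
} SKETCHAUTOMSR;

/* Append the MSR if it is not in the area yet, otherwise refresh its value.
   Returns the new entry count, or 0 if the area is full. */
static uint32_t sketchAddAutoMsr(SKETCHAUTOMSR *paMsrs, uint32_t cMsrs, uint32_t cMax,
                                 uint32_t u32Msr, uint64_t u64Value)
{
    for (uint32_t i = 0; i < cMsrs; i++)
        if (paMsrs[i].u32Msr == u32Msr)
        {
            paMsrs[i].u64Value = u64Value;
            return cMsrs;
        }
    if (cMsrs >= cMax)
        return 0;                          /* a real caller would fail VM-entry setup here. */
    paMsrs[cMsrs].u32Msr      = u32Msr;
    paMsrs[cMsrs].u32Reserved = 0;
    paMsrs[cMsrs].u64Value    = u64Value;
    return cMsrs + 1;
}

int main(void)
{
    SKETCHAUTOMSR aMsrs[8];
    uint32_t cMsrs = 0;
    cMsrs = sketchAddAutoMsr(aMsrs, cMsrs, 8, 0xC0000103 /* example: IA32_TSC_AUX */, 1);
    cMsrs = sketchAddAutoMsr(aMsrs, cMsrs, 8, 0xC0000103, 2);   /* updates, does not append. */
    printf("entries=%u value=%llu\n", cMsrs, (unsigned long long)aMsrs[0].u64Value);
    return 0;
}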
12958 */ 12959 case MSR_IA32_SYSENTER_CS: 12960 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_CS_MSR); 12961 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR); 12962 break; 12963 case MSR_IA32_SYSENTER_EIP: 12964 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); 12965 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR); 12966 break; 12967 case MSR_IA32_SYSENTER_ESP: 12968 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); 12969 HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR); 12970 break; 12971 case MSR_K8_FS_BASE: RT_FALL_THRU(); 12972 case MSR_K8_GS_BASE: HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_SEGMENT_REGS); break; 12973 case MSR_K6_EFER: /* already handled above */ break; 11827 case MSR_IA32_SYSENTER_CS: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_CS_MSR); break; 11828 case MSR_IA32_SYSENTER_EIP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_EIP_MSR); break; 11829 case MSR_IA32_SYSENTER_ESP: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_SYSENTER_ESP_MSR); break; 11830 case MSR_K8_FS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_FS); break; 11831 case MSR_K8_GS_BASE: ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_GS); break; 11832 case MSR_K6_EFER: /* Nothing to do, already handled above. */ break; 12974 11833 default: 12975 11834 { 12976 11835 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)) 12977 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMX_GUEST_AUTO_MSRS);11836 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_AUTO_MSRS); 12978 11837 else if (hmR0VmxIsLazyGuestMsr(pVCpu, pMixedCtx->ecx)) 12979 HMCPU_CF_SET(pVCpu, HM_CHANGED_VMM_GUEST_LAZY_MSRS);11838 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_VMX_GUEST_LAZY_MSRS); 12980 11839 break; 12981 11840 } … … 13003 11862 if (hmR0VmxIsAutoLoadStoreGuestMsr(pVCpu, pMixedCtx->ecx)) 13004 11863 { 13005 /* EFER writes are always intercepted, see hmR0Vmx LoadGuestMsrs(). */11864 /* EFER writes are always intercepted, see hmR0VmxExportGuestMsrs(). */ 13006 11865 if (pMixedCtx->ecx != MSR_K6_EFER) 13007 11866 { … … 13040 11899 { 13041 11900 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 13042 13043 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);11901 /** @todo The guest has likely hit a contended spinlock. We might want to 11902 * poke a schedule different guest VCPU. 
*/ 13044 11903 return VINF_EM_RAW_INTERRUPT; 13045 11904 } … … 13078 11937 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS(); 13079 11938 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2); 11939 13080 11940 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13081 11941 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 11942 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13082 11943 AssertRCReturn(rc, rc); 13083 11944 11945 VBOXSTRICTRC rcStrict; 11946 PVM pVM = pVCpu->CTX_SUFF(pVM); 13084 11947 RTGCUINTPTR const uExitQualification = pVmxTransient->uExitQualification; 13085 uint32_t const uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification); 13086 PVM pVM = pVCpu->CTX_SUFF(pVM); 13087 VBOXSTRICTRC rcStrict; 13088 rc = hmR0VmxSaveGuestRegsForIemExec(pVCpu, pMixedCtx, false /*fMemory*/, true /*fNeedRsp*/); 11948 uint32_t const uAccessType = VMX_EXIT_QUAL_CRX_ACCESS(uExitQualification); 13089 11949 switch (uAccessType) 13090 11950 { 13091 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */ 13092 { 13093 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13094 AssertRCReturn(rc, rc); 13095 11951 case VMX_EXIT_QUAL_CRX_ACCESS_WRITE: /* MOV to CRx */ 11952 { 13096 11953 rcStrict = IEMExecDecodedMovCRxWrite(pVCpu, pVmxTransient->cbInstr, 13097 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), 13098 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification)); 13099 AssertMsg( rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE 11954 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification), 11955 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification)); 11956 AssertMsg( rcStrict == VINF_SUCCESS 11957 || rcStrict == VINF_IEM_RAISED_XCPT 11958 || rcStrict == VINF_PGM_CHANGE_MODE 13100 11959 || rcStrict == VINF_PGM_SYNC_CR3, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13101 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)) 11960 11961 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)) 13102 11962 { 13103 case 0: /* CR0 */ 13104 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 11963 case 0: 11964 { 11965 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 11966 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Write); 13105 11967 Log4(("CRX CR0 write rcStrict=%Rrc CR0=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr0)); 13106 11968 break; 13107 case 2: /* CR2 */ 11969 } 11970 11971 case 2: 11972 { 11973 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Write); 13108 11974 /* Nothing to do here, CR2 it's not part of the VMCS. 
*/ 13109 11975 break; 13110 case 3: /* CR3 */ 11976 } 11977 11978 case 3: 11979 { 13111 11980 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx) || pVCpu->hm.s.fUsingDebugLoop); 13112 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR3); 11981 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Write); 11982 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR3); 13113 11983 Log4(("CRX CR3 write rcStrict=%Rrc CR3=%#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr3)); 13114 11984 break; 13115 case 4: /* CR4 */ 13116 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR4); 13117 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", 13118 VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0)); 11985 } 11986 11987 case 4: 11988 { 11989 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Write); 11990 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR4); 11991 Log4(("CRX CR4 write rc=%Rrc CR4=%#RX64 fLoadSaveGuestXcr0=%u\n", VBOXSTRICTRC_VAL(rcStrict), 11992 pMixedCtx->cr4, pVCpu->hm.s.fLoadSaveGuestXcr0)); 13119 11993 break; 13120 case 8: /* CR8 */ 11994 } 11995 11996 case 8: 11997 { 11998 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Write); 13121 11999 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)); 13122 /* CR8 contains the APIC TPR. Was updated by IEMExecDecodedMovCRxWrite(). */ 13123 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_APIC_STATE); 12000 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_APIC_TPR); 13124 12001 break; 12002 } 13125 12003 default: 13126 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL IFICATION_CRX_REGISTER(uExitQualification)));12004 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification))); 13127 12005 break; 13128 12006 } 13129 13130 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);13131 12007 break; 13132 12008 } 13133 12009 13134 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */ 13135 { 13136 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13137 AssertRCReturn(rc, rc); 13138 12010 case VMX_EXIT_QUAL_CRX_ACCESS_READ: /* MOV from CRx */ 12011 { 13139 12012 Assert( !pVM->hm.s.fNestedPaging 13140 12013 || !CPUMIsGuestPagingEnabledEx(pMixedCtx) 13141 12014 || pVCpu->hm.s.fUsingDebugLoop 13142 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3); 13143 12015 || VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 3); 13144 12016 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. 
*/ 13145 Assert( VMX_EXIT_QUAL IFICATION_CRX_REGISTER(uExitQualification) != 812017 Assert( VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification) != 8 13146 12018 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)); 13147 12019 13148 12020 rcStrict = IEMExecDecodedMovCRxRead(pVCpu, pVmxTransient->cbInstr, 13149 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification), 13150 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)); 13151 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]); 13153 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), 12021 VMX_EXIT_QUAL_CRX_GENREG(uExitQualification), 12022 VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)); 12023 AssertMsg( rcStrict == VINF_SUCCESS 12024 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12025 #ifdef VBOX_WITH_STATISTICS 12026 switch (VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification)) 12027 { 12028 case 0: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR0Read); break; 12029 case 2: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR2Read); break; 12030 case 3: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR3Read); break; 12031 case 4: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR4Read); break; 12032 case 8: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCR8Read); break; 12033 } 12034 #endif 12035 Log4(("CRX CR%d Read access rcStrict=%Rrc\n", VMX_EXIT_QUAL_CRX_REGISTER(uExitQualification), 13154 12036 VBOXSTRICTRC_VAL(rcStrict))); 13155 if (VMX_EXIT_QUAL IFICATION_CRX_GENREG(uExitQualification) == X86_GREG_xSP)13156 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RSP);12037 if (VMX_EXIT_QUAL_CRX_GENREG(uExitQualification) == X86_GREG_xSP) 12038 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RSP); 13157 12039 break; 13158 12040 } 13159 12041 13160 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */ 13161 { 13162 AssertRCReturn(rc, rc); 12042 case VMX_EXIT_QUAL_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */ 12043 { 13163 12044 rcStrict = IEMExecDecodedClts(pVCpu, pVmxTransient->cbInstr); 13164 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13165 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 12045 AssertMsg( rcStrict == VINF_SUCCESS 12046 || rcStrict == VINF_IEM_RAISED_XCPT, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 12047 12048 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 13166 12049 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts); 13167 12050 Log4(("CRX CLTS rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict))); … … 13169 12052 } 13170 12053 13171 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */ 13172 { 13173 AssertRCReturn(rc, rc); 12054 case VMX_EXIT_QUAL_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */ 12055 { 13174 12056 rcStrict = IEMExecDecodedLmsw(pVCpu, pVmxTransient->cbInstr, 13175 VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification)); 13176 AssertMsg(rcStrict == VINF_SUCCESS || rcStrict == VINF_IEM_RAISED_XCPT || rcStrict == VINF_PGM_CHANGE_MODE, 12057 VMX_EXIT_QUAL_CRX_LMSW_DATA(uExitQualification)); 12058 AssertMsg( rcStrict == VINF_SUCCESS 12059 || rcStrict == VINF_IEM_RAISED_XCPT 12060 || rcStrict == VINF_PGM_CHANGE_MODE, 13177 12061 ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict))); 13178 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_CR0); 
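/*
 * The VMX_EXIT_QUAL_CRX_* macros used in the MOV-CRx handler above decode the
 * exit qualification for control-register accesses.  As I read the Intel SDM's
 * exit-qualification format for these exits, bits 3:0 hold the CR number,
 * bits 5:4 the access type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW)
 * and bits 11:8 the general-purpose register involved; verify against the SDM
 * table before relying on the layout.  Compact standalone decoder with
 * hypothetical SKETCH_* names:
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_QUAL_CRX_REGISTER(a_uQual)  ((uint32_t)((a_uQual) & 0xf))
#define SKETCH_QUAL_CRX_ACCESS(a_uQual)    ((uint32_t)(((a_uQual) >> 4) & 0x3))
#define SKETCH_QUAL_CRX_GENREG(a_uQual)    ((uint32_t)(((a_uQual) >> 8) & 0xf))

int main(void)
{
    /* Example qualification: access type 0 (MOV to CR), CR3, general register 1 (RCX). */
    uint64_t const uQual = (UINT64_C(1) << 8) | (UINT64_C(0) << 4) | UINT64_C(3);
    static const char * const s_apszAccess[] = { "MOV to CR", "MOV from CR", "CLTS", "LMSW" };
    printf("%s, CR%u, GPR#%u\n",
           s_apszAccess[SKETCH_QUAL_CRX_ACCESS(uQual)],
           SKETCH_QUAL_CRX_REGISTER(uQual),
           SKETCH_QUAL_CRX_GENREG(uQual));
    return 0;
}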
12062 12063 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_CR0); 13179 12064 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw); 13180 12065 Log4(("CRX LMSW rcStrict=%d\n", VBOXSTRICTRC_VAL(rcStrict))); … … 13187 12072 } 13188 12073 13189 HMCPU_CF_SET(pVCpu, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS : HM_CHANGED_ALL_GUEST); 12074 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, rcStrict != VINF_IEM_RAISED_XCPT ? HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS 12075 : HM_CHANGED_XCPT_RAISED_MASK); 13190 12076 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2); 13191 12077 NOREF(pVM); … … 13206 12092 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13207 12093 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13208 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx); 13209 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); 13210 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13211 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 12094 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK 12095 | CPUMCTX_EXTRN_SREG_MASK 12096 | CPUMCTX_EXTRN_EFER); 13212 12097 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */ 13213 12098 AssertRCReturn(rc, rc); 13214 12099 13215 12100 /* Refer Intel spec. 27-5. "Exit Qualifications for I/O Instructions" for the format. */ 13216 uint32_t uIOPort = VMX_EXIT_QUAL IFICATION_IO_PORT(pVmxTransient->uExitQualification);13217 uint8_t uIOWidth = VMX_EXIT_QUAL IFICATION_IO_WIDTH(pVmxTransient->uExitQualification);13218 bool fIOWrite = ( VMX_EXIT_QUAL IFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)13219 == VMX_EXIT_QUAL IFICATION_IO_DIRECTION_OUT);13220 bool fIOString = VMX_EXIT_QUAL IFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);12101 uint32_t uIOPort = VMX_EXIT_QUAL_IO_PORT(pVmxTransient->uExitQualification); 12102 uint8_t uIOWidth = VMX_EXIT_QUAL_IO_WIDTH(pVmxTransient->uExitQualification); 12103 bool fIOWrite = ( VMX_EXIT_QUAL_IO_DIRECTION(pVmxTransient->uExitQualification) 12104 == VMX_EXIT_QUAL_IO_DIRECTION_OUT); 12105 bool fIOString = VMX_EXIT_QUAL_IO_IS_STRING(pVmxTransient->uExitQualification); 13221 12106 bool fGstStepping = RT_BOOL(pMixedCtx->eflags.Bits.u1TF); 13222 12107 bool fDbgStepping = pVCpu->hm.s.fSingleInstruction; … … 13242 12127 { 13243 12128 /* I/O operation lookup arrays. */ 13244 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */ 13245 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */ 13246 12129 static uint32_t const s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */ 12130 static uint32_t const s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving result in AL/AX/EAX. */ 13247 12131 uint32_t const cbValue = s_aIOSizes[uIOWidth]; 13248 12132 uint32_t const cbInstr = pVmxTransient->cbInstr; … … 13263 12147 { 13264 12148 int rc2 = hmR0VmxReadExitInstrInfoVmcs(pVmxTransient); 13265 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). 
*/13266 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);13267 12149 AssertRCReturn(rc2, rc2); 13268 12150 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_VMX_IPE_3); 13269 12151 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2); 13270 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;13271 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);12152 IEMMODE const enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize; 12153 bool const fRep = VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification); 13272 12154 if (fIOWrite) 13273 12155 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr, … … 13285 12167 } 13286 12168 else 13287 {13288 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */13289 int rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);13290 AssertRCReturn(rc2, rc2);13291 12169 rcStrict = IEMExecOne(pVCpu); 13292 } 13293 /** @todo IEM needs to be setting these flags somehow. */ 13294 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP); 12170 12171 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 13295 12172 fUpdateRipAlready = true; 13296 12173 } … … 13300 12177 * IN/OUT - I/O instruction. 13301 12178 */ 13302 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r')); 12179 Log4(("CS:RIP=%04x:%08RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, 12180 fIOWrite ? 'w' : 'r')); 13303 12181 uint32_t const uAndVal = s_aIOOpAnd[uIOWidth]; 13304 Assert(!VMX_EXIT_QUAL IFICATION_IO_IS_REP(pVmxTransient->uExitQualification));12182 Assert(!VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification)); 13305 12183 if (fIOWrite) 13306 12184 { … … 13328 12206 { 13329 12207 hmR0VmxAdvanceGuestRipBy(pVCpu, pMixedCtx, cbInstr); 13330 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP);12208 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP); 13331 12209 } 13332 12210 13333 12211 /* 13334 * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru while booting Fedora 17 64-bit guest. 12212 * INS/OUTS with REP prefix updates RFLAGS, can be observed with triple-fault guru 12213 * while booting Fedora 17 64-bit guest. 12214 * 13335 12215 * See Intel Instruction reference for REP/REPE/REPZ/REPNE/REPNZ. 13336 12216 */ … … 13338 12218 { 13339 12219 /** @todo Single-step for INS/OUTS with REP prefix? */ 13340 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);12220 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS); 13341 12221 } 13342 12222 else if ( !fDbgStepping 13343 12223 && fGstStepping) 13344 12224 { 13345 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 12225 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 12226 AssertRCReturn(rc, rc); 13346 12227 } 13347 12228 … … 13351 12232 * Note that the I/O breakpoint type is undefined if CR4.DE is 0. 
13352 12233 */ 13353 int rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);13354 AssertRCReturn(rc 2, rc2);12234 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7); 12235 AssertRCReturn(rc, rc); 13355 12236 13356 12237 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the … … 13377 12258 ASMSetDR6(pMixedCtx->dr[6]); 13378 12259 if (pMixedCtx->dr[7] != uDr7) 13379 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);12260 pVCpu->hm.s.fCtxChanged |= HM_CHANGED_GUEST_DR7; 13380 12261 13381 12262 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx); … … 13420 12301 * Frequent exit or something needing probing. Get state and call EMHistoryExec. 13421 12302 */ 13422 int rc2 = hmR0Vmx SaveGuestRegsForIemInterpreting(pVCpu);12303 int rc2 = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 13423 12304 AssertRCReturn(rc2, rc2); 13424 12305 STAM_COUNTER_INC(!fIOString ? fIOWrite ? &pVCpu->hm.s.StatExitIOWrite : &pVCpu->hm.s.StatExitIORead … … 13426 12307 Log4(("IOExit/%u: %04x:%08RX64: %s%s%s %#x LB %u -> EMHistoryExec\n", 13427 12308 pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, 13428 VMX_EXIT_QUAL IFICATION_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "",12309 VMX_EXIT_QUAL_IO_IS_REP(pVmxTransient->uExitQualification) ? "REP " : "", 13429 12310 fIOWrite ? "OUT" : "IN", fIOString ? "S" : "", uIOPort, uIOWidth)); 13430 12311 13431 12312 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 13432 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);12313 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 13433 12314 13434 12315 Log4(("IOExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 13451 12332 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13452 12333 AssertRCReturn(rc, rc); 13453 if (VMX_EXIT_QUAL IFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)12334 if (VMX_EXIT_QUAL_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_TASK_SWITCH_TYPE_IDT) 13454 12335 { 13455 12336 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient); … … 13534 12415 } 13535 12416 13536 #if 0 13537 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now 13538 * just sync the whole thing. */ 13539 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 13540 #else 13541 /* Aggressive state sync. for now. */ 13542 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 13543 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13544 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13545 #endif 12417 /* IOMMIOPhysHandler() below may call into IEM, save the necessary state. */ 12418 int rc = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13546 12419 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13547 12420 AssertRCReturn(rc, rc); 13548 12421 13549 12422 /* See Intel spec. 
27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Phyiscal Addresses" */ 13550 uint32_t uAccessType = VMX_EXIT_QUAL IFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);12423 uint32_t uAccessType = VMX_EXIT_QUAL_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification); 13551 12424 VBOXSTRICTRC rcStrict2; 13552 12425 switch (uAccessType) … … 13556 12429 { 13557 12430 AssertMsg( !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW) 13558 || VMX_EXIT_QUAL IFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR,12431 || VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) != XAPIC_OFF_TPR, 13559 12432 ("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n")); 13560 12433 13561 12434 RTGCPHYS GCPhys = pVCpu->hm.s.vmx.u64MsrApicBase; /* Always up-to-date, u64MsrApicBase is not part of the VMCS. */ 13562 12435 GCPhys &= PAGE_BASE_GC_MASK; 13563 GCPhys += VMX_EXIT_QUAL IFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);12436 GCPhys += VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification); 13564 12437 PVM pVM = pVCpu->CTX_SUFF(pVM); 13565 Log4 (("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,13566 VMX_EXIT_QUAL IFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));12438 Log4Func(("Linear access uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys, 12439 VMX_EXIT_QUAL_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification))); 13567 12440 13568 12441 rcStrict2 = IOMMMIOPhysHandler(pVM, pVCpu, 13569 12442 uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ ? 0 : X86_TRAP_PF_RW, 13570 12443 CPUMCTX2CORE(pMixedCtx), GCPhys); 13571 Log4 (("ApicAccess rcStrict2=%d\n", VBOXSTRICTRC_VAL(rcStrict2)));12444 Log4Func(("IOMMMIOPhysHandler returned %Rrc\n", VBOXSTRICTRC_VAL(rcStrict2))); 13572 12445 if ( rcStrict2 == VINF_SUCCESS 13573 12446 || rcStrict2 == VERR_PAGE_TABLE_NOT_PRESENT 13574 12447 || rcStrict2 == VERR_PAGE_NOT_PRESENT) 13575 12448 { 13576 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP13577 | HM_CHANGED_GUEST_RSP13578 | HM_CHANGED_GUEST_RFLAGS13579 | HM_CHANGED_GUEST_APIC_STATE);12449 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 12450 | HM_CHANGED_GUEST_RSP 12451 | HM_CHANGED_GUEST_RFLAGS 12452 | HM_CHANGED_GUEST_APIC_TPR); 13580 12453 rcStrict2 = VINF_SUCCESS; 13581 12454 } … … 13584 12457 13585 12458 default: 13586 Log4 (("ApicAccessuAccessType=%#x\n", uAccessType));12459 Log4Func(("uAccessType=%#x\n", uAccessType)); 13587 12460 rcStrict2 = VINF_EM_RAW_EMULATE_INSTR; 13588 12461 break; … … 13635 12508 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13636 12509 AssertRCReturn(rc, rc); 13637 if (VMX_EXIT_QUAL IFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)12510 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 13638 12511 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 13639 12512 else … … 13649 12522 */ 13650 12523 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13651 rc |= hmR0Vmx SaveGuestSegmentRegs(pVCpu, pMixedCtx);13652 rc |= hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);12524 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK 12525 | CPUMCTX_EXTRN_DR7); 13653 12526 AssertRCReturn(rc, rc); 13654 Log4 (("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));12527 Log4Func(("CS:RIP=%04x:%08RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip)); 13655 12528 13656 
12529 PVM pVM = pVCpu->CTX_SUFF(pVM); 13657 if (VMX_EXIT_QUAL IFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)12530 if (VMX_EXIT_QUAL_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUAL_DRX_DIRECTION_WRITE) 13658 12531 { 13659 12532 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), 13660 VMX_EXIT_QUAL IFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),13661 VMX_EXIT_QUAL IFICATION_DRX_GENREG(pVmxTransient->uExitQualification));12533 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification), 12534 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification)); 13662 12535 if (RT_SUCCESS(rc)) 13663 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_DEBUG);12536 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_DR7); 13664 12537 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite); 13665 12538 } … … 13667 12540 { 13668 12541 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), 13669 VMX_EXIT_QUAL IFICATION_DRX_GENREG(pVmxTransient->uExitQualification),13670 VMX_EXIT_QUAL IFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));12542 VMX_EXIT_QUAL_DRX_GENREG(pVmxTransient->uExitQualification), 12543 VMX_EXIT_QUAL_DRX_REGISTER(pVmxTransient->uExitQualification)); 13671 12544 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead); 13672 12545 } … … 13714 12587 * Get sufficent state and update the exit history entry. 13715 12588 */ 13716 RTGCPHYS GCPhys = 0;12589 RTGCPHYS GCPhys; 13717 12590 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys); 13718 13719 #if 0 13720 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */ 13721 #else 13722 /* Aggressive state sync. for now. */ 13723 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 13724 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13725 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13726 #endif 12591 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13727 12592 AssertRCReturn(rc, rc); 13728 12593 … … 13748 12613 { 13749 12614 /* Successfully handled MMIO operation. */ 13750 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP13751 | HM_CHANGED_GUEST_RSP13752 | HM_CHANGED_GUEST_RFLAGS13753 | HM_CHANGED_GUEST_APIC_STATE);12615 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 12616 | HM_CHANGED_GUEST_RSP 12617 | HM_CHANGED_GUEST_RFLAGS 12618 | HM_CHANGED_GUEST_APIC_TPR); 13754 12619 rcStrict = VINF_SUCCESS; 13755 12620 } … … 13761 12626 */ 13762 12627 Assert(pMixedCtx == &pVCpu->cpum.GstCtx); 13763 int rc2 = hmR0Vmx SaveGuestRegsForIemInterpreting(pVCpu);12628 int rc2 = hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13764 12629 AssertRCReturn(rc2, rc2); 13765 12630 … … 13768 12633 13769 12634 rcStrict = EMHistoryExec(pVCpu, pExitRec, 0); 13770 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);12635 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 13771 12636 13772 12637 Log4(("EptMisscfgExit/%u: %04x:%08RX64: EMHistoryExec -> %Rrc + %04x:%08RX64\n", … … 13793 12658 /* In the unlikely case that the EPT violation happened as a result of delivering an event, log it. 
*/ 13794 12659 if (RT_UNLIKELY(pVCpu->hm.s.Event.fPending)) 13795 Log4 (("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo));12660 Log4Func(("EPT violation with an event pending u64IntInfo=%#RX64\n", pVCpu->hm.s.Event.u64IntInfo)); 13796 12661 } 13797 12662 else … … 13802 12667 } 13803 12668 13804 RTGCPHYS GCPhys = 0;12669 RTGCPHYS GCPhys; 13805 12670 int rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys); 13806 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13807 #if 0 13808 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */ 13809 #else 13810 /* Aggressive state sync. for now. */ 13811 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); 13812 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); 13813 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13814 #endif 12671 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 12672 rc |= hmR0VmxImportGuestState(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK); 13815 12673 AssertRCReturn(rc, rc); 13816 12674 … … 13819 12677 13820 12678 RTGCUINT uErrorCode = 0; 13821 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL IFICATION_EPT_INSTR_FETCH)12679 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_INSTR_FETCH) 13822 12680 uErrorCode |= X86_TRAP_PF_ID; 13823 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL IFICATION_EPT_DATA_WRITE)12681 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_DATA_WRITE) 13824 12682 uErrorCode |= X86_TRAP_PF_RW; 13825 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL IFICATION_EPT_ENTRY_PRESENT)12683 if (pVmxTransient->uExitQualification & VMX_EXIT_QUAL_EPT_ENTRY_PRESENT) 13826 12684 uErrorCode |= X86_TRAP_PF_P; 13827 12685 13828 12686 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode); 13829 12687 13830 Log4 (("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys,13831 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));12688 Log4Func(("EPT violation %#x at %#RX64 ErrorCode %#x CS:RIP=%04x:%08RX64\n", pVmxTransient->uExitQualification, GCPhys, 12689 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip)); 13832 12690 13833 12691 /* Handle the pagefault trap for the nested shadow table. */ … … 13843 12701 /* Successfully synced our nested page tables. */ 13844 12702 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf); 13845 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP13846 | HM_CHANGED_GUEST_RSP13847 | HM_CHANGED_GUEST_RFLAGS);12703 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 12704 | HM_CHANGED_GUEST_RSP 12705 | HM_CHANGED_GUEST_RFLAGS); 13848 12706 return VINF_SUCCESS; 13849 12707 } 13850 12708 13851 Log4 (("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2)));12709 Log4Func(("EPT return to ring-3 rcStrict2=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict2))); 13852 12710 return rcStrict2; 13853 12711 } … … 13871 12729 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF); 13872 12730 13873 int rc = hmR0Vmx SaveGuestCR0(pVCpu, pMixedCtx);12731 int rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CR0); 13874 12732 AssertRCReturn(rc, rc); 13875 12733 … … 13901 12759 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP); 13902 12760 13903 /** @todo Try optimize this by not saving the entire guest state unless 13904 * really needed. 
*/ 13905 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx); 12761 int rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 13906 12762 AssertRCReturn(rc, rc); 13907 12763 13908 PVM pVM = pVCpu->CTX_SUFF(pVM); 13909 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx)); 12764 rc = DBGFRZTrap03Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx)); 13910 12765 if (rc == VINF_EM_RAW_GUEST_TRAP) 13911 12766 { … … 13938 12793 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 13939 12794 AssertRCReturn(rc, rc); 13940 Assert( pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);12795 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO); 13941 12796 13942 12797 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), … … 13953 12808 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS(); 13954 12809 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB); 13955 Log6(("XcptDB\n"));13956 12810 13957 12811 /* … … 13960 12814 */ 13961 12815 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient); 13962 AssertRCReturn(rc, rc);13963 12816 13964 12817 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */ … … 13968 12821 13969 12822 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction); 12823 Log6Func(("rc=%Rrc\n", rc)); 13970 12824 if (rc == VINF_EM_RAW_GUEST_TRAP) 13971 12825 { … … 13986 12840 VMMRZCallRing3Enable(pVCpu); 13987 12841 13988 rc = hmR0Vmx SaveGuestDR7(pVCpu, pMixedCtx);12842 rc = hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_DR7); 13989 12843 AssertRCReturn(rc, rc); 13990 12844 … … 14002 12856 * Raise #DB in the guest. 14003 12857 * 14004 * It is important to reflect what the VM-exit gave us (preserving the interruption-type) rather than use 14005 * hmR0VmxSetPendingXcptDB() as the #DB could've been raised while executing ICEBP (INT1) and not the 14006 * regular #DB. Thus it -may- trigger different handling in the CPU (like skipped DPL checks), see @bugref{6398}. 12858 * It is important to reflect exactly what the VM-exit gave us (preserving the 12859 * interruption-type) rather than use hmR0VmxSetPendingXcptDB() as the #DB could've 12860 * been raised while executing ICEBP (INT1) and not the regular #DB. Thus it may 12861 * trigger different handling in the CPU (like skipping DPL checks), see @bugref{6398}. 14007 12862 * 14008 * Intel re-documented ICEBP/INT1 on May 2018 previously documented as part of Intel 386,14009 * see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".12863 * Intel re-documented ICEBP/INT1 on May 2018 previously documented as part of 12864 * Intel 386, see Intel spec. 24.8.3 "VM-Entry Controls for Event Injection". 
14010 12865 */ 14011 12866 rc = hmR0VmxReadExitIntInfoVmcs(pVmxTransient); … … 14051 12906 rc |= hmR0VmxReadExitIntErrorCodeVmcs(pVmxTransient); 14052 12907 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 14053 rc |= hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);12908 rc |= hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 14054 12909 AssertRCReturn(rc, rc); 14055 Log4 (("#GPGst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip,14056 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel));12910 Log4Func(("Gst: CS:RIP %04x:%08RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u TR=%#04x\n", pMixedCtx->cs.Sel, pMixedCtx->rip, 12911 pVmxTransient->uExitIntErrorCode, pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu), pMixedCtx->tr.Sel)); 14057 12912 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 14058 12913 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); … … 14064 12919 14065 12920 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */ 14066 rc = hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);12921 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 14067 12922 AssertRCReturn(rc, rc); 14068 12923 … … 14076 12931 rc = VINF_SUCCESS; 14077 12932 Assert(cbOp == pDis->cbInstr); 14078 Log4 (("#GPDisas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));12933 Log4Func(("Disas OpCode=%u CS:EIP %04x:%04RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip)); 14079 12934 switch (pDis->pCurInstr->uOpcode) 14080 12935 { … … 14084 12939 pMixedCtx->eflags.Bits.u1RF = 0; 14085 12940 pMixedCtx->rip += pDis->cbInstr; 14086 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12941 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 14087 12942 if ( !fDbgStepping 14088 12943 && pMixedCtx->eflags.Bits.u1TF) 14089 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 12944 { 12945 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 12946 AssertRCReturn(rc, rc); 12947 } 14090 12948 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli); 14091 12949 break; … … 14103 12961 Assert(VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)); 14104 12962 } 14105 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12963 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 14106 12964 if ( !fDbgStepping 14107 12965 && pMixedCtx->eflags.Bits.u1TF) 14108 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 12966 { 12967 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 12968 AssertRCReturn(rc, rc); 12969 } 14109 12970 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti); 14110 12971 break; … … 14116 12977 pMixedCtx->rip += pDis->cbInstr; 14117 12978 pMixedCtx->eflags.Bits.u1RF = 0; 14118 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);12979 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS); 14119 12980 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt); 14120 12981 break; … … 14123 12984 case OP_POPF: 14124 12985 { 14125 Log4 (("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));12986 Log4Func(("POPF CS:EIP %04x:%04RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip)); 14126 12987 uint32_t cbParm; 14127 12988 uint32_t uMask; … … 14155 13016 break; 14156 13017 } 14157 Log4 (("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, 
pMixedCtx->rip));13018 Log4Func(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip)); 14158 13019 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~((X86_EFL_POPF_BITS & uMask) | X86_EFL_RF)) 14159 13020 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask); 14160 pMixedCtx->esp 14161 pMixedCtx->esp 14162 pMixedCtx->rip 14163 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14164 | HM_CHANGED_GUEST_RSP14165 | HM_CHANGED_GUEST_RFLAGS);13021 pMixedCtx->esp += cbParm; 13022 pMixedCtx->esp &= uMask; 13023 pMixedCtx->rip += pDis->cbInstr; 13024 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 13025 | HM_CHANGED_GUEST_RSP 13026 | HM_CHANGED_GUEST_RFLAGS); 14166 13027 /* Generate a pending-debug exception when the guest stepping over POPF regardless of how 14167 13028 POPF restores EFLAGS.TF. */ 14168 13029 if ( !fDbgStepping 14169 13030 && fGstStepping) 14170 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13031 { 13032 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 13033 AssertRCReturn(rc, rc); 13034 } 14171 13035 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf); 14172 13036 break; … … 14209 13073 break; 14210 13074 } 14211 Log4 (("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));14212 pMixedCtx->esp 14213 pMixedCtx->esp 14214 pMixedCtx->rip 14215 pMixedCtx->eflags.Bits.u1RF 14216 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14217 | HM_CHANGED_GUEST_RSP14218 | HM_CHANGED_GUEST_RFLAGS);13075 Log4Func(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack)); 13076 pMixedCtx->esp -= cbParm; 13077 pMixedCtx->esp &= uMask; 13078 pMixedCtx->rip += pDis->cbInstr; 13079 pMixedCtx->eflags.Bits.u1RF = 0; 13080 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 13081 | HM_CHANGED_GUEST_RSP 13082 | HM_CHANGED_GUEST_RFLAGS); 14219 13083 if ( !fDbgStepping 14220 13084 && pMixedCtx->eflags.Bits.u1TF) 14221 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 13085 { 13086 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 13087 AssertRCReturn(rc, rc); 13088 } 14222 13089 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf); 14223 13090 break; … … 14258 13125 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask); 14259 13126 pMixedCtx->sp += sizeof(aIretFrame); 14260 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14261 | HM_CHANGED_GUEST_SEGMENT_REGS14262 | HM_CHANGED_GUEST_RSP14263 | HM_CHANGED_GUEST_RFLAGS);13127 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP 13128 | HM_CHANGED_GUEST_CS 13129 | HM_CHANGED_GUEST_RSP 13130 | HM_CHANGED_GUEST_RFLAGS); 14264 13131 /* Generate a pending-debug exception when stepping over IRET regardless of how IRET modifies EFLAGS.TF. 
*/ 14265 13132 if ( !fDbgStepping 14266 13133 && fGstStepping) 14267 hmR0VmxSetPendingDebugXcptVmcs(pVCpu); 14268 Log4(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip)); 13134 { 13135 rc = hmR0VmxSetPendingDebugXcptVmcs(pVCpu, pMixedCtx); 13136 AssertRCReturn(rc, rc); 13137 } 13138 Log4Func(("IRET %#RX32 to %04x:%04x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip)); 14269 13139 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret); 14270 13140 break; … … 14291 13161 { 14292 13162 pMixedCtx->eflags.Bits.u1RF = 0; 14293 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RFLAGS);13163 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RFLAGS); 14294 13164 } 14295 13165 break; … … 14302 13172 EMCODETYPE_SUPERVISOR); 14303 13173 rc = VBOXSTRICTRC_VAL(rc2); 14304 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST);13174 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 14305 13175 /** @todo We have to set pending-debug exceptions here when the guest is 14306 13176 * single-stepping depending on the instruction that was interpreted. */ 14307 Log4 (("#GP rc=%Rrc\n", rc));13177 Log4Func(("#GP rc=%Rrc\n", rc)); 14308 13178 break; 14309 13179 } … … 14332 13202 #ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS 14333 13203 AssertMsg(pVCpu->hm.s.fUsingDebugLoop || pVCpu->hm.s.vmx.RealMode.fRealOnV86Active, 14334 ("uVector=%# 04x u32XcptBitmap=%#010RX32\n",13204 ("uVector=%#x u32XcptBitmap=%#X32\n", 14335 13205 VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo), pVCpu->hm.s.vmx.u32XcptBitmap)); 14336 13206 #endif … … 14341 13211 rc |= hmR0VmxReadExitInstrLenVmcs(pVmxTransient); 14342 13212 AssertRCReturn(rc, rc); 14343 Assert( pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);13213 Assert(ASMAtomicUoReadU32(&pVmxTransient->fVmcsFieldsRead) & HMVMX_READ_EXIT_INTERRUPTION_INFO); 14344 13214 14345 13215 #ifdef DEBUG_ramshankar 14346 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); 13216 rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_CS 13217 | CPUMCTX_EXTRN_RIP); 14347 13218 uint8_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntInfo); 14348 13219 Log(("hmR0VmxExitXcptGeneric: Reinjecting Xcpt. uVector=%#x cs:rip=%#04x:%#RX64\n", uVector, pCtx->cs.Sel, pCtx->rip)); … … 14384 13255 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */ 14385 13256 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx); 14386 Log4 (("Pending #DF due to vectoring #PF. 
NP\n"));13257 Log4Func(("Pending #DF due to vectoring #PF w/ NestedPaging\n")); 14387 13258 } 14388 13259 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF); … … 14398 13269 } 14399 13270 14400 rc = hmR0Vmx SaveGuestState(pVCpu, pMixedCtx);13271 rc = hmR0VmxImportGuestState(pVCpu, HMVMX_CPUMCTX_EXTRN_ALL); 14401 13272 AssertRCReturn(rc, rc); 14402 13273 14403 Log4 (("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,14404 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3));13274 Log4Func(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification, 13275 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntErrorCode, pMixedCtx->cr3)); 14405 13276 14406 13277 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntErrorCode); … … 14408 13279 (RTGCPTR)pVmxTransient->uExitQualification); 14409 13280 14410 Log4 (("#PF: rc=%Rrc\n", rc));13281 Log4Func(("#PF: rc=%Rrc\n", rc)); 14411 13282 if (rc == VINF_SUCCESS) 14412 13283 { 14413 #if 014414 /* Successfully synced shadow pages tables or emulated an MMIO instruction. */14415 /** @todo this isn't quite right, what if guest does lgdt with some MMIO14416 * memory? We don't update the whole state here... */14417 HMCPU_CF_SET(pVCpu, HM_CHANGED_GUEST_RIP14418 | HM_CHANGED_GUEST_RSP14419 | HM_CHANGED_GUEST_RFLAGS14420 | HM_CHANGED_GUEST_APIC_STATE);14421 #else14422 13284 /* 14423 13285 * This is typically a shadow page table sync or a MMIO instruction. But we may have 14424 13286 * emulated something like LTR or a far jump. Any part of the CPU context may have changed. 14425 13287 */ 14426 /** @todo take advantage of CPUM changed flags instead of brute forcing. */ 14427 HMCPU_CF_SET(pVCpu, HM_CHANGED_ALL_GUEST); 14428 #endif 13288 ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_ALL_GUEST); 14429 13289 TRPMResetTrap(pVCpu); 14430 13290 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF); … … 14449 13309 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */ 14450 13310 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx); 14451 Log4 (("#PF: Pending #DF due to vectoring #PF\n"));13311 Log4Func(("#PF: Pending #DF due to vectoring #PF\n")); 14452 13312 } 14453 13313 -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r72643 r72744 39 39 VMMR0DECL(int) VMXR0TermVM(PVM pVM);
40 40 VMMR0DECL(int) VMXR0SetupVM(PVM pVM);
41 VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu); 41 VMMR0DECL(int) VMXR0ExportHostState(PVMCPU pVCpu);
42 42 VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
43 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, PCPUMCTX pCtx, uint64_t fWhat); 43 VMMR0DECL(int) VMXR0ImportStateOnDemand(PVMCPU pVCpu, uint64_t fWhat);
44 44 VMMR0DECL(VBOXSTRICTRC) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
45 45 DECLASM(int) VMXR0StartVM32(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
… …
48 48 # if HC_ARCH_BITS == 32 && defined(VBOX_WITH_64_BITS_GUESTS)
49 49 DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu);
50 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
51 uint32_t *paParam); 50 VMMR0DECL(int) VMXR0Execute64BitsHandler(PVMCPU pVCpu, HM64ON32OP enmOp, uint32_t cbParam, uint32_t *paParam);
52 51 # endif
53 52
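The HMVMXR0.cpp hunks above all follow the same reworked pattern: a VM-exit handler imports only the guest registers it actually needs through hmR0VmxImportGuestState() with a CPUMCTX_EXTRN_* mask, and records whatever it modified by OR-ing HM_CHANGED_* bits into pVCpu->hm.s.fCtxChanged atomically, replacing the earlier hmR0VmxSaveGuest*()/HMCPU_CF_SET() pairs. The fragment below is only a sketch of that pattern for orientation; it is not part of r72744, the handler name hmR0VmxExitExampleSketch and the particular masks are invented for illustration, and it assumes it sits inside HMVMXR0.cpp next to the real handlers where the static helpers and types are visible.

    /* Sketch only -- not from the changeset. Shows the import-on-demand +
     * fCtxChanged bookkeeping pattern used by the reworked exit handlers. */
    static VBOXSTRICTRC hmR0VmxExitExampleSketch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
    {
        /* Read the exit qualification and pull in only the guest state this
           handler inspects (segment registers and DR7, as an example). */
        int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxImportGuestState(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_DR7);
        AssertRCReturn(rc, rc);

        /* ... emulate the exiting instruction or hand it to IEM here ... */
        RT_NOREF(pMixedCtx);

        /* Mark what was changed so the next VM-entry exports it back to the
           VMCS; atomic unordered OR, matching the handlers in this changeset. */
        ASMAtomicUoOrU64(&pVCpu->hm.s.fCtxChanged, HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS);
        return VINF_SUCCESS;
    }

Compared with importing the whole guest context on every exit, requesting only the needed CPUMCTX_EXTRN_* subset keeps frequent exits (I/O, CRx, DRx) cheaper, which is the apparent motivation for the pattern throughout this diff.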
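On the HMVMXR0.h side, the signature changes are the visible part of the same cleanup: per-VM and per-context parameters are dropped wherever the information is reachable through the VMCPU, so VMXR0SaveHostState(PVM, PVMCPU) becomes VMXR0ExportHostState(PVMCPU) and VMXR0ImportStateOnDemand() loses its PCPUMCTX argument. The snippet below merely exercises the two revised prototypes so the parameter changes are easy to see; the wrapper name and the pairing of the two calls are artificial and not taken from VirtualBox code.

    /* Illustration only: calls the two reworked ring-0 entry points with
     * their new parameter lists. The wrapper itself is hypothetical. */
    static int hmR0ExampleUseNewSignatures(PVMCPU pVCpu)
    {
        /* Host state export is now keyed purely off the VMCPU. */
        int rc = VMXR0ExportHostState(pVCpu);
        if (RT_FAILURE(rc))
            return rc;

        /* Guest state is fetched from the VMCS lazily; the caller passes a
           CPUMCTX_EXTRN_* mask naming the registers it needs. */
        return VMXR0ImportStateOnDemand(pVCpu, CPUMCTX_EXTRN_SREG_MASK | CPUMCTX_EXTRN_CR0);
    }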