Changeset 76482 in vbox for trunk/src/VBox/VMM
Timestamp: Dec 26, 2018 3:49:56 AM
Location:  trunk/src/VBox/VMM
Files:     8 edited
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r76469 → r76482

@@ -83 +83 @@
 {
     /** Per CPU globals. */
-    HMGLOBALCPUINFO aCpuInfo[RTCPUSET_MAX_CPUS];
+    HMPHYSCPU       aCpuInfo[RTCPUSET_MAX_CPUS];

     /** @name Ring-0 method table for AMD-V and VT-x specific operations.
      * @{ */
-    DECLR0CALLBACKMEMBER(int, pfnEnterSession, (PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu));
+    DECLR0CALLBACKMEMBER(int, pfnEnterSession, (PVMCPU pVCpu));
     DECLR0CALLBACKMEMBER(void, pfnThreadCtxCallback, (RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit));
     DECLR0CALLBACKMEMBER(int, pfnExportHostState, (PVMCPU pVCpu));
     DECLR0CALLBACKMEMBER(VBOXSTRICTRC, pfnRunGuestCode, (PVMCPU pVCpu));
-    DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
+    DECLR0CALLBACKMEMBER(int, pfnEnableCpu, (PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
                                              bool fEnabledByHost, PCSUPHWVIRTMSRS pHwvirtMsrs));
-    DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
+    DECLR0CALLBACKMEMBER(int, pfnDisableCpu, (void *pvCpuPage, RTHCPHYS HCPhysCpuPage));
     DECLR0CALLBACKMEMBER(int, pfnInitVM, (PVM pVM));
     DECLR0CALLBACKMEMBER(int, pfnTermVM, (PVM pVM));

@@ -227 +227 @@
  * @{ */

-static DECLCALLBACK(int) hmR0DummyEnter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
+static DECLCALLBACK(int) hmR0DummyEnter(PVMCPU pVCpu)
 {
-    RT_NOREF2(pVCpu, pHostCpu);
+    RT_NOREF1(pVCpu);
     return VINF_SUCCESS;
 }
@@ -238 +238 @@
 }

-static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
+static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage,
                                             bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs)
 {
@@ -245 +245 @@
 }

-static DECLCALLBACK(int) hmR0DummyDisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+static DECLCALLBACK(int) hmR0DummyDisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
-    RT_NOREF3(pHostCpu, pvCpuPage, HCPhysCpuPage);
+    RT_NOREF2(pvCpuPage, HCPhysCpuPage);
     return VINF_SUCCESS;
 }

@@ -789 +789 @@
 static int hmR0EnableCpu(PVM pVM, RTCPUID idCpu)
 {
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];

     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /** @todo fix idCpu == index assumption (rainy day) */

@@ -975 +975 @@
 static int hmR0DisableCpu(RTCPUID idCpu)
 {
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];

     Assert(!g_HmR0.hwvirt.u.vmx.fSupported || !g_HmR0.hwvirt.u.vmx.fUsingSUPR0EnableVTx);
@@ -992 +992 @@
     if (pHostCpu->fConfigured)
     {
-        rc = g_HmR0.pfnDisableCpu(pHostCpu, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
+        rc = g_HmR0.pfnDisableCpu(pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
         AssertRCReturn(rc, rc);

@@ -1323 +1323 @@
     int rc = VINF_SUCCESS;
     RTCPUID const idCpu = RTMpCpuId();
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    PHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[idCpu];
     AssertPtr(pHostCpu);

@@ -1358 +1358 @@
     /* Load the bare minimum state required for entering HM. */
     int rc = hmR0EnterCpu(pVCpu);
-    AssertRCReturn(rc, rc);
+    if (RT_SUCCESS(rc))
+    {
+        if (g_HmR0.hwvirt.u.vmx.fSupported)
+        {
+            Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
+                                           == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
+        }
+        else
+        {
+            Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
+                                           == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
+        }

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_5);
-    bool fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
+        AssertReturn(!VMMR0ThreadCtxHookIsEnabled(pVCpu), VERR_HM_IPE_5);
+        bool const fStartedSet = PGMR0DynMapStartOrMigrateAutoSet(pVCpu);
 #endif

-    RTCPUID const idCpu = RTMpCpuId();
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
-    Assert(pHostCpu);
-    if (g_HmR0.hwvirt.u.vmx.fSupported)
-    {
-        Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE))
-                                       == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_VMX_HOST_GUEST_SHARED_STATE));
-    }
-    else
-    {
-        Assert((pVCpu->hm.s.fCtxChanged & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE))
-                                       == (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_SVM_HOST_GUEST_SHARED_STATE));
-    }
-
-    rc = g_HmR0.pfnEnterSession(pVCpu, pHostCpu);
-    AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
-
-    /* Exports the host-state as we may be resuming code after a longjmp and quite
-       possibly now be scheduled on a different CPU. */
-    rc = g_HmR0.pfnExportHostState(pVCpu);
-    AssertMsgRCReturn(rc, ("rc=%Rrc pVCpu=%p HostCpuId=%u\n", rc, pVCpu, idCpu), rc);
+        /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
+        rc = g_HmR0.pfnEnterSession(pVCpu);
+        AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID, rc);
+
+        /* Exports the host-state as we may be resuming code after a longjmp and quite
+           possibly now be scheduled on a different CPU. */
+        rc = g_HmR0.pfnExportHostState(pVCpu);
+        AssertMsgRCReturnStmt(rc, ("rc=%Rrc pVCpu=%p\n", rc, pVCpu), pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID, rc);

 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE
-    if (fStartedSet)
-        PGMRZDynMapReleaseAutoSet(pVCpu);
+        if (fStartedSet)
+            PGMRZDynMapReleaseAutoSet(pVCpu);
 #endif
-
-    /* Keep track of the CPU owning the VMCS for debugging scheduling weirdness and ring-3 calls. */
-    if (RT_FAILURE(rc))
-        pVCpu->hm.s.idEnteredCpu = NIL_RTCPUID;
+    }
     return rc;
 }

@@ -1413 +1408 @@
     VMCPU_ASSERT_EMT_RETURN(pVCpu, VERR_HM_WRONG_CPU);

-    RTCPUID const    idCpu = RTMpCpuId();
-    PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[idCpu];
+    RTCPUID const idCpu = RTMpCpuId();
+    PCHMPHYSCPU   pHostCpu = &g_HmR0.aCpuInfo[idCpu];

     if (   !g_HmR0.fGlobalInit

@@ -1470 +1465 @@
     if (!RTThreadPreemptIsEnabled(NIL_RTTHREAD))
     {
-        PHMGLOBALCPUINFO pHostCpu = &g_HmR0.aCpuInfo[RTMpCpuId()];
+        PCHMPHYSCPU pHostCpu = &g_HmR0.aCpuInfo[RTMpCpuId()];
         Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
         Assert(pHostCpu->fConfigured);

@@ -1609 +1604 @@
  * @returns The cpu structure pointer.
  */
-VMMR0_INT_DECL(PHMGLOBALCPUINFO) hmR0GetCurrentCpu(void)
+VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

@@ -1692 +1687 @@

     /* Ok, disable VT-x. */
-    PHMGLOBALCPUINFO pHostCpu = hmR0GetCurrentCpu();
+    PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
     AssertReturn(   pHostCpu
                  && pHostCpu->hMemObj != NIL_RTR0MEMOBJ
@@ -1700 +1695 @@

     *pfVTxDisabled = true;
-    return VMXR0DisableCpu(pHostCpu, pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
+    return VMXR0DisableCpu(pHostCpu->pvMemObj, pHostCpu->HCPhysMemObj);
 }

@@ -1726 +1721 @@
     Assert(g_HmR0.fGlobalInit);

-    PHMGLOBALCPUINFO pHostCpu = hmR0GetCurrentCpu();
+    PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
     AssertReturnVoid(   pHostCpu
                      && pHostCpu->hMemObj != NIL_RTR0MEMOBJ
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r76464 → r76482

@@ -519 +519 @@
  *
  * @returns VBox status code.
- * @param   pHostCpu        Pointer to the CPU info struct.
+ * @param   pHostCpu        The HM physical-CPU structure.
  * @param   pVM             The cross context VM structure. Can be
  *                          NULL after a resume!
@@ -528 +528 @@
  *                          unused).
  */
-VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
+VMMR0DECL(int) SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,
                               PCSUPHWVIRTMSRS pHwvirtMsrs)
 {

@@ -590 +590 @@
  *
  * @returns VBox status code.
- * @param   pHostCpu        Pointer to the CPU info struct.
  * @param   pvCpuPage       Pointer to the global CPU page.
  * @param   HCPhysCpuPage   Physical address of the global CPU page.
  */
-VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
+VMMR0DECL(int) SVMR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
 {
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
     AssertReturn(   HCPhysCpuPage
                  && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
     AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
-    RT_NOREF(pHostCpu);

     /* Paranoid: Disable interrupts as, in theory, interrupt handlers might mess with EFER. */

@@ -1192 +1190 @@
  * Flushes the appropriate tagged-TLB entries.
  *
+ * @param   pHostCpu    The HM physical-CPU structure.
  * @param   pVCpu       The cross context virtual CPU structure.
  * @param   pVmcb       Pointer to the VM control block.
- * @param   pHostCpu    Pointer to the HM host-CPU info.
  */
-static void hmR0SvmFlushTaggedTlb(PVMCPU pVCpu, PSVMVMCB pVmcb, PHMGLOBALCPUINFO pHostCpu)
+static void hmR0SvmFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu, PSVMVMCB pVmcb)
 {
     /*

@@ -2321 +2319 @@
  * @returns VBox status code.
  * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pHostCpu    Pointer to the CPU info struct.
  */
-VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu)
+VMMR0DECL(int) SVMR0Enter(PVMCPU pVCpu)
 {
     AssertPtr(pVCpu);
     Assert(pVCpu->CTX_SUFF(pVM)->hm.s.svm.fSupported);
     Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
-    RT_NOREF(pHostCpu);

     LogFlowFunc(("pVCpu=%p\n", pVCpu));

@@ -2522 +2518 @@
  *          whether the nested-guest is intercepting it or not.
  *
- * @param   pHostCpu    Pointer to the physical CPU HM info. struct.
- * @param   pVCpu       The cross context virtual CPU structure.
+ * @param   pHostCpu    The HM physical-CPU structure.
+ * @param   pVCpu       The cross context virtual CPU structure.
  *
  * @remarks No-long-jmp zone!!!
  */
-DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMGLOBALCPUINFO pHostCpu, PVMCPU pVCpu)
+DECLINLINE(void) hmR0SvmMergeMsrpmNested(PHMPHYSCPU pHostCpu, PVMCPU pVCpu)
 {
     uint64_t const *pu64GstMsrpm = (uint64_t const *)pVCpu->hm.s.svm.pvMsrBitmap;

@@ -4558 +4554 @@
     AssertMsg(!pVCpu->hm.s.fCtxChanged, ("fCtxChanged=%#RX64\n", pVCpu->hm.s.fCtxChanged));

-    PHMGLOBALCPUINFO pHostCpu = hmR0GetCurrentCpu();
-    RTCPUID const    idHostCpu = pHostCpu->idCpu;
-    bool const       fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;
+    PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu();
+    RTCPUID const idHostCpu = pHostCpu->idCpu;
+    bool const fMigratedHostCpu = idHostCpu != pVCpu->hm.s.idLastCpu;

     /* Setup TSC offsetting. */

@@ -4606 +4602 @@
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);    /* Used for TLB flushing, set this across the world switch. */
     /* Flush the appropriate tagged-TLB entries. */
-    hmR0SvmFlushTaggedTlb(pVCpu, pVmcb, pHostCpu);
+    hmR0SvmFlushTaggedTlb(pHostCpu, pVCpu, pVmcb);
     Assert(pVCpu->hm.s.idLastCpu == idHostCpu);
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
r76464 → r76482

@@ -36 +36 @@
 VMMR0DECL(int)  SVMR0GlobalInit(void);
 VMMR0DECL(void) SVMR0GlobalTerm(void);
-VMMR0DECL(int)  SVMR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu);
+VMMR0DECL(int)  SVMR0Enter(PVMCPU pVCpu);
 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)  SVMR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
+VMMR0DECL(int)  SVMR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage,
                                bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
-VMMR0DECL(int)  SVMR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int)  SVMR0DisableCpu(void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)  SVMR0InitVM(PVM pVM);
 VMMR0DECL(int)  SVMR0TermVM(PVM pVM);
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r76477 r76482 1119 1119 * 1120 1120 * @returns VBox status code. 1121 * @param pHostCpu Pointer to the global CPU info struct.1121 * @param pHostCpu The HM physical-CPU structure. 1122 1122 * @param pVM The cross context VM structure. Can be 1123 1123 * NULL after a host resume operation. … … 1130 1130 * @param pHwvirtMsrs Pointer to the hardware-virtualization MSRs. 1131 1131 */ 1132 VMMR0DECL(int) VMXR0EnableCpu(PHM GLOBALCPUINFOpHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost,1132 VMMR0DECL(int) VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost, 1133 1133 PCSUPHWVIRTMSRS pHwvirtMsrs) 1134 1134 { … … 1169 1169 * 1170 1170 * @returns VBox status code. 1171 * @param pHostCpu Pointer to the global CPU info struct.1172 1171 * @param pvCpuPage Pointer to the VMXON region. 1173 1172 * @param HCPhysCpuPage Physical address of the VMXON region. … … 1176 1175 * similar was used to enable VT-x on the host. 1177 1176 */ 1178 VMMR0DECL(int) VMXR0DisableCpu( PHMGLOBALCPUINFO pHostCpu,void *pvCpuPage, RTHCPHYS HCPhysCpuPage)1179 { 1180 RT_NOREF 3(pHostCpu,pvCpuPage, HCPhysCpuPage);1177 VMMR0DECL(int) VMXR0DisableCpu(void *pvCpuPage, RTHCPHYS HCPhysCpuPage) 1178 { 1179 RT_NOREF2(pvCpuPage, HCPhysCpuPage); 1181 1180 1182 1181 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); … … 1411 1410 static bool hmR0VmxIsAutoLoadStoreGuestMsr(PVMCPU pVCpu, uint32_t uMsr) 1412 1411 { 1413 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;1414 uint32_t 1412 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1413 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 1415 1414 1416 1415 for (uint32_t i = 0; i < cMsrs; i++, pGuestMsr++) … … 1435 1434 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr; 1436 1435 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr; 1437 uint32_t cMsrs= pVCpu->hm.s.vmx.cMsrs;1436 uint32_t const cMsrs = pVCpu->hm.s.vmx.cMsrs; 1438 1437 1439 1438 for (uint32_t i = 0; i < cMsrs; i++, pHostMsr++, pGuestMsr++) … … 1894 1893 * case where neither EPT nor VPID is supported by the CPU. 1895 1894 * 1896 * @param p VCpu The cross context virtualCPU structure.1897 * @param p Cpu Pointer to the global HM struct.1895 * @param pHostCpu The HM physical-CPU structure. 1896 * @param pVCpu The cross context virtual CPU structure. 1898 1897 * 1899 1898 * @remarks Called with interrupts disabled. 1900 1899 */ 1901 static void hmR0VmxFlushTaggedTlbNone(P VMCPU pVCpu, PHMGLOBALCPUINFO pCpu)1900 static void hmR0VmxFlushTaggedTlbNone(PHMPHYSCPU pHostCpu, PVMCPU pVCpu) 1902 1901 { 1903 1902 AssertPtr(pVCpu); 1904 AssertPtr(p Cpu);1903 AssertPtr(pHostCpu); 1905 1904 1906 1905 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); 1907 1906 1908 Assert(p Cpu->idCpu != NIL_RTCPUID);1909 pVCpu->hm.s.idLastCpu = p Cpu->idCpu;1910 pVCpu->hm.s.cTlbFlushes = p Cpu->cTlbFlushes;1907 Assert(pHostCpu->idCpu != NIL_RTCPUID); 1908 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu; 1909 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes; 1911 1910 pVCpu->hm.s.fForceTLBFlush = false; 1912 1911 return; … … 1917 1916 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary. 1918 1917 * 1919 * @param pVCpu The cross context virtualCPU structure.1920 * @param pCpu Pointer to the global HM CPU struct.1918 * @param pHostCpu The HM physical-CPU structure. 1919 * @param pVCpu The cross context virtual CPU structure. 
1921 1920 * 1922 1921 * @remarks All references to "ASID" in this function pertains to "VPID" in Intel's … … 1926 1925 * @remarks Called with interrupts disabled. 1927 1926 */ 1928 static void hmR0VmxFlushTaggedTlbBoth(P VMCPU pVCpu, PHMGLOBALCPUINFO pCpu)1927 static void hmR0VmxFlushTaggedTlbBoth(PHMPHYSCPU pHostCpu, PVMCPU pVCpu) 1929 1928 { 1930 1929 #ifdef VBOX_WITH_STATISTICS … … 1940 1939 #endif 1941 1940 1942 AssertPtr(pCpu);1943 1941 AssertPtr(pVCpu); 1944 Assert(pCpu->idCpu != NIL_RTCPUID); 1942 AssertPtr(pHostCpu); 1943 Assert(pHostCpu->idCpu != NIL_RTCPUID); 1945 1944 1946 1945 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 1955 1954 * cannot reuse the current ASID anymore. 1956 1955 */ 1957 if ( pVCpu->hm.s.idLastCpu != p Cpu->idCpu1958 || pVCpu->hm.s.cTlbFlushes != p Cpu->cTlbFlushes)1959 { 1960 ++p Cpu->uCurrentAsid;1961 if (p Cpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)1962 { 1963 p Cpu->uCurrentAsid = 1;/* Wraparound to 1; host uses 0. */1964 p Cpu->cTlbFlushes++;/* All VCPUs that run on this host CPU must use a new VPID. */1965 p Cpu->fFlushAsidBeforeUse = true;/* All VCPUs that run on this host CPU must flush their new VPID before use. */1966 } 1967 1968 pVCpu->hm.s.uCurrentAsid = p Cpu->uCurrentAsid;1969 pVCpu->hm.s.idLastCpu = p Cpu->idCpu;1970 pVCpu->hm.s.cTlbFlushes = p Cpu->cTlbFlushes;1956 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu 1957 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes) 1958 { 1959 ++pHostCpu->uCurrentAsid; 1960 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid) 1961 { 1962 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */ 1963 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */ 1964 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */ 1965 } 1966 1967 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid; 1968 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu; 1969 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes; 1971 1970 1972 1971 /* … … 1998 1997 HMVMX_UPDATE_FLUSH_SKIPPED_STAT(); 1999 1998 2000 Assert(pVCpu->hm.s.idLastCpu == p Cpu->idCpu);2001 Assert(pVCpu->hm.s.cTlbFlushes == p Cpu->cTlbFlushes);2002 AssertMsg(pVCpu->hm.s.cTlbFlushes == p Cpu->cTlbFlushes,2003 ("Flush count mismatch for cpu %d (%u vs %u)\n", p Cpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));2004 AssertMsg(p Cpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,2005 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", p Cpu->idCpu,2006 p Cpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));1999 Assert(pVCpu->hm.s.idLastCpu == pHostCpu->idCpu); 2000 Assert(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes); 2001 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes, 2002 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes)); 2003 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid, 2004 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu, 2005 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes)); 2007 2006 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid, 2008 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", p Cpu->idCpu, pVCpu->hm.s.uCurrentAsid));2007 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid)); 2009 2008 2010 2009 /* Update VMCS with the VPID. 
*/ … … 2019 2018 * Flushes the tagged-TLB entries for EPT CPUs as necessary. 2020 2019 * 2021 * @ returns VBox status code.2020 * @param pHostCpu The HM physical-CPU structure. 2022 2021 * @param pVCpu The cross context virtual CPU structure. 2023 * @param pCpu Pointer to the global HM CPU struct.2024 2022 * 2025 2023 * @remarks Called with interrupts disabled. 2026 2024 */ 2027 static void hmR0VmxFlushTaggedTlbEpt(P VMCPU pVCpu, PHMGLOBALCPUINFO pCpu)2025 static void hmR0VmxFlushTaggedTlbEpt(PHMPHYSCPU pHostCpu, PVMCPU pVCpu) 2028 2026 { 2029 2027 AssertPtr(pVCpu); 2030 AssertPtr(p Cpu);2031 Assert(p Cpu->idCpu != NIL_RTCPUID);2028 AssertPtr(pHostCpu); 2029 Assert(pHostCpu->idCpu != NIL_RTCPUID); 2032 2030 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked without NestedPaging.")); 2033 2031 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID.")); … … 2037 2035 * A change in the TLB flush count implies the host CPU is online after a suspend/resume. 2038 2036 */ 2039 if ( pVCpu->hm.s.idLastCpu != p Cpu->idCpu2040 || pVCpu->hm.s.cTlbFlushes != p Cpu->cTlbFlushes)2037 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu 2038 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes) 2041 2039 { 2042 2040 pVCpu->hm.s.fForceTLBFlush = true; … … 2051 2049 } 2052 2050 2053 pVCpu->hm.s.idLastCpu = p Cpu->idCpu;2054 pVCpu->hm.s.cTlbFlushes = p Cpu->cTlbFlushes;2051 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu; 2052 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes; 2055 2053 2056 2054 if (pVCpu->hm.s.fForceTLBFlush) … … 2065 2063 * Flushes the tagged-TLB entries for VPID CPUs as necessary. 2066 2064 * 2067 * @ returns VBox status code.2065 * @param pHostCpu The HM physical-CPU structure. 2068 2066 * @param pVCpu The cross context virtual CPU structure. 2069 * @param pCpu Pointer to the global HM CPU struct.2070 2067 * 2071 2068 * @remarks Called with interrupts disabled. 2072 2069 */ 2073 static void hmR0VmxFlushTaggedTlbVpid(P VMCPU pVCpu, PHMGLOBALCPUINFO pCpu)2070 static void hmR0VmxFlushTaggedTlbVpid(PHMPHYSCPU pHostCpu, PVMCPU pVCpu) 2074 2071 { 2075 2072 AssertPtr(pVCpu); 2076 AssertPtr(p Cpu);2077 Assert(p Cpu->idCpu != NIL_RTCPUID);2073 AssertPtr(pHostCpu); 2074 Assert(pHostCpu->idCpu != NIL_RTCPUID); 2078 2075 AssertMsg(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked without VPID.")); 2079 2076 AssertMsg(!pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging")); … … 2085 2082 * cannot reuse the current ASID anymore. 2086 2083 */ 2087 if ( pVCpu->hm.s.idLastCpu != p Cpu->idCpu2088 || pVCpu->hm.s.cTlbFlushes != p Cpu->cTlbFlushes)2084 if ( pVCpu->hm.s.idLastCpu != pHostCpu->idCpu 2085 || pVCpu->hm.s.cTlbFlushes != pHostCpu->cTlbFlushes) 2089 2086 { 2090 2087 pVCpu->hm.s.fForceTLBFlush = true; … … 2098 2095 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see 2099 2096 * hmR0VmxSetupTaggedTlb()) we would need to explicitly flush in this case (add an 2100 * fExplicitFlush = true here and change the p Cpu->fFlushAsidBeforeUse check below to2097 * fExplicitFlush = true here and change the pHostCpu->fFlushAsidBeforeUse check below to 2101 2098 * include fExplicitFlush's too) - an obscure corner case. 
2102 2099 */ … … 2106 2103 2107 2104 PVM pVM = pVCpu->CTX_SUFF(pVM); 2108 pVCpu->hm.s.idLastCpu = p Cpu->idCpu;2105 pVCpu->hm.s.idLastCpu = pHostCpu->idCpu; 2109 2106 if (pVCpu->hm.s.fForceTLBFlush) 2110 2107 { 2111 ++p Cpu->uCurrentAsid;2112 if (p Cpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)2113 { 2114 p Cpu->uCurrentAsid = 1;/* Wraparound to 1; host uses 0 */2115 p Cpu->cTlbFlushes++;/* All VCPUs that run on this host CPU must use a new VPID. */2116 p Cpu->fFlushAsidBeforeUse = true;/* All VCPUs that run on this host CPU must flush their new VPID before use. */2108 ++pHostCpu->uCurrentAsid; 2109 if (pHostCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid) 2110 { 2111 pHostCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */ 2112 pHostCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */ 2113 pHostCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */ 2117 2114 } 2118 2115 2119 2116 pVCpu->hm.s.fForceTLBFlush = false; 2120 pVCpu->hm.s.cTlbFlushes = p Cpu->cTlbFlushes;2121 pVCpu->hm.s.uCurrentAsid = p Cpu->uCurrentAsid;2122 if (p Cpu->fFlushAsidBeforeUse)2117 pVCpu->hm.s.cTlbFlushes = pHostCpu->cTlbFlushes; 2118 pVCpu->hm.s.uCurrentAsid = pHostCpu->uCurrentAsid; 2119 if (pHostCpu->fFlushAsidBeforeUse) 2123 2120 { 2124 2121 if (pVM->hm.s.vmx.enmTlbFlushVpid == VMXTLBFLUSHVPID_SINGLE_CONTEXT) … … 2127 2124 { 2128 2125 hmR0VmxFlushVpid(pVCpu, VMXTLBFLUSHVPID_ALL_CONTEXTS, 0 /* GCPtr */); 2129 p Cpu->fFlushAsidBeforeUse = false;2126 pHostCpu->fFlushAsidBeforeUse = false; 2130 2127 } 2131 2128 else … … 2137 2134 } 2138 2135 2139 AssertMsg(pVCpu->hm.s.cTlbFlushes == p Cpu->cTlbFlushes,2140 ("Flush count mismatch for cpu %d (%u vs %u)\n", p Cpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));2141 AssertMsg(p Cpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,2142 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", p Cpu->idCpu,2143 p Cpu->uCurrentAsid, pCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes));2136 AssertMsg(pVCpu->hm.s.cTlbFlushes == pHostCpu->cTlbFlushes, 2137 ("Flush count mismatch for cpu %d (%u vs %u)\n", pHostCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pHostCpu->cTlbFlushes)); 2138 AssertMsg(pHostCpu->uCurrentAsid >= 1 && pHostCpu->uCurrentAsid < pVM->hm.s.uMaxAsid, 2139 ("Cpu[%u] uCurrentAsid=%u cTlbFlushes=%u pVCpu->idLastCpu=%u pVCpu->cTlbFlushes=%u\n", pHostCpu->idCpu, 2140 pHostCpu->uCurrentAsid, pHostCpu->cTlbFlushes, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.cTlbFlushes)); 2144 2141 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid, 2145 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", p Cpu->idCpu, pVCpu->hm.s.uCurrentAsid));2142 ("Cpu[%u] pVCpu->uCurrentAsid=%u\n", pHostCpu->idCpu, pVCpu->hm.s.uCurrentAsid)); 2146 2143 2147 2144 int rc = VMXWriteVmcs32(VMX_VMCS16_VPID, pVCpu->hm.s.uCurrentAsid); … … 2153 2150 * Flushes the guest TLB entry based on CPU capabilities. 2154 2151 * 2155 * @param pVCpu The cross context virtual CPU structure. 2156 * @param pCpu Pointer to the global HM CPU struct. 2157 */ 2158 DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu) 2152 * @param pHostCpu The HM physical-CPU structure. 2153 * @param pVCpu The cross context virtual CPU structure. 2154 * 2155 * @remarks Called with interrupts disabled. 
2156 */ 2157 DECLINLINE(void) hmR0VmxFlushTaggedTlb(PHMPHYSCPU pHostCpu, PVMCPU pVCpu) 2159 2158 { 2160 2159 #ifdef HMVMX_ALWAYS_FLUSH_TLB … … 2164 2163 switch (pVM->hm.s.vmx.enmTlbFlushType) 2165 2164 { 2166 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(p VCpu, pCpu); break;2167 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(p VCpu, pCpu); break;2168 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(p VCpu, pCpu); break;2169 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(p VCpu, pCpu); break;2165 case VMXTLBFLUSHTYPE_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pHostCpu, pVCpu); break; 2166 case VMXTLBFLUSHTYPE_EPT: hmR0VmxFlushTaggedTlbEpt(pHostCpu, pVCpu); break; 2167 case VMXTLBFLUSHTYPE_VPID: hmR0VmxFlushTaggedTlbVpid(pHostCpu, pVCpu); break; 2168 case VMXTLBFLUSHTYPE_NONE: hmR0VmxFlushTaggedTlbNone(pHostCpu, pVCpu); break; 2170 2169 default: 2171 2170 AssertMsgFailed(("Invalid flush-tag function identifier\n")); … … 2821 2820 hmR0VmxUpdateErrorRecord(pVCpu, rc), rc); 2822 2821 2823 pVCpu->hm.s.vmx. uVmcsState = HMVMX_VMCS_STATE_CLEAR;2822 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR; 2824 2823 2825 2824 hmR0VmxUpdateErrorRecord(pVCpu, rc); … … 4982 4981 * See MSDN "Configuring Programs for 64-bit/x64 Software Conventions / Register Usage". 4983 4982 */ 4984 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx. uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);4983 bool const fResumeVM = RT_BOOL(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED); 4985 4984 /** @todo Add stats for resume vs launch. */ 4986 4985 PVM pVM = pVCpu->CTX_SUFF(pVM); … … 5284 5283 #endif 5285 5284 5286 P HMGLOBALCPUINFO pCpu = hmR0GetCurrentCpu();5287 RTHCPHYS HCPhysCpuPage = p Cpu->HCPhysMemObj;5285 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu(); 5286 RTHCPHYS HCPhysCpuPage = pHostCpu->HCPhysMemObj; 5288 5287 5289 5288 /* Clear VMCS. Marking it inactive, clearing implementation-specific data and writing VMCS data back to memory. */ 5290 5289 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 5291 pVCpu->hm.s.vmx. uVmcsState = HMVMX_VMCS_STATE_CLEAR;5290 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR; 5292 5291 5293 5292 /* Leave VMX Root Mode. */ … … 5323 5322 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 5324 5323 AssertRC(rc2); 5325 pVCpu->hm.s.vmx. uVmcsState = HMVMX_VMCS_STATE_ACTIVE;5324 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE; 5326 5325 Assert(!(ASMGetFlags() & X86_EFL_IF)); 5327 5326 ASMSetFlags(fOldEFlags); … … 5345 5344 NOREF(fResume); 5346 5345 5347 P HMGLOBALCPUINFO pCpu= hmR0GetCurrentCpu();5348 RTHCPHYS HCPhysCpuPage = pCpu->HCPhysMemObj;5346 PCHMPHYSCPU pHostCpu = hmR0GetCurrentCpu(); 5347 RTHCPHYS const HCPhysCpuPage = pHostCpu->HCPhysMemObj; 5349 5348 5350 5349 #ifdef VBOX_WITH_CRASHDUMP_MAGIC … … 7211 7210 * context. 7212 7211 */ 7213 if (pVCpu->hm.s.vmx. uVmcsState & HMVMX_VMCS_STATE_ACTIVE)7212 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE) 7214 7213 { 7215 7214 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 7216 7215 AssertRCReturn(rc, rc); 7217 7216 7218 pVCpu->hm.s.vmx. uVmcsState = HMVMX_VMCS_STATE_CLEAR;7217 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR; 7219 7218 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu)); 7220 7219 } 7221 Assert(!(pVCpu->hm.s.vmx. 
uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));7220 Assert(!(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_LAUNCHED)); 7222 7221 NOREF(idCpu); 7223 7222 … … 7413 7412 pVCpu->hm.s.vmx.fUpdatedHostMsrs = false; 7414 7413 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC); 7415 if (pVCpu->hm.s.vmx. uVmcsState & HMVMX_VMCS_STATE_ACTIVE)7414 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_ACTIVE) 7416 7415 { 7417 7416 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 7418 pVCpu->hm.s.vmx. uVmcsState = HMVMX_VMCS_STATE_CLEAR;7417 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_CLEAR; 7419 7418 } 7420 7419 … … 8027 8026 * @returns VBox status code. 8028 8027 * @param pVCpu The cross context virtual CPU structure. 8029 * @param pHostCpu Pointer to the global CPU info struct. 8030 */ 8031 VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu) 8028 */ 8029 VMMR0DECL(int) VMXR0Enter(PVMCPU pVCpu) 8032 8030 { 8033 8031 AssertPtr(pVCpu); 8034 8032 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fSupported); 8035 8033 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 8036 RT_NOREF(pHostCpu);8037 8034 8038 8035 LogFlowFunc(("pVCpu=%p\n", pVCpu)); … … 8053 8050 * Load the VCPU's VMCS as the current (and active) one. 8054 8051 */ 8055 Assert(pVCpu->hm.s.vmx. uVmcsState & HMVMX_VMCS_STATE_CLEAR);8052 Assert(pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR); 8056 8053 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 8057 if (RT_FAILURE(rc)) 8058 return rc; 8059 8060 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE; 8061 pVCpu->hm.s.fLeaveDone = false; 8062 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId())); 8063 8064 return VINF_SUCCESS; 8054 if (RT_SUCCESS(rc)) 8055 { 8056 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE; 8057 pVCpu->hm.s.fLeaveDone = false; 8058 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId())); 8059 } 8060 return rc; 8065 8061 } 8066 8062 … … 8131 8127 8132 8128 /* Load the active VMCS as the current one. */ 8133 if (pVCpu->hm.s.vmx. uVmcsState & HMVMX_VMCS_STATE_CLEAR)8129 if (pVCpu->hm.s.vmx.fVmcsState & HMVMX_VMCS_STATE_CLEAR) 8134 8130 { 8135 8131 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs); 8136 8132 AssertRC(rc); NOREF(rc); 8137 pVCpu->hm.s.vmx. uVmcsState = HMVMX_VMCS_STATE_ACTIVE;8133 pVCpu->hm.s.vmx.fVmcsState = HMVMX_VMCS_STATE_ACTIVE; 8138 8134 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId())); 8139 8135 } … … 8693 8689 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[XAPIC_OFF_TPR]; 8694 8690 8695 PHM GLOBALCPUINFO pCpu= hmR0GetCurrentCpu();8696 RTCPUID idCurrentCpu = pCpu->idCpu;8691 PHMPHYSCPU pHostCpu = hmR0GetCurrentCpu(); 8692 RTCPUID idCurrentCpu = pHostCpu->idCpu; 8697 8693 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer 8698 8694 || idCurrentCpu != pVCpu->hm.s.idLastCpu) … … 8703 8699 8704 8700 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB flushing, set this across the world switch. */ 8705 hmR0VmxFlushTaggedTlb(p VCpu, pCpu);/* Invalidate the appropriate guest entries from the TLB. */8701 hmR0VmxFlushTaggedTlb(pHostCpu, pVCpu); /* Invalidate the appropriate guest entries from the TLB. */ 8706 8702 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu); 8707 8703 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */ … … 8800 8796 #endif 8801 8797 #if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) 8802 /* The 64-on-32 switcher maintains uVmcsState on its own and we need to leave it alone here. 
*/8798 /* The 64-on-32 switcher maintains fVmcsState on its own and we need to leave it alone here. */ 8803 8799 if (pVCpu->hm.s.vmx.pfnStartVM != VMXR0SwitcherStartVM64) 8804 pVCpu->hm.s.vmx. uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */8800 pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */ 8805 8801 #else 8806 pVCpu->hm.s.vmx. uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */8802 pVCpu->hm.s.vmx.fVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */ 8807 8803 #endif 8808 8804 #ifdef VBOX_STRICT … … 9274 9270 } else do { } while (0) 9275 9271 9276 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); 9277 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); 9278 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); 9279 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); 9280 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); 9281 9282 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); 9272 SET_ONLY_XBM_IF_EITHER_EN(EXIT_TASK_SWITCH, VMX_EXIT_TASK_SWITCH); /* unconditional */ 9273 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_VIOLATION, VMX_EXIT_EPT_VIOLATION); /* unconditional */ 9274 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_EPT_MISCONFIG, VMX_EXIT_EPT_MISCONFIG); /* unconditional (unless #VE) */ 9275 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_ACCESS, VMX_EXIT_APIC_ACCESS); /* feature dependent, nothing to enable here */ 9276 SET_ONLY_XBM_IF_EITHER_EN(EXIT_VMX_VAPIC_WRITE, VMX_EXIT_APIC_WRITE); /* feature dependent, nothing to enable here */ 9277 9278 SET_ONLY_XBM_IF_EITHER_EN(INSTR_CPUID, VMX_EXIT_CPUID); /* unconditional */ 9283 9279 SET_ONLY_XBM_IF_EITHER_EN( EXIT_CPUID, VMX_EXIT_CPUID); 9284 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); 9280 SET_ONLY_XBM_IF_EITHER_EN(INSTR_GETSEC, VMX_EXIT_GETSEC); /* unconditional */ 9285 9281 SET_ONLY_XBM_IF_EITHER_EN( EXIT_GETSEC, VMX_EXIT_GETSEC); 9286 9282 SET_CPE1_XBM_IF_EITHER_EN(INSTR_HALT, VMX_EXIT_HLT, VMX_PROC_CTLS_HLT_EXIT); /* paranoia */ 9287 9283 SET_ONLY_XBM_IF_EITHER_EN( EXIT_HALT, VMX_EXIT_HLT); 9288 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); 9284 SET_ONLY_XBM_IF_EITHER_EN(INSTR_INVD, VMX_EXIT_INVD); /* unconditional */ 9289 9285 SET_ONLY_XBM_IF_EITHER_EN( EXIT_INVD, VMX_EXIT_INVD); 9290 9286 SET_CPE1_XBM_IF_EITHER_EN(INSTR_INVLPG, VMX_EXIT_INVLPG, VMX_PROC_CTLS_INVLPG_EXIT); … … 9294 9290 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSC, VMX_EXIT_RDTSC, VMX_PROC_CTLS_RDTSC_EXIT); 9295 9291 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSC, VMX_EXIT_RDTSC); 9296 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); 9292 SET_ONLY_XBM_IF_EITHER_EN(INSTR_RSM, VMX_EXIT_RSM); /* unconditional */ 9297 9293 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RSM, VMX_EXIT_RSM); 9298 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); 9294 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMM_CALL, VMX_EXIT_VMCALL); /* unconditional */ 9299 9295 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMM_CALL, VMX_EXIT_VMCALL); 9300 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); 9296 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); /* unconditional */ 9301 9297 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMCLEAR, VMX_EXIT_VMCLEAR); 9302 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); 9298 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); /* unconditional 
*/ 9303 9299 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMLAUNCH, VMX_EXIT_VMLAUNCH); 9304 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); 9300 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); /* unconditional */ 9305 9301 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRLD, VMX_EXIT_VMPTRLD); 9306 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); 9302 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMPTRST, VMX_EXIT_VMPTRST); /* unconditional */ 9307 9303 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMPTRST, VMX_EXIT_VMPTRST); 9308 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); 9304 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMREAD, VMX_EXIT_VMREAD); /* unconditional */ 9309 9305 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMREAD, VMX_EXIT_VMREAD); 9310 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); 9306 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMRESUME, VMX_EXIT_VMRESUME); /* unconditional */ 9311 9307 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMRESUME, VMX_EXIT_VMRESUME); 9312 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); 9308 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMWRITE, VMX_EXIT_VMWRITE); /* unconditional */ 9313 9309 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMWRITE, VMX_EXIT_VMWRITE); 9314 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); 9310 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXOFF, VMX_EXIT_VMXOFF); /* unconditional */ 9315 9311 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXOFF, VMX_EXIT_VMXOFF); 9316 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); 9312 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMXON, VMX_EXIT_VMXON); /* unconditional */ 9317 9313 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMXON, VMX_EXIT_VMXON); 9318 9314 … … 9400 9396 SET_ONLY_XBM_IF_EITHER_EN( EXIT_LTR, VMX_EXIT_LDTR_TR_ACCESS); 9401 9397 9402 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); 9398 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVEPT, VMX_EXIT_INVEPT); /* unconditional */ 9403 9399 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVEPT, VMX_EXIT_INVEPT); 9404 9400 SET_CPE1_XBM_IF_EITHER_EN(INSTR_RDTSCP, VMX_EXIT_RDTSCP, VMX_PROC_CTLS_RDTSC_EXIT); 9405 9401 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDTSCP, VMX_EXIT_RDTSCP); 9406 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); 9402 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_INVVPID, VMX_EXIT_INVVPID); /* unconditional */ 9407 9403 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVVPID, VMX_EXIT_INVVPID); 9408 9404 SET_CPE2_XBM_IF_EITHER_EN(INSTR_WBINVD, VMX_EXIT_WBINVD, VMX_PROC_CTLS2_WBINVD_EXIT); 9409 9405 SET_ONLY_XBM_IF_EITHER_EN( EXIT_WBINVD, VMX_EXIT_WBINVD); 9410 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); 9406 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSETBV, VMX_EXIT_XSETBV); /* unconditional */ 9411 9407 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XSETBV, VMX_EXIT_XSETBV); 9412 9408 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDRAND, VMX_EXIT_RDRAND, VMX_PROC_CTLS2_RDRAND_EXIT); … … 9414 9410 SET_CPE1_XBM_IF_EITHER_EN(INSTR_VMX_INVPCID, VMX_EXIT_INVPCID, VMX_PROC_CTLS_INVLPG_EXIT); 9415 9411 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_INVPCID, VMX_EXIT_INVPCID); 9416 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); 9412 SET_ONLY_XBM_IF_EITHER_EN(INSTR_VMX_VMFUNC, VMX_EXIT_VMFUNC); /* unconditional for the current setup */ 9417 9413 SET_ONLY_XBM_IF_EITHER_EN( EXIT_VMX_VMFUNC, VMX_EXIT_VMFUNC); 9418 9414 SET_CPE2_XBM_IF_EITHER_EN(INSTR_RDSEED, VMX_EXIT_RDSEED, VMX_PROC_CTLS2_RDSEED_EXIT); 9419 9415 SET_ONLY_XBM_IF_EITHER_EN( EXIT_RDSEED, VMX_EXIT_RDSEED); 9420 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); 9416 
SET_ONLY_XBM_IF_EITHER_EN(INSTR_XSAVES, VMX_EXIT_XSAVES); /* unconditional (enabled by host, guest cfg) */ 9421 9417 SET_ONLY_XBM_IF_EITHER_EN(EXIT_XSAVES, VMX_EXIT_XSAVES); 9422 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); 9418 SET_ONLY_XBM_IF_EITHER_EN(INSTR_XRSTORS, VMX_EXIT_XRSTORS); /* unconditional (enabled by host, guest cfg) */ 9423 9419 SET_ONLY_XBM_IF_EITHER_EN( EXIT_XRSTORS, VMX_EXIT_XRSTORS); 9424 9420 … … 11967 11963 } 11968 11964 else 11969 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));11965 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_READ, ("Unexpected IEMExecDecodedRdmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict))); 11970 11966 11971 11967 return rcStrict; … … 12110 12106 } 12111 12107 else 12112 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr status: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));12108 AssertMsg(rcStrict == VINF_CPUM_R3_MSR_WRITE, ("Unexpected IEMExecDecodedWrmsr rc (%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict))); 12113 12109 12114 12110 return rcStrict; … … 13232 13228 if ( !pVCpu->hm.s.fTrapXcptGpForLovelyMesaDrv 13233 13229 || !hmR0VmxIsMesaDrvGp(pVCpu, pVmxTransient, pCtx)) 13234 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), pVmxTransient->cbInstr,13235 pVmxTransient-> uExitIntErrorCode, 0 /* GCPtrFaultAddress */);13230 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INT_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntInfo), 13231 pVmxTransient->cbInstr, pVmxTransient->uExitIntErrorCode, 0 /* GCPtrFaultAddress */); 13236 13232 else 13237 13233 rc = hmR0VmxHandleMesaDrvGp(pVCpu, pVmxTransient, pCtx); -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r76464 → r76482

@@ -29 +29 @@
 #ifdef IN_RING0

-VMMR0DECL(int)  VMXR0Enter(PVMCPU pVCpu, PHMGLOBALCPUINFO pHostCpu);
+VMMR0DECL(int)  VMXR0Enter(PVMCPU pVCpu);
 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit);
-VMMR0DECL(int)  VMXR0EnableCpu(PHMGLOBALCPUINFO pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
+VMMR0DECL(int)  VMXR0EnableCpu(PHMPHYSCPU pHostCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys,
                                bool fEnabledBySystem, PCSUPHWVIRTMSRS pHwvirtMsrs);
-VMMR0DECL(int)  VMXR0DisableCpu(PHMGLOBALCPUINFO pHostCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys);
+VMMR0DECL(int)  VMXR0DisableCpu(void *pvPageCpu, RTHCPHYS pPageCpuPhys);
 VMMR0DECL(int)  VMXR0GlobalInit(void);
 VMMR0DECL(void) VMXR0GlobalTerm(void);
trunk/src/VBox/VMM/VMMR3/HM.cpp
r76477 → r76482

@@ -3092 +3092 @@
             LogRel(("HM: CPU[%u] Exit reason          %#x\n", i, pVCpu->hm.s.vmx.LastError.u32ExitReason));

-            if (   pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
-                || pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
+            if (   pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMLAUNCH_NON_CLEAR_VMCS
+                || pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMRESUME_NON_LAUNCHED_VMCS)
             {
                 LogRel(("HM: CPU[%u] Entered Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idEnteredCpu));
                 LogRel(("HM: CPU[%u] Current Host Cpu %u\n", i, pVCpu->hm.s.vmx.LastError.idCurrentCpu));
             }
-            else if (pVM->aCpus[i].hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
+            else if (pVCpu->hm.s.vmx.LastError.u32InstrError == VMXINSTRERR_VMENTRY_INVALID_CTLS)
             {
                 LogRel(("HM: CPU[%u] PinCtls          %#RX32\n", i, pVCpu->hm.s.vmx.u32PinCtls));
trunk/src/VBox/VMM/include/HMInternal.h
r76477 r76482 241 241 242 242 /** 243 * Global per-cpu information. (host)244 */ 245 typedef struct HM GLOBALCPUINFO243 * HM physical (host) CPU information. 244 */ 245 typedef struct HMPHYSCPU 246 246 { 247 247 /** The CPU ID. */ … … 284 284 } n; 285 285 #endif 286 } HMGLOBALCPUINFO; 287 /** Pointer to the per-cpu global information. */ 288 typedef HMGLOBALCPUINFO *PHMGLOBALCPUINFO; 289 290 typedef enum 291 { 292 HMPENDINGIO_INVALID = 0, 293 HMPENDINGIO_PORT_READ, 294 /* not implemented: HMPENDINGIO_STRING_READ, */ 295 /* not implemented: HMPENDINGIO_STRING_WRITE, */ 296 /** The usual 32-bit paranoia. */ 297 HMPENDINGIO_32BIT_HACK = 0x7fffffff 298 } HMPENDINGIO; 299 300 286 } HMPHYSCPU; 287 /** Pointer to HMPHYSCPU struct. */ 288 typedef HMPHYSCPU *PHMPHYSCPU; 289 /** Pointer to a const HMPHYSCPU struct. */ 290 typedef const HMPHYSCPU *PCHMPHYSCPU; 291 292 /** 293 * TPR-instruction type. 294 */ 301 295 typedef enum 302 296 { … … 311 305 } HMTPRINSTR; 312 306 307 /** 308 * TPR patch information. 309 */ 313 310 typedef struct 314 311 { … … 384 381 /** Pointer to switcher function. */ 385 382 typedef FNHMSWITCHERHC *PFNHMSWITCHERHC; 383 384 /** @def HM_UNION_NM 385 * For compilers (like DTrace) that does not grok nameless unions, we have a 386 * little hack to make them palatable. 387 */ 388 /** @def HM_STRUCT_NM 389 * For compilers (like DTrace) that does not grok nameless structs (it is 390 * non-standard C++), we have a little hack to make them palatable. 391 */ 392 #ifdef VBOX_FOR_DTRACE_LIB 393 # define HM_UNION_NM(a_Nm) a_Nm 394 # define HM_STRUCT_NM(a_Nm) a_Nm 395 #elif defined(IPRT_WITHOUT_NAMED_UNIONS_AND_STRUCTS) 396 # define HM_UNION_NM(a_Nm) a_Nm 397 # define HM_STRUCT_NM(a_Nm) a_Nm 398 #else 399 # define HM_UNION_NM(a_Nm) 400 # define HM_STRUCT_NM(a_Nm) 401 #endif 386 402 387 403 /** … … 720 736 uint64_t u64HostTscAux; 721 737 722 struct738 union /* no tag! */ 723 739 { 724 /** Ring 0 handlers for VT-x. */ 725 PFNHMVMXSTARTVM pfnStartVM; 726 #if HC_ARCH_BITS == 32 727 uint32_t u32Alignment0; 728 #endif 729 /** Current pin-based VM-execution controls. */ 730 uint32_t u32PinCtls; 731 /** Current processor-based VM-execution controls. */ 732 uint32_t u32ProcCtls; 733 /** Current secondary processor-based VM-execution controls. */ 734 uint32_t u32ProcCtls2; 735 /** Current VM-entry controls. */ 736 uint32_t u32EntryCtls; 737 /** Current VM-exit controls. */ 738 uint32_t u32ExitCtls; 739 740 /** Current CR0 mask. */ 741 uint32_t u32Cr0Mask; 742 /** Current CR4 mask. */ 743 uint32_t u32Cr4Mask; 744 /** Current exception bitmap. */ 745 uint32_t u32XcptBitmap; 746 /** The updated-guest-state mask. */ 747 uint32_t au32Alignment0[2]; 748 749 /** Physical address of the VM control structure (VMCS). */ 750 RTHCPHYS HCPhysVmcs; 751 /** R0 memory object for the VM control structure (VMCS). */ 752 RTR0MEMOBJ hMemObjVmcs; 753 /** Virtual address of the VM control structure (VMCS). */ 754 R0PTRTYPE(void *) pvVmcs; 755 756 /** Physical address of the virtual APIC page for TPR caching. */ 757 RTHCPHYS HCPhysVirtApic; 758 /** Padding. */ 759 R0PTRTYPE(void *) pvAlignment0; 760 /** Virtual address of the virtual APIC page for TPR caching. */ 761 R0PTRTYPE(uint8_t *) pbVirtApic; 762 763 /** Physical address of the MSR bitmap. */ 764 RTHCPHYS HCPhysMsrBitmap; 765 /** R0 memory object for the MSR bitmap. */ 766 RTR0MEMOBJ hMemObjMsrBitmap; 767 /** Virtual address of the MSR bitmap. 
*/ 768 R0PTRTYPE(void *) pvMsrBitmap; 769 770 /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used 771 * for guest MSRs). */ 772 RTHCPHYS HCPhysGuestMsr; 773 /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area 774 * (used for guest MSRs). */ 775 RTR0MEMOBJ hMemObjGuestMsr; 776 /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used 777 * for guest MSRs). */ 778 R0PTRTYPE(void *) pvGuestMsr; 779 780 /** Physical address of the VM-exit MSR-load area (used for host MSRs). */ 781 RTHCPHYS HCPhysHostMsr; 782 /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */ 783 RTR0MEMOBJ hMemObjHostMsr; 784 /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */ 785 R0PTRTYPE(void *) pvHostMsr; 786 787 /** Current EPTP. */ 788 RTHCPHYS HCPhysEPTP; 789 790 /** Number of guest/host MSR pairs in the auto-load/store area. */ 791 uint32_t cMsrs; 792 /** Whether the host MSR values are up-to-date in the auto-load/store area. */ 793 bool fUpdatedHostMsrs; 794 uint8_t u8Alignment0[3]; 795 796 /** Host LSTAR MSR value to restore lazily while leaving VT-x. */ 797 uint64_t u64HostLStarMsr; 798 /** Host STAR MSR value to restore lazily while leaving VT-x. */ 799 uint64_t u64HostStarMsr; 800 /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */ 801 uint64_t u64HostSFMaskMsr; 802 /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */ 803 uint64_t u64HostKernelGSBaseMsr; 804 /** A mask of which MSRs have been swapped and need restoration. */ 805 uint32_t fLazyMsrs; 806 uint32_t u32Alignment2; 807 808 /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */ 809 uint64_t u64MsrApicBase; 810 /** Last use TSC offset value. (cached) */ 811 uint64_t u64TscOffset; 812 813 /** VMCS cache. */ 814 VMCSCACHE VMCSCache; 815 816 /** Real-mode emulation state. */ 740 /** VT-x data. */ 817 741 struct 818 742 { 819 X86DESCATTR AttrCS; 820 X86DESCATTR AttrDS; 821 X86DESCATTR AttrES; 822 X86DESCATTR AttrFS; 823 X86DESCATTR AttrGS; 824 X86DESCATTR AttrSS; 825 X86EFLAGS Eflags; 826 bool fRealOnV86Active; 827 } RealMode; 828 829 /** VT-x error-reporting (mainly for ring-3 propagation). */ 743 /** Ring 0 handlers for VT-x. */ 744 PFNHMVMXSTARTVM pfnStartVM; 745 #if HC_ARCH_BITS == 32 746 uint32_t u32Alignment0; 747 #endif 748 749 /** Current pin-based VM-execution controls. */ 750 uint32_t u32PinCtls; 751 /** Current processor-based VM-execution controls. */ 752 uint32_t u32ProcCtls; 753 /** Current secondary processor-based VM-execution controls. */ 754 uint32_t u32ProcCtls2; 755 /** Current VM-entry controls. */ 756 uint32_t u32EntryCtls; 757 /** Current VM-exit controls. */ 758 uint32_t u32ExitCtls; 759 /** Current CR0 mask. */ 760 uint32_t u32Cr0Mask; 761 /** Current CR4 mask. */ 762 uint32_t u32Cr4Mask; 763 /** Current exception bitmap. */ 764 uint32_t u32XcptBitmap; 765 766 /** Physical address of the VM control structure (VMCS). */ 767 RTHCPHYS HCPhysVmcs; 768 /** R0 memory object for the VM control structure (VMCS). */ 769 RTR0MEMOBJ hMemObjVmcs; 770 /** Virtual address of the VM control structure (VMCS). */ 771 R0PTRTYPE(void *) pvVmcs; 772 773 /** Physical address of the current EPTP. */ 774 RTHCPHYS HCPhysEPTP; 775 /** Physical address of the virtual APIC page for TPR caching. */ 776 RTHCPHYS HCPhysVirtApic; 777 /** Virtual address of the virtual APIC page for TPR caching. 
*/ 778 R0PTRTYPE(uint8_t *) pbVirtApic; 779 780 /** Physical address of the MSR bitmap. */ 781 RTHCPHYS HCPhysMsrBitmap; 782 /** R0 memory object for the MSR bitmap. */ 783 RTR0MEMOBJ hMemObjMsrBitmap; 784 /** Virtual address of the MSR bitmap. */ 785 R0PTRTYPE(void *) pvMsrBitmap; 786 787 /** Physical address of the VM-entry MSR-load and VM-exit MSR-store area (used 788 * for guest MSRs). */ 789 RTHCPHYS HCPhysGuestMsr; 790 /** R0 memory object of the VM-entry MSR-load and VM-exit MSR-store area 791 * (used for guest MSRs). */ 792 RTR0MEMOBJ hMemObjGuestMsr; 793 /** Virtual address of the VM-entry MSR-load and VM-exit MSR-store area (used 794 * for guest MSRs). */ 795 R0PTRTYPE(void *) pvGuestMsr; 796 797 /** Physical address of the VM-exit MSR-load area (used for host MSRs). */ 798 RTHCPHYS HCPhysHostMsr; 799 /** R0 memory object for the VM-exit MSR-load area (used for host MSRs). */ 800 RTR0MEMOBJ hMemObjHostMsr; 801 /** Virtual address of the VM-exit MSR-load area (used for host MSRs). */ 802 R0PTRTYPE(void *) pvHostMsr; 803 804 /** Number of guest/host MSR pairs in the auto-load/store area. */ 805 uint32_t cMsrs; 806 /** Whether the host MSR values are up-to-date in the auto-load/store area. */ 807 bool fUpdatedHostMsrs; 808 uint8_t au8Alignment0[3]; 809 810 /** Host LSTAR MSR value to restore lazily while leaving VT-x. */ 811 uint64_t u64HostLStarMsr; 812 /** Host STAR MSR value to restore lazily while leaving VT-x. */ 813 uint64_t u64HostStarMsr; 814 /** Host SF_MASK MSR value to restore lazily while leaving VT-x. */ 815 uint64_t u64HostSFMaskMsr; 816 /** Host KernelGS-Base MSR value to restore lazily while leaving VT-x. */ 817 uint64_t u64HostKernelGSBaseMsr; 818 /** A mask of which MSRs have been swapped and need restoration. */ 819 uint32_t fLazyMsrs; 820 uint32_t u32Alignment1; 821 822 /** The cached APIC-base MSR used for identifying when to map the HC physical APIC-access page. */ 823 uint64_t u64MsrApicBase; 824 /** Last use TSC offset value. (cached) */ 825 uint64_t u64TscOffset; 826 827 /** VMCS cache. */ 828 VMCSCACHE VMCSCache; 829 830 /** Real-mode emulation state. */ 831 struct 832 { 833 X86DESCATTR AttrCS; 834 X86DESCATTR AttrDS; 835 X86DESCATTR AttrES; 836 X86DESCATTR AttrFS; 837 X86DESCATTR AttrGS; 838 X86DESCATTR AttrSS; 839 X86EFLAGS Eflags; 840 bool fRealOnV86Active; 841 } RealMode; 842 843 /** VT-x error-reporting (mainly for ring-3 propagation). */ 844 struct 845 { 846 uint64_t u64VmcsPhys; 847 uint32_t u32VmcsRev; 848 uint32_t u32InstrError; 849 uint32_t u32ExitReason; 850 uint32_t u32Alignment0; 851 RTCPUID idEnteredCpu; 852 RTCPUID idCurrentCpu; 853 } LastError; 854 855 /** Current state of the VMCS. */ 856 uint32_t fVmcsState; 857 /** Which host-state bits to restore before being preempted. */ 858 uint32_t fRestoreHostFlags; 859 /** The host-state restoration structure. */ 860 VMXRESTOREHOST RestoreHost; 861 862 /** Set if guest was executing in real mode (extra checks). */ 863 bool fWasInRealMode; 864 /** Set if guest switched to 64-bit mode on a 32-bit host. */ 865 bool fSwitchedTo64on32; 866 /** Padding. */ 867 uint8_t au8Alignment1[6]; 868 } vmx; 869 870 /** SVM data. */ 830 871 struct 831 872 { 832 uint64_t u64VmcsPhys; 833 uint32_t u32VmcsRev; 834 uint32_t u32InstrError; 835 uint32_t u32ExitReason; 836 RTCPUID idEnteredCpu; 837 RTCPUID idCurrentCpu; 838 uint32_t u32Alignment0; 839 } LastError; 840 841 /** Current state of the VMCS. */ 842 uint32_t uVmcsState; 843 /** Which host-state bits to restore before being preempted. 
*/ 844 uint32_t fRestoreHostFlags; 845 /** The host-state restoration structure. */ 846 VMXRESTOREHOST RestoreHost; 847 848 /** Set if guest was executing in real mode (extra checks). */ 849 bool fWasInRealMode; 850 /** Set if guest switched to 64-bit mode on a 32-bit host. */ 851 bool fSwitchedTo64on32; 852 853 uint8_t u8Alignment1[6]; 854 } vmx; 855 856 struct 857 { 858 /** Ring 0 handlers for VT-x. */ 859 PFNHMSVMVMRUN pfnVMRun; 873 /** Ring 0 handlers for VT-x. */ 874 PFNHMSVMVMRUN pfnVMRun; 860 875 #if HC_ARCH_BITS == 32 861 uint32_t u32Alignment0; 862 #endif 863 864 /** Physical address of the host VMCB which holds additional host-state. */ 865 RTHCPHYS HCPhysVmcbHost; 866 /** R0 memory object for the host VMCB which holds additional host-state. */ 867 RTR0MEMOBJ hMemObjVmcbHost; 868 /** Padding. */ 869 R0PTRTYPE(void *) pvPadding; 870 871 /** Physical address of the guest VMCB. */ 872 RTHCPHYS HCPhysVmcb; 873 /** R0 memory object for the guest VMCB. */ 874 RTR0MEMOBJ hMemObjVmcb; 875 /** Pointer to the guest VMCB. */ 876 R0PTRTYPE(PSVMVMCB) pVmcb; 877 878 /** Physical address of the MSR bitmap (8 KB). */ 879 RTHCPHYS HCPhysMsrBitmap; 880 /** R0 memory object for the MSR bitmap (8 KB). */ 881 RTR0MEMOBJ hMemObjMsrBitmap; 882 /** Pointer to the MSR bitmap. */ 883 R0PTRTYPE(void *) pvMsrBitmap; 884 885 /** Whether VTPR with V_INTR_MASKING set is in effect, indicating 886 * we should check if the VTPR changed on every VM-exit. */ 887 bool fSyncVTpr; 888 uint8_t u8Alignment0[7]; 889 890 /** Cache of the nested-guest's VMCB fields that we modify in order to run the 891 * nested-guest using AMD-V. This will be restored on \#VMEXIT. */ 892 SVMNESTEDVMCBCACHE NstGstVmcbCache; 893 } svm; 876 uint32_t u32Alignment0; 877 #endif 878 879 /** Physical address of the host VMCB which holds additional host-state. */ 880 RTHCPHYS HCPhysVmcbHost; 881 /** R0 memory object for the host VMCB which holds additional host-state. */ 882 RTR0MEMOBJ hMemObjVmcbHost; 883 /** Padding. */ 884 R0PTRTYPE(void *) pvPadding; 885 886 /** Physical address of the guest VMCB. */ 887 RTHCPHYS HCPhysVmcb; 888 /** R0 memory object for the guest VMCB. */ 889 RTR0MEMOBJ hMemObjVmcb; 890 /** Pointer to the guest VMCB. */ 891 R0PTRTYPE(PSVMVMCB) pVmcb; 892 893 /** Physical address of the MSR bitmap (8 KB). */ 894 RTHCPHYS HCPhysMsrBitmap; 895 /** R0 memory object for the MSR bitmap (8 KB). */ 896 RTR0MEMOBJ hMemObjMsrBitmap; 897 /** Pointer to the MSR bitmap. */ 898 R0PTRTYPE(void *) pvMsrBitmap; 899 900 /** Whether VTPR with V_INTR_MASKING set is in effect, indicating 901 * we should check if the VTPR changed on every VM-exit. */ 902 bool fSyncVTpr; 903 uint8_t au8Alignment0[7]; 904 905 /** Cache of the nested-guest's VMCB fields that we modify in order to run the 906 * nested-guest using AMD-V. This will be restored on \#VMEXIT. */ 907 SVMNESTEDVMCBCACHE NstGstVmcbCache; 908 } svm; 909 } HM_UNION_NM(u); 894 910 895 911 /** Event injection state. */ … … 1067 1083 AssertCompileMemberAlignment(HMCPU, cWorldSwitchExits, 4); 1068 1084 AssertCompileMemberAlignment(HMCPU, fCtxChanged, 8); 1069 AssertCompileMemberAlignment(HMCPU, vmx, 8);1070 AssertCompileMemberAlignment(HMCPU, svm, 8);1085 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8); 1086 AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) 
svm, 8); 1071 1087 AssertCompileMemberAlignment(HMCPU, Event, 8); 1072 1088 1073 1089 #ifdef IN_RING0 1074 VMMR0_INT_DECL(PHM GLOBALCPUINFO) hmR0GetCurrentCpu(void);1090 VMMR0_INT_DECL(PHMPHYSCPU) hmR0GetCurrentCpu(void); 1075 1091 VMMR0_INT_DECL(int) hmR0EnterCpu(PVMCPU pVCpu); 1076 1092 … … 1081 1097 1082 1098 # ifdef VBOX_WITH_KERNEL_USING_XMM 1083 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, PFNHMVMXSTARTVM pfnStartVM); 1084 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, PFNHMSVMVMRUN pfnVMRun); 1099 DECLASM(int) hmR0VMXStartVMWrapXMM(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu, 1100 PFNHMVMXSTARTVM pfnStartVM); 1101 DECLASM(int) hmR0SVMRunWrapXMM(RTHCPHYS pVmcbHostPhys, RTHCPHYS pVmcbPhys, PCPUMCTX pCtx, PVM pVM, PVMCPU pVCpu, 1102 PFNHMSVMVMRUN pfnVMRun); 1085 1103 # endif 1086 1104 #endif /* IN_RING0 */ -
trunk/src/VBox/VMM/testcase/tstVMStructSize.cpp
r73097 → r76482

@@ -431 +431 @@
     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.HCPhysVmcs, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8);
+    CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8);
     CHECK_MEMBER_ALIGNMENT(HMCPU, svm.pfnVMRun, 8);
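The HMInternal.h change above wraps the VT-x and AMD-V per-VCPU data in a nameless union and adds the HM_UNION_NM/HM_STRUCT_NM helpers so that compilers without nameless-union support (such as the DTrace library build) still see a named member. The stand-alone sketch below illustrates how such a compatibility macro works; the names NO_NAMELESS_UNIONS, UNION_NM and EXAMPLECPU are made up for illustration and are not VirtualBox code.

#ifdef NO_NAMELESS_UNIONS               /* e.g. a compiler without nameless-union support */
# define UNION_NM(a_Nm) a_Nm            /* keep the member name */
#else
# define UNION_NM(a_Nm)                 /* drop it: the union becomes nameless */
#endif

typedef struct EXAMPLECPU
{
    union /* no tag! */
    {
        struct { int iVmxOnly; } vmx;
        struct { int iSvmOnly; } svm;
    } UNION_NM(u);
} EXAMPLECPU;

/* With the nameless union a member is reached as pCpu->vmx.iVmxOnly, with the
   named union as pCpu->u.vmx.iVmxOnly.  Shared code therefore spells the path
   with the macro, e.g. pCpu->UNION_NM(u.) vmx.iVmxOnly, which is the same trick
   the AssertCompileMemberAlignment(HMCPU, HM_UNION_NM(u.) vmx, 8) lines above use. */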