Changeset 48218 in vbox for trunk/src/VBox
- Timestamp:
- Sep 1, 2013 4:31:26 PM (11 years ago)
- Location:
- trunk/src/VBox/VMM/VMMR0
- Files:
-
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/VMM/VMMR0/HMR0.cpp
r48217 r48218 91 91 DECLR0CALLBACKMEMBER(int, pfnRunGuestCode,(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)); 92 92 DECLR0CALLBACKMEMBER(int, pfnEnableCpu,(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, 93 bool fEnabledByHost ));93 bool fEnabledByHost, void *pvArg)); 94 94 DECLR0CALLBACKMEMBER(int, pfnDisableCpu,(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)); 95 95 DECLR0CALLBACKMEMBER(int, pfnInitVM,(PVM pVM)); … … 243 243 244 244 static DECLCALLBACK(int) hmR0DummyEnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, 245 bool fEnabledBySystem )246 { 247 NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem); 245 bool fEnabledBySystem, void *pvArg) 246 { 247 NOREF(pCpu); NOREF(pVM); NOREF(pvCpuPage); NOREF(HCPhysCpuPage); NOREF(fEnabledBySystem); NOREF(pvArg); 248 248 return VINF_SUCCESS; 249 249 } … … 908 908 int rc; 909 909 if (g_HvmR0.vmx.fSupported && g_HvmR0.vmx.fUsingSUPR0EnableVTx) 910 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true );910 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, NULL /* pvCpuPage */, NIL_RTHCPHYS, true, &g_HvmR0.vmx.Msrs); 911 911 else 912 912 { … … 914 914 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj); 915 915 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0); 916 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false); 916 917 if (g_HvmR0.vmx.fSupported) 918 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs); 919 else 920 rc = g_HvmR0.pfnEnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, NULL /* pvArg */); 917 921 } 918 922 AssertRC(rc); … … 1767 1771 void *pvCpuPage = RTR0MemObjAddress(pCpu->hMemObj); 1768 1772 RTHCPHYS HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0); 1769 VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false );1773 VMXR0EnableCpu(pCpu, pVM, pvCpuPage, HCPhysCpuPage, false, &g_HvmR0.vmx.Msrs); 1770 1774
} 1771 1775 } -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.cpp
r48206 r48218 301 301 * @param pvCpuPage Pointer to the global CPU page. 302 302 * @param HCPhysCpuPage Physical address of the global CPU page. 303 */ 304 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost) 303 * @param pvArg Unused on AMD-V. 304 */ 305 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost, 306 void *pvArg) 305 307 { 306 308 AssertReturn(!fEnabledByHost, VERR_INVALID_PARAMETER); … … 308 310 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER); 309 311 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER); 312 NOREF(pvArg); 310 313 311 314 /* -
trunk/src/VBox/VMM/VMMR0/HMSVMR0.h
r48153 r48218 42 42 VMMR0DECL(int) SVMR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu); 43 43 VMMR0DECL(void) SVMR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit); 44 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem); 44 VMMR0DECL(int) SVMR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS HCPhysCpuPage, bool fEnabledBySystem, 45 void *pvArg); 45 46 VMMR0DECL(int) SVMR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys); 46 47 VMMR0DECL(int) SVMR0InitVM(PVM pVM); -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp
r48216 r48218 314 314 * Internal Functions * 315 315 *******************************************************************************/ 316 static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);316 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush); 317 317 static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr); 318 318 static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr, … … 1009 1009 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to 1010 1010 * enable VT-x on the host. 1011 */ 1012 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost) 1011 * @param pvMsrs Opaque pointer to VMXMSRS struct. 1012 */ 1013 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost, 1014 void *pvMsrs) 1013 1015 { 1014 1016 AssertReturn(pCpu, VERR_INVALID_PARAMETER); 1017 AssertReturn(pvMsrs, VERR_INVALID_PARAMETER); 1015 1018 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD)); 1016 1019 … … 1023 1026 1024 1027 /* 1025 * Flush all EPTP tagged-TLB entries (in case any other hypervisor have been using EPTPs) so that 1026 * we can avoid an explicit flush while using new VPIDs. We would still need to flush 1027 * each time while reusing a VPID after hitting the MaxASID limit once. 1028 * Flush all EPT tagged-TLB entries (in case VirtualBox or any other hypervisor have been using EPTPs) so 1029 * we don't retain any stale guest-physical mappings which won't get invalidated when flushing by VPID. 1028 1030 */ 1029 if ( pVM 1030 && pVM->hm.s.fNestedPaging) 1031 { 1032 /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb().
*/ 1033 Assert(pVM->hm.s.vmx.Msrs.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS); 1034 hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS); 1031 PVMXMSRS pMsrs = (PVMXMSRS)pvMsrs; 1032 if (pMsrs->u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS) 1033 { 1034 hmR0VmxFlushEpt(NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS); 1035 1035 pCpu->fFlushAsidBeforeUse = false; 1036 1036 } 1037 1037 else 1038 {1039 /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM1040 * without Nested Paging triggered this function) we still have the risk1041 * of potentially running with stale TLB-entries from other hypervisors1042 * when later we use a VM with NestedPaging. To fix this properly we will1043 * have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read1044 * 'u64EptVpidCaps' from it. Sigh. */1045 1038 pCpu->fFlushAsidBeforeUse = true; 1046 }1047 1039 1048 1040 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */ … … 1126 1118 * 1127 1119 * @returns VBox status code. 1128 * @param pVM Pointer to the VM.1129 1120 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a 1130 1121 * enmFlush). 1131 1122 * @param enmFlush Type of flush. 1132 */ 1133 static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush) 1134 { 1135 AssertPtr(pVM); 1136 Assert(pVM->hm.s.fNestedPaging); 1137 1123 * 1124 * @remarks Caller is responsible for making sure this function is called only 1125 * when NestedPaging is supported and providing @a enmFlush that is 1126 * supported by the CPU. 1127 */ 1128 static void hmR0VmxFlushEpt(PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush) 1129 { 1138 1130 uint64_t descriptor[2]; 1139 1131 if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS) … … 1351 1343 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1352 1344 */ 1353 hmR0VmxFlushEpt(pV M, pVCpu, pVM->hm.s.vmx.enmFlushEpt);1345 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt); 1354 1346 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch); 1355 1347 HMVMX_SET_TAGGED_TLB_FLUSHED(); … … 1367 1359 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}. 1368 1360 */ 1369 hmR0VmxFlushEpt(pV M, pVCpu, pVM->hm.s.vmx.enmFlushEpt);1361 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt); 1370 1362 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb); 1371 1363 HMVMX_SET_TAGGED_TLB_FLUSHED(); … … 1389 1381 } 1390 1382 else 1391 hmR0VmxFlushEpt(pV M, pVCpu, pVM->hm.s.vmx.enmFlushEpt);1383 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt); 1392 1384 1393 1385 HMVMX_SET_TAGGED_TLB_FLUSHED(); … … 1459 1451 if (pVCpu->hm.s.fForceTLBFlush) 1460 1452 { 1461 hmR0VmxFlushEpt(pV M, pVCpu, pVM->hm.s.vmx.enmFlushEpt);1453 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt); 1462 1454 pVCpu->hm.s.fForceTLBFlush = false; 1463 1455 } … … 1471 1463 /* We cannot flush individual entries without VPID support. Flush using EPT. */ 1472 1464 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown); 1473 hmR0VmxFlushEpt(pV M, pVCpu, pVM->hm.s.vmx.enmFlushEpt);1465 hmR0VmxFlushEpt(pVCpu, pVM->hm.s.vmx.enmFlushEpt); 1474 1466 } 1475 1467 else -
trunk/src/VBox/VMM/VMMR0/HMVMXR0.h
r48153 r48218 31 31 VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu); 32 32 VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit); 33 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem); 33 VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys, bool fEnabledBySystem, 34 void *pvMsrs); 34 35 VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys); 35 36 VMMR0DECL(int) VMXR0GlobalInit(void);
Note:
See TracChangeset
for help on using the changeset viewer.