Changeset 43494 in vbox for trunk/src/VBox/VMM/VMMR0
- Timestamp: Oct 1, 2012 2:29:11 PM
- Location: trunk/src/VBox/VMM/VMMR0
- Files: 3 edited
trunk/src/VBox/VMM/VMMR0/HMR0.cpp (r43430 -> r43494)

     /** Maximum ASID allowed. */
-    uint32_t uMaxASID;
+    uint32_t uMaxAsid;

     /** VT-x data. */
...
     struct
     {
-        /* HWCR msr (for diagnostics) */
-        uint64_t msrHWCR;
+        /* HWCR MSR (for diagnostics) */
+        uint64_t msrHwcr;

         /** SVM revision. */
...
     g_HvmR0.vmx.hostEFER = ASMRdMsr(MSR_K6_EFER);
     /* VPID 16 bits ASID. */
-    g_HvmR0.uMaxASID = 0x10000; /* exclusive */
+    g_HvmR0.uMaxAsid = 0x10000; /* exclusive */

     if (g_HvmR0.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
...
     /* Query AMD features. */
     uint32_t u32Dummy;
-    ASMCpuId(0x8000000A, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxASID,
-             &u32Dummy, &g_HvmR0.svm.u32Features);
+    ASMCpuId(0x8000000A, &g_HvmR0.svm.u32Rev, &g_HvmR0.uMaxAsid, &u32Dummy, &g_HvmR0.svm.u32Features);

     /*
...
     if (RT_SUCCESS(rc))
     {
-        /* Read the HWCR msr for diagnostics. */
-        g_HvmR0.svm.msrHWCR = ASMRdMsr(MSR_K8_HWCR);
+        /* Read the HWCR MSR for diagnostics. */
+        g_HvmR0.svm.msrHwcr = ASMRdMsr(MSR_K8_HWCR);
         g_HvmR0.svm.fSupported = true;
     }
...
     pCpu->idCpu = idCpu;
-    pCpu->uCurrentASID = 0;    /* we'll aways increment this the first time (host uses ASID 0) */
-    /* Do NOT reset cTLBFlushes here, see @bugref{6255}. */
+    pCpu->uCurrentAsid = 0;    /* we'll aways increment this the first time (host uses ASID 0) */
+    /* Do NOT reset cTlbFlushes here, see @bugref{6255}. */

     int rc;
...
     Assert(g_HvmR0.aCpuInfo[i].hMemObj == NIL_RTR0MEMOBJ);
     g_HvmR0.aCpuInfo[i].fConfigured = false;
-    g_HvmR0.aCpuInfo[i].cTLBFlushes = 0;
+    g_HvmR0.aCpuInfo[i].cTlbFlushes = 0;
...
         rc = VINF_SUCCESS; /* nothing to do */

-    pCpu->uCurrentASID = 0;
+    pCpu->uCurrentAsid = 0;
     return rc;
 }
...
     pVM->hm.s.vmx.msr.vmx_vmcs_enum = g_HvmR0.vmx.msr.vmx_vmcs_enum;
     pVM->hm.s.vmx.msr.vmx_eptcaps   = g_HvmR0.vmx.msr.vmx_eptcaps;
-    pVM->hm.s.svm.msrHWCR           = g_HvmR0.svm.msrHWCR;
+    pVM->hm.s.svm.msrHwcr           = g_HvmR0.svm.msrHwcr;
     pVM->hm.s.svm.u32Rev            = g_HvmR0.svm.u32Rev;
     pVM->hm.s.svm.u32Features       = g_HvmR0.svm.u32Features;
...
     pVM->hm.s.lLastError            = g_HvmR0.lLastError;

-    pVM->hm.s.uMaxASID              = g_HvmR0.uMaxASID;
+    pVM->hm.s.uMaxAsid              = g_HvmR0.uMaxAsid;
...
     /* We'll aways increment this the first time (host uses ASID 0) */
-    pVCpu->hm.s.uCurrentASID = 0;
+    pVCpu->hm.s.uCurrentAsid = 0;
 }
...
     /* Reset these to force a TLB flush for the next entry. (-> EXPENSIVE) */
     pVCpu->hm.s.idLastCpu = NIL_RTCPUID;
-    pVCpu->hm.s.uCurrentASID = 0;
+    pVCpu->hm.s.uCurrentAsid = 0;
     VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
 }
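The two uMaxAsid values above come from different places: on VT-x the VPID field in the VMCS is architecturally 16 bits, so the limit is simply 0x10000 (exclusive), while on AMD-V the ASID count is reported by CPUID Fn8000_000Ah in EBX. A minimal user-mode sketch of that probing, assuming GCC/Clang's <cpuid.h> __get_cpuid() in place of VBox's ASMCpuId() (variable names here are illustrative, not VBox code):

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        /* VT-x: the VPID field is 16 bits wide, so the limit is the fixed
           0x10000 (exclusive) that the VT-x path above hardcodes. */
        unsigned int uMaxAsidVmx = 0x10000;

        /* AMD-V: CPUID Fn8000_000Ah returns the SVM revision in EAX, the
           number of ASIDs in EBX and the SVM feature bits in EDX -- the same
           registers the ASMCpuId() call above stores into u32Rev, uMaxAsid
           and u32Features. */
        unsigned int uSvmRev = 0, uMaxAsidSvm = 0, uDummy = 0, uSvmFeatures = 0;
        if (__get_cpuid(0x8000000A, &uSvmRev, &uMaxAsidSvm, &uDummy, &uSvmFeatures))
            printf("SVM rev=%#x cAsids=%#x features=%#x\n", uSvmRev, uMaxAsidSvm, uSvmFeatures);
        printf("VT-x uMaxAsid=%#x\n", uMaxAsidVmx);
        return 0;
    }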
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r43455 -> r43494)

  * Theoretically, other hypervisors may have used ASIDs, ideally we should flush all non-zero ASIDs
  * when enabling SVM. AMD doesn't have an SVM instruction to flush all ASIDs (flushing is done
- * upon VMRUN). Therefore, just set the fFlushASIDBeforeUse flag which instructs hmR0SvmSetupTLB()
+ * upon VMRUN). Therefore, just set the fFlushAsidBeforeUse flag which instructs hmR0SvmSetupTLB()
  * to flush the TLB with before using a new ASID.
  */
-    pCpu->fFlushASIDBeforeUse = true;
+    pCpu->fFlushAsidBeforeUse = true;

     /*
      * Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}.
      */
-    ++pCpu->cTLBFlushes;
+    ++pCpu->cTlbFlushes;

     return VINF_SUCCESS;
...
     pVCpu->hm.s.svm.hMemObjVMCBHost  = NIL_RTR0MEMOBJ;
     pVCpu->hm.s.svm.hMemObjVMCB      = NIL_RTR0MEMOBJ;
-    pVCpu->hm.s.svm.hMemObjMSRBitmap = NIL_RTR0MEMOBJ;
+    pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;

     /* Allocate one page for the host context */
...
     /* Allocate 8 KB for the MSR bitmap (doesn't seem to be a way to convince SVM not to use it) */
-    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMSRBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
+    rc = RTR0MemObjAllocCont(&pVCpu->hm.s.svm.hMemObjMsrBitmap, 2 << PAGE_SHIFT, false /* fExecutable */);
     if (RT_FAILURE(rc))
         return rc;

-    pVCpu->hm.s.svm.pvMSRBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMSRBitmap);
-    pVCpu->hm.s.svm.HCPhysMSRBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMSRBitmap, 0);
+    pVCpu->hm.s.svm.pvMsrBitmap     = RTR0MemObjAddress(pVCpu->hm.s.svm.hMemObjMsrBitmap);
+    pVCpu->hm.s.svm.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.svm.hMemObjMsrBitmap, 0);
     /* Set all bits to intercept all MSR accesses. */
-    ASMMemFill32(pVCpu->hm.s.svm.pvMSRBitmap, 2 << PAGE_SHIFT, 0xffffffff);
+    ASMMemFill32(pVCpu->hm.s.svm.pvMsrBitmap, 2 << PAGE_SHIFT, 0xffffffff);
...
         pVCpu->hm.s.svm.hMemObjVMCB = NIL_RTR0MEMOBJ;
     }
-    if (pVCpu->hm.s.svm.hMemObjMSRBitmap != NIL_RTR0MEMOBJ)
-    {
-        RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMSRBitmap, false);
-        pVCpu->hm.s.svm.pvMSRBitmap      = 0;
-        pVCpu->hm.s.svm.HCPhysMSRBitmap  = 0;
-        pVCpu->hm.s.svm.hMemObjMSRBitmap = NIL_RTR0MEMOBJ;
-    }
+    if (pVCpu->hm.s.svm.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
+    {
+        RTR0MemObjFree(pVCpu->hm.s.svm.hMemObjMsrBitmap, false);
+        pVCpu->hm.s.svm.pvMsrBitmap      = 0;
+        pVCpu->hm.s.svm.HCPhysMsrBitmap  = 0;
+        pVCpu->hm.s.svm.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
+    }
 }
...
     /* Set IO and MSR bitmap addresses. */
     pvVMCB->ctrl.u64IOPMPhysAddr  = pVM->hm.s.svm.HCPhysIOBitmap;
-    pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMSRBitmap;
+    pvVMCB->ctrl.u64MSRPMPhysAddr = pVCpu->hm.s.svm.HCPhysMsrBitmap;

     /* No LBR virtualization. */
...
 {
     unsigned ulBit;
-    uint8_t *pvMSRBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMSRBitmap;
+    uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.svm.pvMsrBitmap;

     if (ulMSR <= 0x00001FFF)
...
         /* AMD Sixth Generation x86 Processor MSRs and SYSCALL */
         ulBit = (ulMSR - 0xC0000000) * 2;
-        pvMSRBitmap += 0x800;
+        pvMsrBitmap += 0x800;
     }
     else if (   ulMSR >= 0xC0010000
...
         /* AMD Seventh and Eighth Generation Processor MSRs */
         ulBit = (ulMSR - 0xC0001000) * 2;
-        pvMSRBitmap += 0x1000;
+        pvMsrBitmap += 0x1000;
     }
     else
...
     Assert(ulBit < 16 * 1024 - 1);
     if (fRead)
-        ASMBitClear(pvMSRBitmap, ulBit);
+        ASMBitClear(pvMsrBitmap, ulBit);
     else
-        ASMBitSet(pvMSRBitmap, ulBit);
+        ASMBitSet(pvMsrBitmap, ulBit);

     if (fWrite)
-        ASMBitClear(pvMSRBitmap, ulBit + 1);
+        ASMBitClear(pvMsrBitmap, ulBit + 1);
     else
-        ASMBitSet(pvMSRBitmap, ulBit + 1);
+        ASMBitSet(pvMsrBitmap, ulBit + 1);
...
  * so we cannot reuse the ASIDs without flushing.
  */
-    bool fNewASID = false;
+    bool fNewAsid = false;
     if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
-        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     {
         pVCpu->hm.s.fForceTLBFlush = true;
-        fNewASID = true;
+        fNewAsid = true;
     }
...
  * This is the AMD erratum 170. We need to flush the entire TLB for each world switch. Sad.
  */
-        pCpu->uCurrentASID = 1;
-        pVCpu->hm.s.uCurrentASID = 1;
-        pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
+        pCpu->uCurrentAsid = 1;
+        pVCpu->hm.s.uCurrentAsid = 1;
+        pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
         pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
     }
     else if (pVCpu->hm.s.fForceTLBFlush)
     {
-        if (fNewASID)
-        {
-            ++pCpu->uCurrentASID;
+        if (fNewAsid)
+        {
+            ++pCpu->uCurrentAsid;
             bool fHitASIDLimit = false;
-            if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
+            if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
             {
-                pCpu->uCurrentASID = 1;    /* start at 1; host uses 0 */
-                pCpu->cTLBFlushes++;
+                pCpu->uCurrentAsid = 1;    /* start at 1; host uses 0 */
+                pCpu->cTlbFlushes++;
                 fHitASIDLimit = true;
...
             {
                 pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_SINGLE_CONTEXT;
-                pCpu->fFlushASIDBeforeUse = true;
+                pCpu->fFlushAsidBeforeUse = true;
             }
             else
             {
                 pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
-                pCpu->fFlushASIDBeforeUse = false;
+                pCpu->fFlushAsidBeforeUse = false;
             }
         }

         if (   !fHitASIDLimit
-            && pCpu->fFlushASIDBeforeUse)
+            && pCpu->fFlushAsidBeforeUse)
         {
             if (pVM->hm.s.svm.u32Features & AMD_CPUID_SVM_FEATURE_EDX_FLUSH_BY_ASID)
...
             {
                 pvVMCB->ctrl.TLBCtrl.n.u8TLBFlush = SVM_TLB_FLUSH_ENTIRE;
-                pCpu->fFlushASIDBeforeUse = false;
+                pCpu->fFlushAsidBeforeUse = false;
             }
         }

-        pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
-        pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
+        pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
+        pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
     }
     else
...
     /* Update VMCB with the ASID. */
-    pvVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentASID;
-
-    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
-              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
-    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
-              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
-              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
+    pvVMCB->ctrl.TLBCtrl.n.u32ASID = pVCpu->hm.s.uCurrentAsid;
+
+    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
+              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
+    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
+    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

 #ifdef VBOX_WITH_STATISTICS
...
     if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
         LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu, pCpu->idCpu));
-    else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
-        LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    else if (pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
+        LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
     else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
         LogFlow(("Manual TLB flush\n"));
...
     Assert(pvVMCB->ctrl.IntCtrl.n.u1VIrqMasking);
     Assert(pvVMCB->ctrl.u64IOPMPhysAddr  == pVM->hm.s.svm.HCPhysIOBitmap);
-    Assert(pvVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMSRBitmap);
+    Assert(pvVMCB->ctrl.u64MSRPMPhysAddr == pVCpu->hm.s.svm.HCPhysMsrBitmap);
     Assert(pvVMCB->ctrl.u64LBRVirt == 0);
...
         && !(pvVMCB->ctrl.u32InterceptCtrl2 & SVM_CTRL2_INTERCEPT_RDTSCP))
     {
-        pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
-        uint64_t u64GuestTSCAux = 0;
-        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
+        pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
+        uint64_t u64GuestTscAux = 0;
+        rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
         AssertRC(rc2);
-        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);
+        ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
...
     pVCpu->hm.s.svm.pfnVMRun(pVCpu->hm.s.svm.HCPhysVMCBHost, pVCpu->hm.s.svm.HCPhysVMCB, pCtx, pVM, pVCpu);
 #endif

+    ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
     ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
...
     /* Restore host's TSC_AUX. */
     if (u32HostExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP)
-        ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
+        ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);

     TMCpuTickSetLastSeen(pVCpu, ASMReadTSC() +
...
     Assert(pVM->hm.s.svm.fSupported);

-    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentASID));
+    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVCpu->hm.s.idLastCpu, pVCpu->hm.s.uCurrentAsid));
     pVCpu->hm.s.fResumeVM = false;
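For reference, the hmR0SvmSetMSRPermission() hunks above index an 8 KB MSR permission map in which every MSR owns two consecutive bits (read intercept, then write intercept) and each architected MSR range maps to its own 2 KB region (byte offsets 0x0, 0x800 and 0x1000). Below is a hedged standalone sketch of that indexing following the MSRPM layout in the AMD APM; the helper names are illustrative, and the range arithmetic follows the APM rather than the exact expressions in the VBox code above:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns the absolute bit index of the read-intercept bit for uMsr, or -1
       for MSRs outside the three architected ranges (always intercepted). */
    static int svmMsrpmBitIndex(uint32_t uMsr)
    {
        if (uMsr <= 0x00001FFF)                  /* region 0: legacy MSRs */
            return (int)(uMsr * 2);
        if (uMsr - 0xC0000000 <= 0x1FFF)         /* region 1: byte offset 0x800 */
            return (int)(0x800 * 8 + (uMsr - 0xC0000000) * 2);
        if (uMsr - 0xC0010000 <= 0x1FFF)         /* region 2: byte offset 0x1000 */
            return (int)(0x1000 * 8 + (uMsr - 0xC0010000) * 2);
        return -1;
    }

    /* Clearing a bit lets the guest access the MSR directly; setting it makes
       VMRUN intercept the access. The write bit follows the read bit. */
    static void svmSetMsrPermission(uint8_t *pbMsrpm, uint32_t uMsr, bool fAllowRd, bool fAllowWr)
    {
        int iBit = svmMsrpmBitIndex(uMsr);
        if (iBit < 0)
            return;
        if (fAllowRd) pbMsrpm[iBit / 8] &= (uint8_t)~(1u << (iBit & 7));
        else          pbMsrpm[iBit / 8] |= (uint8_t)(1u << (iBit & 7));
        iBit++;
        if (fAllowWr) pbMsrpm[iBit / 8] &= (uint8_t)~(1u << (iBit & 7));
        else          pbMsrpm[iBit / 8] |= (uint8_t)(1u << (iBit & 7));
    }

Clearing bits is what the ASMBitClear() calls in the diff do for passed-through MSRs; the allocation code fills the whole map with 0xffffffff first, so every MSR defaults to intercepted.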
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r43469 -> r43494)

  */
     if (   pVM
-        && pVM->hm.s.vmx.fVPID
+        && pVM->hm.s.vmx.fVpid
         && (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS))
     {
         hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
-        pCpu->fFlushASIDBeforeUse = false;
+        pCpu->fFlushAsidBeforeUse = false;
     }
     else
-        pCpu->fFlushASIDBeforeUse = true;
+        pCpu->fFlushAsidBeforeUse = true;

     /*
      * Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}.
      */
-    ++pCpu->cTLBFlushes;
+    ++pCpu->cTlbFlushes;

     return VINF_SUCCESS;
...
     /* Initialize these always, see hmR3InitFinalizeR0(). */
-    pVM->hm.s.vmx.enmFlushEPT  = VMX_FLUSH_EPT_NONE;
-    pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NONE;
+    pVM->hm.s.vmx.enmFlushEpt  = VMX_FLUSH_EPT_NONE;
+    pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;

     /* Determine optimal flush type for EPT. */
...
         if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_SINGLE_CONTEXT)
-            pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_SINGLE_CONTEXT;
+            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
         else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVEPT_CAPS_ALL_CONTEXTS)
-            pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_ALL_CONTEXTS;
+            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
         else
         {
...
              * We cannot ignore EPT at this point as we've already setup Unrestricted Guest execution.
              */
-            pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
+            pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
             return VERR_VMX_GENERIC;
         }
...
          * Should never really happen. EPT is supported but INVEPT instruction is not supported.
          */
-        pVM->hm.s.vmx.enmFlushEPT = VMX_FLUSH_EPT_NOT_SUPPORTED;
+        pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
         return VERR_VMX_GENERIC;
     }
...
     /* Determine optimal flush type for VPID. */
-    if (pVM->hm.s.vmx.fVPID)
+    if (pVM->hm.s.vmx.fVpid)
     {
         if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID)
         {
             if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT)
-                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_SINGLE_CONTEXT;
+                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
             else if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_ALL_CONTEXTS)
-                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_ALL_CONTEXTS;
+                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
             else
             {
...
                 if (pVM->hm.s.vmx.msr.vmx_eptcaps & MSR_IA32_VMX_EPT_CAPS_INVVPID_CAPS_SINGLE_CONTEXT_RETAIN_GLOBALS)
                     Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
-                pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
-                pVM->hm.s.vmx.fVPID = false;
+                pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
+                pVM->hm.s.vmx.fVpid = false;
             }
         }
...
          */
             Log(("VMXR0SetupVM: VPID supported without INVEPT support. Ignoring VPID.\n"));
-            pVM->hm.s.vmx.enmFlushVPID = VMX_FLUSH_VPID_NOT_SUPPORTED;
-            pVM->hm.s.vmx.fVPID = false;
+            pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
+            pVM->hm.s.vmx.fVpid = false;
         }
     }
...
         val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;

-    if (pVM->hm.s.vmx.fVPID)
+    if (pVM->hm.s.vmx.fVpid)
         val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
...
     /*
      * Setup the right TLB function based on CPU capabilities.
      */
-    if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID)
-        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBBoth;
+    if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
+        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBBoth;
     else if (pVM->hm.s.fNestedPaging)
-        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBEPT;
-    else if (pVM->hm.s.vmx.fVPID)
-        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBVPID;
+        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBEPT;
+    else if (pVM->hm.s.vmx.fVpid)
+        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBVPID;
     else
-        pVM->hm.s.vmx.pfnSetupTaggedTLB = hmR0VmxSetupTLBDummy;
+        pVM->hm.s.vmx.pfnSetupTaggedTlb = hmR0VmxSetupTLBDummy;

 vmx_end:
...
     PHMGLOBLCPUINFO pCpu;

-    Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVPID);
+    Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid);

     pCpu = HMR0GetCurrentCpu();
...
      * or the host Cpu is online after a suspend/resume, so we cannot reuse the current ASID anymore.
      */
-    bool fNewASID = false;
+    bool fNewAsid = false;
     if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
-        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     {
         pVCpu->hm.s.fForceTLBFlush = true;
-        fNewASID = true;
+        fNewAsid = true;
     }
...
     if (pVCpu->hm.s.fForceTLBFlush)
     {
-        if (fNewASID)
-        {
-            ++pCpu->uCurrentASID;
-            if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
+        if (fNewAsid)
+        {
+            ++pCpu->uCurrentAsid;
+            if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
             {
-                pCpu->uCurrentASID = 1;    /* start at 1; host uses 0 */
-                pCpu->cTLBFlushes++;
-                pCpu->fFlushASIDBeforeUse = true;
+                pCpu->uCurrentAsid = 1;    /* start at 1; host uses 0 */
+                pCpu->cTlbFlushes++;
+                pCpu->fFlushAsidBeforeUse = true;
             }

-            pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
-            if (pCpu->fFlushASIDBeforeUse)
-                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
+            pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
+            if (pCpu->fFlushAsidBeforeUse)
+                hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
         }
         else
...
                 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
             else
-                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
-        }
-
-        pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
+                hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
+        }
+
+        pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
         pVCpu->hm.s.fForceTLBFlush = false;
     }
     else
     {
-        AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
-                  ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
-                   pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
-                   pCpu->uCurrentASID, pCpu->cTLBFlushes));
+        AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
+                  ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
+                   pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
+                   pCpu->uCurrentAsid, pCpu->cTlbFlushes));

         /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
...
         }
         else
-            hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
+            hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
     }
     else
...
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

-    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
-              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
-    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
-              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
-              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
+    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
+              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
+    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
+    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
+              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

     /* Update VMCS with the VPID. */
-    int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
+    int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
     AssertRC(rc);
 }
...
     Assert(pVM->hm.s.fNestedPaging);
-    Assert(!pVM->hm.s.vmx.fVPID);
+    Assert(!pVM->hm.s.vmx.fVpid);

     pCpu = HMR0GetCurrentCpu();
...
     if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
-        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     {
         pVCpu->hm.s.fForceTLBFlush = true;
...
     pVCpu->hm.s.idLastCpu = pCpu->idCpu;
-    pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
+    pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;

     if (pVCpu->hm.s.fForceTLBFlush)
-        hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
+        hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
     else
     {
...
             STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
-            hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEPT);
+            hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
         }
     }
...
     PHMGLOBLCPUINFO pCpu;

-    Assert(pVM->hm.s.vmx.fVPID);
+    Assert(pVM->hm.s.vmx.fVpid);
     Assert(!pVM->hm.s.fNestedPaging);
...
     if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
-        || pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
     {
         /* Force a TLB flush on VM entry. */
...
     if (pVCpu->hm.s.fForceTLBFlush)
     {
-        ++pCpu->uCurrentASID;
-        if (pCpu->uCurrentASID >= pVM->hm.s.uMaxASID)
-        {
-            pCpu->uCurrentASID = 1;    /* start at 1; host uses 0 */
-            pCpu->cTLBFlushes++;
-            pCpu->fFlushASIDBeforeUse = true;
+        ++pCpu->uCurrentAsid;
+        if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
+        {
+            pCpu->uCurrentAsid = 1;    /* start at 1; host uses 0 */
+            pCpu->cTlbFlushes++;
+            pCpu->fFlushAsidBeforeUse = true;
         }

         pVCpu->hm.s.fForceTLBFlush = false;
-        pVCpu->hm.s.cTLBFlushes = pCpu->cTLBFlushes;
-        pVCpu->hm.s.uCurrentASID = pCpu->uCurrentASID;
-        if (pCpu->fFlushASIDBeforeUse)
-            hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
+        pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
+        pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
+        if (pCpu->fFlushAsidBeforeUse)
+            hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
     }
     else
     {
-        AssertMsg(pVCpu->hm.s.uCurrentASID && pCpu->uCurrentASID,
-                  ("hm->uCurrentASID=%lu hm->cTLBFlushes=%lu cpu->uCurrentASID=%lu cpu->cTLBFlushes=%lu\n",
-                   pVCpu->hm.s.uCurrentASID, pVCpu->hm.s.cTLBFlushes,
-                   pCpu->uCurrentASID, pCpu->cTLBFlushes));
+        AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
+                  ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
+                   pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
+                   pCpu->uCurrentAsid, pCpu->cTlbFlushes));

         /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
...
         }
         else
-            hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVPID, 0 /* GCPtr */);
+            hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
     }
...
     VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

-    AssertMsg(pVCpu->hm.s.cTLBFlushes == pCpu->cTLBFlushes,
-              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTLBFlushes, pCpu->cTLBFlushes));
-    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hm.s.uMaxASID,
-              ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVCpu->hm.s.uCurrentASID >= 1 && pVCpu->hm.s.uCurrentASID < pVM->hm.s.uMaxASID,
-              ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentASID));
-
-    int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentASID);
+    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
+              ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
+    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxASID,
+              ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
+    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxASID,
+              ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

     int rc = VMXWriteVMCS(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
     AssertRC(rc);
...
 #ifdef LOG_ENABLED
     if (   pVM->hm.s.fNestedPaging
-        || pVM->hm.s.vmx.fVPID)
+        || pVM->hm.s.vmx.fVpid)
     {
         PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
...
                      pCpu->idCpu));
         }
-        else if (pVCpu->hm.s.cTLBFlushes != pCpu->cTLBFlushes)
-        {
-            LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTLBFlushes,
-                     pCpu->cTLBFlushes));
-        }
+        else if (pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
+        {
+            LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes,
+                     pCpu->cTlbFlushes));
+        }
         else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
...
     ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
     /* Deal with tagged TLB setup and invalidation. */
-    pVM->hm.s.vmx.pfnSetupTaggedTLB(pVM, pVCpu);
+    pVM->hm.s.vmx.pfnSetupTaggedTlb(pVM, pVCpu);

     /*
...
     uint64_t descriptor[2];

-    Assert(pVM->hm.s.vmx.fVPID);
+    Assert(pVM->hm.s.vmx.fVpid);
     if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
     {
...
     {
         AssertPtr(pVCpu);
-        AssertMsg(pVCpu->hm.s.uCurrentASID != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
-        AssertMsg(pVCpu->hm.s.uCurrentASID <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentASID));
-        descriptor[0] = pVCpu->hm.s.uCurrentASID;
+        AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
+        AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
+        descriptor[0] = pVCpu->hm.s.uCurrentAsid;
         descriptor[1] = GCPtr;
     }
     int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
     AssertMsg(rc == VINF_SUCCESS,
-              ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentASID : 0, GCPtr, rc));
+              ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
 #ifdef VBOX_WITH_STATISTICS
     if (pVCpu)
...
  * function maybe called in a loop with individual addresses.
  */
-    if (pVM->hm.s.vmx.fVPID)
+    if (pVM->hm.s.vmx.fVpid)
     {
         /* If we can flush just this page do it, otherwise flush as little as possible. */
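The hmR0VmxFlushVPID() hunk near the end builds the two-element descriptor array handed to VMXR0InvVPID(). Per the Intel SDM the INVVPID operand is 128 bits: the VPID in bits 15:0, bits 63:16 reserved as zero, and a linear address in bits 127:64 that only the individual-address flush type consumes. A small sketch of that packing (the struct and function names are illustrative, not VBox's):

    #include <stdint.h>

    /* 128-bit INVVPID descriptor, per the Intel SDM layout. */
    typedef struct INVVPIDDESC
    {
        uint64_t u64VpidAndReserved;   /* bits 15:0 = VPID, bits 63:16 must be zero */
        uint64_t u64LinearAddr;        /* bits 127:64 = linear address */
    } INVVPIDDESC;

    static INVVPIDDESC vmxMakeInvVpidDesc(uint16_t uVpid, uint64_t GCPtr)
    {
        INVVPIDDESC Desc;
        Desc.u64VpidAndReserved = uVpid;   /* zero-extends; reserved bits stay clear */
        Desc.u64LinearAddr      = GCPtr;   /* ignored for single/all-context flushes */
        return Desc;
    }

This mirrors the descriptor[0]/descriptor[1] assignments in the diff, including the UINT16_MAX assertion: an ASID wider than 16 bits cannot be expressed in the descriptor's VPID field.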