- Timestamp: Oct 7, 2008 7:28:54 AM
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/HWACCM.cpp (r12989 → r13025)

         if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_EPT)
             pVM->hwaccm.s.fNestedPaging = pVM->hwaccm.s.fAllowNestedPaging;
+
+        if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VPID)
+            pVM->hwaccm.s.vmx.fVPID = true;
 #endif
…
         LogRel(("HWACMM: cpuid 0x80000001.u32AMDFeatureEDX = %VX32\n", pVM->hwaccm.s.cpuid.u32AMDFeatureEDX));
         LogRel(("HWACCM: SVM revision  = %X\n", pVM->hwaccm.s.svm.u32Rev));
-        LogRel(("HWACCM: SVM max ASID  = %d\n", pVM->hwaccm.s.svm.u32MaxASID));
+        LogRel(("HWACCM: SVM max ASID  = %d\n", pVM->hwaccm.s.uMaxASID));
         LogRel(("HWACCM: SVM features  = %X\n", pVM->hwaccm.s.svm.u32Features));

trunk/src/VBox/VMM/HWACCMInternal.h (r12989 → r13025)

     RTR0MEMOBJ  pMemObj;
-    /* Current ASID (AMD-V only) */
+    /* Current ASID (AMD-V)/VPID (Intel) */
     uint32_t    uCurrentASID;
     /* TLB flush count */
…
     bool                        fAllowNestedPaging;

+    /** Set if we need to flush the TLB during the world switch. */
+    bool                        fForceTLBFlush;
+
+    /** Old style FPU reporting trap mask override performed (optimization) */
+    bool                        fFPUOldStyleOverride;
+
     /** Explicit alignment padding to make 32-bit gcc align u64RegisterMask
      *  naturally. */
-    bool                        padding[3+4];
+    bool                        padding[1];

     /** HWACCM_CHANGED_* flags. */
-    uint32_t                    fContextUseFlags;
-
-    /** Old style FPU reporting trap mask override performed (optimization) */
-    uint32_t                    fFPUOldStyleOverride;
+    RTUINT                      fContextUseFlags;
+
+    /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
+    RTCPUID                     idLastCpu;
+
+    /* TLB flush count */
+    RTUINT                      cTLBFlushes;
+
+    /* Current ASID in use by the VM */
+    RTUINT                      uCurrentASID;
+
+    /** Maximum ASID allowed. */
+    RTUINT                      uMaxASID;

     /** And mask for copying register contents. */
…
         /** Set if we can use VMXResume to execute guest code. */
         bool                        fResumeVM;
+
+        /** Set if VPID is supported. */
+        bool                        fVPID;

         /** R0 memory object for the VM control structure (VMCS). */
…
         /** Set if erratum 170 affects the AMD cpu. */
         bool                        fAlwaysFlushTLB;
-        /** Set if we need to flush the TLB during the world switch. */
-        bool                        fForceTLBFlush;
-
-        /* Id of the last cpu we were executing code on (NIL_RTCPUID for the first time) */
-        RTCPUID                     idLastCpu;
-
-        /* TLB flush count */
-        uint32_t                    cTLBFlushes;
-
-        /* Current ASID in use by the VM */
-        uint32_t                    uCurrentASID;

         /** R0 memory object for the VM control block (VMCB). */
…
         /** SVM revision. */
         uint32_t                    u32Rev;
-
-        /** Maximum ASID allowed. */
-        uint32_t                    u32MaxASID;

         /** SVM feature bits from cpuid 0x8000000a */

trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp (r12989 → r13025)

 VMMDECL(int) HWACCMInvalidatePage(PVM pVM, RTGCPTR GCVirt)
 {
-    /** @todo Intel for nested paging */
 #ifdef IN_RING0
-    if (pVM->hwaccm.s.svm.fSupported)
-        return SVMR0InvalidatePage(pVM, GCVirt);
+    if (pVM->hwaccm.s.vmx.fSupported)
+        return VMXR0InvalidatePage(pVM, GCVirt);
+
+    Assert(pVM->hwaccm.s.svm.fSupported);
+    return SVMR0InvalidatePage(pVM, GCVirt);
 #endif

…
 VMMDECL(int) HWACCMFlushTLB(PVM pVM)
 {
-    /** @todo Intel for nested paging */
-    if (pVM->hwaccm.s.svm.fSupported)
-    {
-        LogFlow(("HWACCMFlushTLB\n"));
-        pVM->hwaccm.s.svm.fForceTLBFlush = true;
-        STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBManual);
-    }
+    LogFlow(("HWACCMFlushTLB\n"));
+
+    pVM->hwaccm.s.fForceTLBFlush = true;
+    STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBManual);
     return VINF_SUCCESS;
 }
…

 #ifdef IN_RING0
-    /** @todo Intel for nested paging */
-    if (pVM->hwaccm.s.svm.fSupported)
-    {
-        SVMR0InvalidatePhysPage(pVM, GCPhys);
-    }
+    if (pVM->hwaccm.s.vmx.fSupported)
+        return VMXR0InvalidatePhysPage(pVM, GCPhys);
+
+    Assert(pVM->hwaccm.s.svm.fSupported);
+    SVMR0InvalidatePhysPage(pVM, GCPhys);
 #else
     HWACCMFlushTLB(pVM);

trunk/src/VBox/VMM/VMMR0/HWACCMR0.cpp (r12989 → r13025)

     DECLR0CALLBACKMEMBER(int, pfnSetupVM, (PVM pVM));

+    /** Maximum ASID allowed. */
+    uint32_t                        uMaxASID;
+
     struct
     {
…
         uint32_t                    u32Rev;

-        /** Maximum ASID allowed. */
-        uint32_t                    u32MaxASID;
-
         /** SVM feature bits from cpuid 0x8000000a */
         uint32_t                    u32Features;
…
         HWACCMR0Globals.vmx.msr.vmx_cr4_fixed1 = ASMRdMsr(MSR_IA32_VMX_CR4_FIXED1);
         HWACCMR0Globals.vmx.msr.vmx_vmcs_enum  = ASMRdMsr(MSR_IA32_VMX_VMCS_ENUM);
+        /* VPID 16 bits ASID. */
+        HWACCMR0Globals.uMaxASID               = 0x10000; /* exclusive */

         if (HWACCMR0Globals.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
…
     {
         /* Query AMD features. */
-        ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.svm.u32MaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);
+        ASMCpuId(0x8000000A, &HWACCMR0Globals.svm.u32Rev, &HWACCMR0Globals.uMaxASID, &u32Dummy, &HWACCMR0Globals.svm.u32Features);

         HWACCMR0Globals.svm.fSupported = true;
…
     pCpu->fFlushTLB = true;

+    pCpu->uCurrentASID = 0;   /* we'll aways increment this the first time (host uses ASID 0) */
+    pCpu->cTLBFlushes  = 0;
+
     /* Should never happen */
-    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
+    if (!pCpu->pMemObj)
     {
         AssertFailed();
…
     }

-    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
-    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
+    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
+    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);

     paRc[idCpu] = HWACCMR0Globals.pfnEnableCpu(pCpu, pVM, pvPageCpu, pPageCpuPhys);
     AssertRC(paRc[idCpu]);
     if (VBOX_SUCCESS(paRc[idCpu]))
-        HWACCMR0Globals.aCpuInfo[idCpu].fConfigured = true;
+        pCpu->fConfigured = true;

     return;
…
 static DECLCALLBACK(void) HWACCMR0DisableCPU(RTCPUID idCpu, void *pvUser1, void *pvUser2)
 {
-    void    *pvPageCpu;
-    RTHCPHYS pPageCpuPhys;
-    int     *paRc = (int *)pvUser1;
+    void           *pvPageCpu;
+    RTHCPHYS        pPageCpuPhys;
+    int            *paRc = (int *)pvUser1;
+    PHWACCM_CPUINFO pCpu = &HWACCMR0Globals.aCpuInfo[idCpu];

     Assert(idCpu == (RTCPUID)RTMpCpuIdToSetIndex(idCpu)); /// @todo fix idCpu == index assumption (rainy day)
     Assert(idCpu < RT_ELEMENTS(HWACCMR0Globals.aCpuInfo));

-    if (!HWACCMR0Globals.aCpuInfo[idCpu].pMemObj)
+    if (!pCpu->pMemObj)
         return;

-    pvPageCpu    = RTR0MemObjAddress(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj);
-    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(HWACCMR0Globals.aCpuInfo[idCpu].pMemObj, 0);
-
-    paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(&HWACCMR0Globals.aCpuInfo[idCpu], pvPageCpu, pPageCpuPhys);
+    pvPageCpu    = RTR0MemObjAddress(pCpu->pMemObj);
+    pPageCpuPhys = RTR0MemObjGetPagePhysAddr(pCpu->pMemObj, 0);
+
+    paRc[idCpu] = HWACCMR0Globals.pfnDisableCpu(pCpu, pvPageCpu, pPageCpuPhys);
     AssertRC(paRc[idCpu]);
     HWACCMR0Globals.aCpuInfo[idCpu].fConfigured = false;
+
+    pCpu->uCurrentASID = 0;
+
     return;
…
     pVM->hwaccm.s.vmx.msr.vmx_eptcaps    = HWACCMR0Globals.vmx.msr.vmx_eptcaps;
     pVM->hwaccm.s.svm.u32Rev             = HWACCMR0Globals.svm.u32Rev;
-    pVM->hwaccm.s.svm.u32MaxASID         = HWACCMR0Globals.svm.u32MaxASID;
     pVM->hwaccm.s.svm.u32Features        = HWACCMR0Globals.svm.u32Features;
     pVM->hwaccm.s.cpuid.u32AMDFeatureECX = HWACCMR0Globals.cpuid.u32AMDFeatureECX;
…
     pVM->hwaccm.s.idEnteredCpu = NIL_RTCPUID;
 #endif
+
+    pVM->hwaccm.s.uMaxASID = HWACCMR0Globals.uMaxASID;
+
+    /* Invalidate the last cpu we were running on. */
+    pVM->hwaccm.s.idLastCpu = NIL_RTCPUID;
+
+    /* we'll aways increment this the first time (host uses ASID 0) */
+    pVM->hwaccm.s.uCurrentASID = 0;

     /* Init a VT-x or AMD-V VM. */

trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r12989 → r13025)

     ASMWrMsr(MSR_K8_VM_HSAVE_PA, pPageCpuPhys);

-    pCpu->uCurrentASID = 0;   /* we'll aways increment this the first time (host uses ASID 0) */
-    pCpu->cTLBFlushes  = 0;
     return VINF_SUCCESS;
 }
…
     /* Invalidate host state physical address. */
     ASMWrMsr(MSR_K8_VM_HSAVE_PA, 0);
-    pCpu->uCurrentASID = 0;

     return VINF_SUCCESS;
…
         pVM->hwaccm.s.svm.fAlwaysFlushTLB = true;
     }
-
-    /* Invalidate the last cpu we were running on. */
-    pVM->hwaccm.s.svm.idLastCpu = NIL_RTCPUID;
-
-    /* we'll aways increment this the first time (host uses ASID 0) */
-    pVM->hwaccm.s.svm.uCurrentASID = 0;

     return VINF_SUCCESS;
…
 #ifdef LOG_ENABLED
     pCpu = HWACCMR0GetCurrentCpu();
-    if (    pVM->hwaccm.s.svm.idLastCpu   != pCpu->idCpu
-        ||  pVM->hwaccm.s.svm.cTLBFlushes != pCpu->cTLBFlushes)
-    {
-        if (pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu)
-            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.svm.idLastCpu, pCpu->idCpu));
+    if (    pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
+        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
+            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
         else
-            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.svm.cTLBFlushes, pCpu->cTLBFlushes));
+            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
     }
     if (pCpu->fFlushTLB)
…
     /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
     /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
-    if (    pVM->hwaccm.s.svm.idLastCpu != pCpu->idCpu
+    if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
         /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
-        ||  pVM->hwaccm.s.svm.cTLBFlushes != pCpu->cTLBFlushes)
+        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
     {
         /* Force a TLB flush on VM entry. */
-        pVM->hwaccm.s.svm.fForceTLBFlush = true;
+        pVM->hwaccm.s.fForceTLBFlush = true;
     }
     else
         Assert(!pCpu->fFlushTLB || pVM->hwaccm.s.svm.fAlwaysFlushTLB);

-    pVM->hwaccm.s.svm.idLastCpu = pCpu->idCpu;
+    pVM->hwaccm.s.idLastCpu = pCpu->idCpu;

     /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
-    if (    pVM->hwaccm.s.svm.fForceTLBFlush
+    if (    pVM->hwaccm.s.fForceTLBFlush
         && !pVM->hwaccm.s.svm.fAlwaysFlushTLB)
     {
-        if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.svm.u32MaxASID
+        if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
             ||  pCpu->fFlushTLB)
         {
…
             STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);

-        pVM->hwaccm.s.svm.cTLBFlushes  = pCpu->cTLBFlushes;
-        pVM->hwaccm.s.svm.uCurrentASID = pCpu->uCurrentASID;
+        pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
+        pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
     }
     else
…

         /* We never increase uCurrentASID in the fAlwaysFlushTLB (erratum 170) case. */
-        if (!pCpu->uCurrentASID || !pVM->hwaccm.s.svm.uCurrentASID)
-            pVM->hwaccm.s.svm.uCurrentASID = pCpu->uCurrentASID = 1;
-
-        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.svm.fForceTLBFlush);
-        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.svm.fForceTLBFlush;
-    }
-    AssertMsg(pVM->hwaccm.s.svm.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.svm.cTLBFlushes, pCpu->cTLBFlushes));
-    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.svm.u32MaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
-    AssertMsg(pVM->hwaccm.s.svm.uCurrentASID >= 1 && pVM->hwaccm.s.svm.uCurrentASID < pVM->hwaccm.s.svm.u32MaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.svm.uCurrentASID));
-    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVM->hwaccm.s.svm.uCurrentASID;
+        if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
+            pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
+
+        Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVM->hwaccm.s.fForceTLBFlush);
+        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVM->hwaccm.s.fForceTLBFlush;
+    }
+    AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
+    AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
+    pVMCB->ctrl.TLBCtrl.n.u32ASID = pVM->hwaccm.s.uCurrentASID;

 #ifdef VBOX_WITH_STATISTICS
…
     /* In case we execute a goto ResumeExecution later on. */
     pVM->hwaccm.s.svm.fResumeVM = true;
-    pVM->hwaccm.s.svm.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;
+    pVM->hwaccm.s.fForceTLBFlush = pVM->hwaccm.s.svm.fAlwaysFlushTLB;

     Assert(sizeof(pVM->hwaccm.s.svm.pVMCBPhys) == 8);
…

         /* Must be set by PGMSyncCR3 */
-        Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVM->hwaccm.s.svm.fForceTLBFlush);
+        Assert(PGMGetGuestMode(pVM) <= PGMMODE_PROTECTED || pVM->hwaccm.s.fForceTLBFlush);
     }
     if (rc == VINF_SUCCESS)
…
     Assert(pVM->hwaccm.s.svm.fSupported);

-    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.svm.idLastCpu, pVM->hwaccm.s.svm.uCurrentASID));
+    LogFlow(("SVMR0Enter cpu%d last=%d asid=%d\n", pCpu->idCpu, pVM->hwaccm.s.idLastCpu, pVM->hwaccm.s.uCurrentASID));
     pVM->hwaccm.s.svm.fResumeVM = false;
…
 VMMR0DECL(int) SVMR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
 {
-    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.svm.fForceTLBFlush;
+    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.fForceTLBFlush;

     /* Skip it if a TLB flush is already pending. */
…
 VMMR0DECL(int) SVMR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
 {
-    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.svm.fForceTLBFlush;
+    bool fFlushPending = pVM->hwaccm.s.svm.fAlwaysFlushTLB | pVM->hwaccm.s.fForceTLBFlush;

     Assert(pVM->hwaccm.s.fNestedPaging);

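The SVM world-switch hunk above and the new VPID block in HWVMXR0.cpp below follow the same ASID bookkeeping, now held in the shared hwaccm.s state instead of the SVM-only substruct. A minimal, self-contained sketch of that scheme follows; the struct layout and function name are illustrative, only the decision logic mirrors the changeset.

/* Sketch of the per-cpu ASID/VPID allocation; types and names are illustrative. */
#include <stdbool.h>
#include <stdint.h>

#define NIL_CPUID  UINT32_MAX

typedef struct HOSTCPU
{
    uint32_t idCpu;          /* host cpu this state belongs to */
    uint32_t uCurrentASID;   /* last ASID handed out on this cpu (0 = host) */
    uint32_t cTLBFlushes;    /* flush generation; bumped on every full flush */
    bool     fFlushTLB;      /* first use of this cpu -> must flush */
} HOSTCPU;

typedef struct VMTLBSTATE
{
    uint32_t idLastCpu;      /* cpu we last ran on (NIL_CPUID initially) */
    uint32_t cTLBFlushes;    /* flush generation we last observed there */
    uint32_t uCurrentASID;   /* ASID currently assigned to this VM */
    uint32_t uMaxASID;       /* exclusive upper bound (cpuid 0x8000000a / 16-bit VPID) */
    bool     fForceTLBFlush; /* set by HWACCMFlushTLB and on reschedule */
} VMTLBSTATE;

/* Returns the ASID to program into the VMCB ASID / VMCS VPID field and sets
 * *pfFlushTLB when the guest's TLB entries must really be flushed on entry. */
static uint32_t hwaccmAssignAsid(VMTLBSTATE *pVM, HOSTCPU *pCpu, bool *pfFlushTLB)
{
    /* Rescheduled to another cpu, or this cpu's TLB was flushed since we last
     * ran here (generation mismatch): our previous ASID may hold stale entries. */
    if (   pVM->idLastCpu   != pCpu->idCpu
        || pVM->cTLBFlushes != pCpu->cTLBFlushes)
        pVM->fForceTLBFlush = true;
    pVM->idLastCpu = pCpu->idCpu;

    if (pVM->fForceTLBFlush)
    {
        if (   ++pCpu->uCurrentASID >= pVM->uMaxASID
            || pCpu->fFlushTLB)
        {
            /* ASID space exhausted or first use of this cpu: full flush, restart at 1. */
            pCpu->fFlushTLB    = false;
            pCpu->uCurrentASID = 1;                 /* ASID 0 belongs to the host. */
            pCpu->cTLBFlushes++;
        }
        else
            pVM->fForceTLBFlush = false;            /* A fresh ASID is as good as a flush. */

        pVM->cTLBFlushes  = pCpu->cTLBFlushes;
        pVM->uCurrentASID = pCpu->uCurrentASID;
    }
    else if (!pCpu->uCurrentASID || !pVM->uCurrentASID)
        pVM->uCurrentASID = pCpu->uCurrentASID = 1; /* Never hand out ASID 0. */

    *pfFlushTLB = pVM->fForceTLBFlush;              /* Flush only if a new ASID was not enough. */
    pVM->fForceTLBFlush = false;                    /* Consumed for this world switch. */
    return pVM->uCurrentASID;
}

The key invariants are that ASID 0 stays reserved for the host and that the per-cpu cTLBFlushes generation counter lets a VM detect that some other VM flushed the TLB of the cpu it last ran on, so it cannot keep reusing its old ASID.
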
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r12989 → r13025)

     val = val | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
-              | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;    /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */
+
+    /* Without nested paging we should intercept invlpg and cr3 mov instructions. */
+    if (!pVM->hwaccm.s.fNestedPaging)
+        val |=   VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
+               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
+               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;

     /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch failure with an invalid control fields error. (combined with some other exit reasons) */
…
         val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;

+#ifdef HWACCM_VTX_WITH_EPT
+        if (pVM->hwaccm.s.fNestedPaging)
+            val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
+#endif
         /* Mask away the bits that the CPU doesn't support */
         /** @todo make sure they don't conflict with the above requirements. */
…
      */
     pVM->hwaccm.s.vmx.u32TrapMask = HWACCM_VMX_TRAP_MASK;
+#ifndef DEBUG
+    if (pVM->hwaccm.s.fNestedPaging)
+        pVM->hwaccm.s.vmx.u32TrapMask &= ~RT_BIT(X86_XCPT_PF);   /* no longer need to intercept #PF. */
+#endif
     rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, pVM->hwaccm.s.vmx.u32TrapMask);
     AssertRC(rc);
…
         /* Note: protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
         val |= X86_CR0_PE | X86_CR0_PG;
-        /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
-        val |= X86_CR0_WP;
+        if (!pVM->hwaccm.s.fNestedPaging)
+        {
+            /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
+            val |= X86_CR0_WP;
+        }

         /* Always enable caching. */
…
         /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
         val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
-        switch(pVM->hwaccm.s.enmShadowMode)
-        {
-        case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
-        case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
-        case PGMMODE_32_BIT:        /* 32-bit paging. */
-            break;
-
-        case PGMMODE_PAE:           /* PAE paging. */
-        case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
-            /** @todo use normal 32 bits paging */
-            val |= X86_CR4_PAE;
-            break;
-
-        case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
-        case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
+
+        if (!pVM->hwaccm.s.fNestedPaging)
+        {
+            switch(pVM->hwaccm.s.enmShadowMode)
+            {
+            case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
+            case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
+            case PGMMODE_32_BIT:        /* 32-bit paging. */
+                break;
+
+            case PGMMODE_PAE:           /* PAE paging. */
+            case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
+                /** @todo use normal 32 bits paging */
+                val |= X86_CR4_PAE;
+                break;
+
+            case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
+            case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
 #ifdef VBOX_ENABLE_64_BITS_GUESTS
-            break;
+                break;
 #else
-            AssertFailed();
-            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
-#endif
-        default:                    /* shut up gcc */
-            AssertFailed();
-            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
-        }
+                AssertFailed();
+                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
+#endif
+            default:                    /* shut up gcc */
+                AssertFailed();
+                return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
+            }
+        }
+
 #ifdef HWACCM_VMX_EMULATE_REALMODE
…
     if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
     {
-        /* Save our shadow CR3 register. */
         val = PGMGetHyperCR3(pVM);
         Assert(val);
+        if (pVM->hwaccm.s.fNestedPaging)
+        {
+#if HC_ARCH_BITS == 64
+            rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_FULL, val);
+#else
+            rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_FULL, val);
+            rc = VMXWriteVMCS(VMX_VMCS_CTRL_EPTP_HIGH, val);
+#endif
+            AssertRC(rc);
+
+            /* Save the real guest CR3 in VMX_VMCS_GUEST_CR3 */
+            val = pCtx->cr3;
+        }
+        /* Save our shadow CR3 register. */
         rc = VMXWriteVMCS(VMX_VMCS_GUEST_CR3, val);
         AssertRC(rc);
…
     RTGCUINTPTR errCode, instrInfo, uInterruptState;
     bool        fSyncTPR = false;
+    PHWACCM_CPUINFO pCpu = 0;
     unsigned    cResume = 0;
 #ifdef VBOX_STRICT
…
     }

+#ifdef LOG_ENABLED
+    pCpu = HWACCMR0GetCurrentCpu();
+    if (    pVM->hwaccm.s.idLastCpu   != pCpu->idCpu
+        ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+    {
+        if (pVM->hwaccm.s.idLastCpu != pCpu->idCpu)
+            Log(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVM->hwaccm.s.idLastCpu, pCpu->idCpu));
+        else
+            Log(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+    }
+    if (pCpu->fFlushTLB)
+        Log(("Force TLB flush: first time cpu %d is used -> flush\n", pCpu->idCpu));
+#endif
+
     /*
      * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
…
         STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
         goto end;
+    }
+
+    /* Deal with tagged TLBs if VPID is supported. */
+    if (pVM->hwaccm.s.vmx.fVPID)
+    {
+        pCpu = HWACCMR0GetCurrentCpu();
+        /* Force a TLB flush for the first world switch if the current cpu differs from the one we ran on last. */
+        /* Note that this can happen both for start and resume due to long jumps back to ring 3. */
+        if (    pVM->hwaccm.s.idLastCpu != pCpu->idCpu
+            /* if the tlb flush count has changed, another VM has flushed the TLB of this cpu, so we can't use our current ASID anymore. */
+            ||  pVM->hwaccm.s.cTLBFlushes != pCpu->cTLBFlushes)
+        {
+            /* Force a TLB flush on VM entry. */
+            pVM->hwaccm.s.fForceTLBFlush = true;
+        }
+        else
+            Assert(!pCpu->fFlushTLB);
+
+        pVM->hwaccm.s.idLastCpu = pCpu->idCpu;
+
+        /* Make sure we flush the TLB when required. Switch ASID to achieve the same thing, but without actually flushing the whole TLB (which is expensive). */
+        if (pVM->hwaccm.s.fForceTLBFlush)
+        {
+            if (    ++pCpu->uCurrentASID >= pVM->hwaccm.s.uMaxASID
+                ||  pCpu->fFlushTLB)
+            {
+                pCpu->fFlushTLB    = false;
+                pCpu->uCurrentASID = 1;       /* start at 1; host uses 0 */
+                pCpu->cTLBFlushes++;
+            }
+            else
+            {
+                STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushASID);
+                pVM->hwaccm.s.fForceTLBFlush = false;
+            }
+
+            pVM->hwaccm.s.cTLBFlushes  = pCpu->cTLBFlushes;
+            pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID;
+        }
+        else
+        {
+            Assert(!pCpu->fFlushTLB);
+
+            if (!pCpu->uCurrentASID || !pVM->hwaccm.s.uCurrentASID)
+                pVM->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
+        }
+        AssertMsg(pVM->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVM->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
+        AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
+        AssertMsg(pVM->hwaccm.s.uCurrentASID >= 1 && pVM->hwaccm.s.uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d VM uCurrentASID = %x\n", pCpu->idCpu, pVM->hwaccm.s.uCurrentASID));
+
+        rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_VPID, pVM->hwaccm.s.uCurrentASID);
+        AssertRC(rc);
+
+        if (pVM->hwaccm.s.fForceTLBFlush)
+        {
+
+        }
+
+#ifdef VBOX_WITH_STATISTICS
+        if (pVM->hwaccm.s.fForceTLBFlush)
+            STAM_COUNTER_INC(&pVM->hwaccm.s.StatFlushTLBWorldSwitch);
+        else
+            STAM_COUNTER_INC(&pVM->hwaccm.s.StatNoFlushTLBWorldSwitch);
+#endif
+
     }
…
     /* In case we execute a goto ResumeExecution later on. */
     pVM->hwaccm.s.vmx.fResumeVM = true;
+    pVM->hwaccm.s.fForceTLBFlush = false;

     /*
…
     CPUMSetGuestCR4(pVM, val);

+    /* Can be updated behind our back in the nested paging case. */
     CPUMSetGuestCR2(pVM, ASMGetCR2());
+
+    /* Note: no reason to sync back the CRx registers. They can't be changed by the guest. */
+    /* Note: only in the nested paging case can CR3 & CR4 be changed by the guest. */
+    if (pVM->hwaccm.s.fNestedPaging)
+    {
+        VMXReadVMCS(VMX_VMCS_GUEST_CR3, &val);
+
+        if (val != pCtx->cr3)
+        {
+            CPUMSetGuestCR3(pVM, val);
+            PGMUpdateCR3(pVM, val);
+        }
+    }

     /* Sync back DR7 here. */
…
         case X86_XCPT_PF: /* Page fault */
         {
-            Log2(("Page fault at %VGv error code %x\n", exitQualification ,errCode));
+#ifdef DEBUG
+            if (pVM->hwaccm.s.fNestedPaging)
+            {   /* A genuine pagefault.
+                 * Forward the trap to the guest by injecting the exception and resuming execution.
+                 */
+                Log(("Guest page fault at %VGv cr2=%VGv error code %x rsp=%VGv\n", (RTGCPTR)pCtx->rip, exitQualification, errCode, (RTGCPTR)pCtx->rsp));
+
+                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
+                /* The error code might have been changed. */
+                errCode = TRPMGetErrorCode(pVM);
+
+                TRPMResetTrap(pVM);
+
+                /* Now we must update CR2. */
+                pCtx->cr2 = exitQualification;
+                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
+                AssertRC(rc);
+
+                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
+                goto ResumeExecution;
+            }
+#endif
+            Assert(!pVM->hwaccm.s.fNestedPaging);
+
+            Log2(("Page fault at %VGv error code %x\n", exitQualification, errCode));
             /* Exit qualification contains the linear address of the page fault. */
             TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
…
     }

+    case VMX_EXIT_EPT_VIOLATION:        /* 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures. */
+    {
+        Log2(("EPT Page fault at %VGv error code %x\n", exitQualification, errCode));
+        Assert(pVM->hwaccm.s.fNestedPaging);
+
+        /* Exit qualification contains the linear address of the page fault. */
+        TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
+        TRPMSetErrorCode(pVM, errCode);
+        TRPMSetFaultAddress(pVM, exitQualification);
+
+        /* Handle the pagefault trap for the nested shadow table. */
+        rc = PGMR0Trap0eHandlerNestedPaging(pVM, PGMMODE_EPT, errCode, CPUMCTX2CORE(pCtx), exitQualification);
+        Log2(("PGMR0Trap0eHandlerNestedPaging %VGv returned %Vrc\n", pCtx->rip, rc));
+        if (rc == VINF_SUCCESS)
+        {   /* We've successfully synced our shadow pages, so let's just continue execution. */
+            Log2(("Shadow page fault at %VGv cr2=%VGp error code %x\n", pCtx->rip, exitQualification, errCode));
+            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
+
+            TRPMResetTrap(pVM);
+
+            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
+            goto ResumeExecution;
+        }
+
+#ifdef VBOX_STRICT
+        if (rc != VINF_EM_RAW_EMULATE_INSTR)
+            LogFlow(("PGMTrap0eHandlerNestedPaging failed with %d\n", rc));
+#endif
+        /* Need to go back to the recompiler to emulate the instruction. */
+        TRPMResetTrap(pVM);
+        break;
+    }
+
     case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */
         /* Clear VM-exit on IF=1 change. */
…
     {
         Log2(("VMX: invlpg\n"));
+        Assert(!pVM->hwaccm.s.fNestedPaging);
+
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);
         rc = EMInterpretInvlpg(pVM, CPUMCTX2CORE(pCtx), exitQualification);
…
             break;
         case 3:
+            Assert(!pVM->hwaccm.s.fNestedPaging);
             pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
             break;
…
         Log2(("VMX: mov x, crx\n"));
         STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
+
+        Assert(!pVM->hwaccm.s.fNestedPaging || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != USE_REG_CR3);

         /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
…
 }

+/**
+ * Invalidates a guest page
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   GCVirt      Page to invalidate
+ */
+VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, RTGCPTR GCVirt)
+{
+    bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
+
+    /* @todo Only relevant if we want to use VPID. */
+
+    /* Skip it if a TLB flush is already pending. */
+    if (!fFlushPending)
+    {
+    }
+    return VINF_SUCCESS;
+}
+
+/**
+ * Invalidates a guest page by physical address
+ *
+ * NOTE: Assumes the current instruction references this physical page though a virtual address!!
+ *
+ * @returns VBox status code.
+ * @param   pVM         The VM to operate on.
+ * @param   GCPhys      Page to invalidate
+ */
+VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, RTGCPHYS GCPhys)
+{
+    bool fFlushPending = pVM->hwaccm.s.fForceTLBFlush;
+
+    Assert(pVM->hwaccm.s.fNestedPaging);
+
+    /* Skip it if a TLB flush is already pending. */
+    if (!fFlushPending)
+    {
+    }
+    return VINF_SUCCESS;
+}
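With nested paging enabled, the two page-fault paths added above split cleanly: a plain #PF VM-exit can only be a genuine guest fault (the VMM's own tables now fault through VMX_EXIT_EPT_VIOLATION instead), while an EPT violation is handed to PGMR0Trap0eHandlerNestedPaging. A small self-contained sketch of that routing decision; the enum and wrapper function are illustrative, not part of the VMM.

/* Sketch of page-fault routing; only the decision logic mirrors the changeset. */
#include <stdbool.h>

typedef enum PFROUTE
{
    PFROUTE_REFLECT_TO_GUEST,   /* inject #PF back into the guest (VMXR0InjectEvent path) */
    PFROUTE_SYNC_SHADOW_TABLES  /* let PGM resolve it (PGMR0Trap0eHandlerNestedPaging / PGMTrap0eHandler) */
} PFROUTE;

/* With nested paging, the nested (EPT) tables fault via VMX_EXIT_EPT_VIOLATION, so a
 * plain #PF exit must be a genuine guest fault. Without nested paging every #PF is
 * first offered to PGM, which decides whether the shadow tables just need syncing. */
static PFROUTE RoutePageFault(bool fNestedPaging, bool fIsEptViolation)
{
    if (fNestedPaging)
        return fIsEptViolation ? PFROUTE_SYNC_SHADOW_TABLES
                               : PFROUTE_REFLECT_TO_GUEST;
    return PFROUTE_SYNC_SHADOW_TABLES;
}

This is also why the changeset only drops X86_XCPT_PF from the exception bitmap when fNestedPaging is set: under shadow paging PGM still has to see every guest page fault first.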