Changeset 19910 in vbox
- Timestamp: May 22, 2009 12:31:50 PM (16 years ago)
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/HWACCMInternal.h (r19812 → r19910)

      /** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
    - #define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      16
    + #define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      8

      /** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
    …
      /** To keep track of pending TLB shootdown pages. (SMP guest only) */
    - RTGCPTR   aTlbShootdownPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
    - RTUINT    cTlbShootdownPages;
    + struct
    + {
    +     RTGCPTR   aPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
    +     unsigned  cPages;
    + } TlbShootdown;

      RTUINT    padding2[1];
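For orientation, the sketch below models the idea behind the new per-VCPU TlbShootdown sub-struct in standalone C: a small fixed-size array of pending page addresses plus a count, degrading to a full TLB flush once the queue (now 8 entries) overflows. The names here (TLBSHOOTDOWN, QueuePage, fFullFlush) and the plain uint64_t address type are simplified stand-ins, not VirtualBox code; the real structure uses RTGCPTR and signals a full flush via the VMCPU_FF_TLB_FLUSH force-action flag (and, per this changeset, the per-page queuing path is still disabled behind #if 1 in hwaccmQueueInvlPage).

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TLB_SHOOTDOWN_PAGES 8   /* mirrors HWACCM_MAX_TLB_SHOOTDOWN_PAGES after this change */

    /* Simplified stand-in for the per-VCPU TlbShootdown sub-struct. */
    typedef struct
    {
        uint64_t aPages[MAX_TLB_SHOOTDOWN_PAGES]; /* queued guest addresses */
        unsigned cPages;                          /* number of valid entries */
        bool     fFullFlush;                      /* stand-in for VMCPU_FF_TLB_FLUSH */
    } TLBSHOOTDOWN;

    /* Queue one page; fall back to a full TLB flush when the queue overflows. */
    static void QueuePage(TLBSHOOTDOWN *pQueue, uint64_t GCPtr)
    {
        if (pQueue->fFullFlush)
            return;                     /* a pending full flush already covers everything */
        if (pQueue->cPages >= MAX_TLB_SHOOTDOWN_PAGES)
        {
            pQueue->fFullFlush = true;  /* queue full: promote to a full flush */
            pQueue->cPages = 0;
            return;
        }
        pQueue->aPages[pQueue->cPages++] = GCPtr;
    }

    int main(void)
    {
        TLBSHOOTDOWN Queue = { {0}, 0, false };
        for (uint64_t i = 0; i < 10; i++)        /* queue more pages than fit */
            QueuePage(&Queue, 0x1000 * i);
        printf("queued=%u fullFlush=%d\n", Queue.cPages, Queue.fFullFlush);
        return 0;
    }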
trunk/src/VBox/VMM/VMMAll/HWACCMAll.cpp (r19903 → r19910)

      /**
    -  * Invalidates a guest page
    +  * Queues a page for invalidation
       *
       * @returns VBox status code.
       * @param   pVCpu       The VMCPU to operate on.
       * @param   GCVirt      Page to invalidate
       */
    + void hwaccmQueueInvlPage(PVMCPU pVCpu, RTGCPTR GCVirt)
    + {
    +     Assert(HWACCMIsEnabled(pVCpu->CTX_SUFF(pVM)));
    +
    +     /* Nothing to do if a TLB flush is already pending */
    +     if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
    +         return;
    + #if 1
    +     VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    + #else
    +     if (iPage == RT_ELEMENTS(pVCpu->hwaccm.s.TlbShootdown.aPages))
    +         VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    +     else
    +         VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    + #endif
    + }
    +
    + /**
    +  * Invalidates a guest page
    +  *
    +  * @returns VBox status code.
    +  * @param   pVCpu       The VMCPU to operate on.
    +  * @param   GCVirt      Page to invalidate
    +  */
      VMMDECL(int) HWACCMInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
      {
    +     STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
      #ifdef IN_RING0
          PVM pVM = pVCpu->CTX_SUFF(pVM);
    …
      #endif

    +     hwaccmQueueInvlPage(pVCpu, GCVirt);
          return VINF_SUCCESS;
      }
    …
      VMMDECL(int) HWACCMInvalidatePageOnAllVCpus(PVM pVM, RTGCPTR GCPtr)
      {
    -     /* @todo */
    -     HWACCMInvalidatePage(VMMGetCpu(pVM), GCPtr);
    +     VMCPUID idCurCpu = VMMGetCpuId(pVM);
    +
    +     for (unsigned idCpu = 0; idCpu < pVM->cCPUs; idCpu++)
    +     {
    +         PVMCPU pVCpu = &pVM->aCpus[idCpu];
    +
    +         if (pVCpu->idCpu == idCurCpu)
    +         {
    +             HWACCMInvalidatePage(pVCpu, GCPtr);
    +         }
    +         else
    +         {
    +             hwaccmQueueInvlPage(pVCpu, GCPtr);
    +             if (VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED_EXEC)
    +             {
    +                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    + #ifdef IN_RING0
    +                 RTCPUID idHostCpu = pVCpu->hwaccm.s.idEnteredCpu;
    +                 if (idHostCpu != NIL_RTCPUID)
    +                     RTMpPokeCpu(idHostCpu);
    + #else
    +                 VMR3NotifyCpuFFU(pVCpu->pUVCpu, VMNOTIFYFF_FLAGS_POKE);
    + #endif
    +             }
    +             else
    +                 STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
    +         }
    +     }
    +
          return VINF_SUCCESS;
      }
    …
          Assert(pVM->hwaccm.s.svm.fSupported);
    +     /* AMD-V doesn't support invalidation with guest physical addresses; see comment in SVMR0InvalidatePhysPage. */
          HWACCMFlushTLBOnAllVCpus(pVM);
      #else
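HWACCMInvalidatePageOnAllVCpus() now replaces the old single-CPU /* @todo */ with a real cross-VCPU protocol: the calling VCPU invalidates the page immediately, every other VCPU gets the request queued, and any VCPU currently executing guest code is poked so it drops back into the VMM and processes the queue. A minimal standalone sketch of that dispatch pattern follows; VCPUSKETCH, InvalidateNow, QueueInvalidation and PokeCpu are hypothetical stand-ins for PVMCPU, HWACCMInvalidatePage, hwaccmQueueInvlPage and RTMpPokeCpu / VMR3NotifyCpuFFU, not VirtualBox code.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_VCPUS 4

    /* Simplified stand-in for a VCPU; the real code uses PVMCPU and VMCPUSTATE. */
    typedef struct
    {
        unsigned idCpu;
        bool     fInGuestCode;   /* stand-in for VMCPUSTATE_STARTED_EXEC */
        unsigned cQueuedPages;   /* stand-in for TlbShootdown.cPages */
    } VCPUSKETCH;

    static void InvalidateNow(VCPUSKETCH *pVCpu, uint64_t GCPtr)
    {
        printf("vcpu%u: invalidate %#llx directly\n", pVCpu->idCpu, (unsigned long long)GCPtr);
    }

    static void QueueInvalidation(VCPUSKETCH *pVCpu, uint64_t GCPtr)
    {
        (void)GCPtr;
        pVCpu->cQueuedPages++;   /* the real code also raises a TLB flush/shootdown flag */
    }

    static void PokeCpu(VCPUSKETCH *pVCpu)
    {
        printf("vcpu%u: poked to leave guest code and process the queue\n", pVCpu->idCpu);
    }

    /* Invalidate a page on every vCPU: the caller's vCPU flushes immediately,
     * the others get the request queued and are poked only if they are
     * currently running guest code. */
    static void InvalidatePageOnAllVCpus(VCPUSKETCH *paVCpus, unsigned idCurCpu, uint64_t GCPtr)
    {
        for (unsigned i = 0; i < NUM_VCPUS; i++)
        {
            if (paVCpus[i].idCpu == idCurCpu)
                InvalidateNow(&paVCpus[i], GCPtr);
            else
            {
                QueueInvalidation(&paVCpus[i], GCPtr);
                if (paVCpus[i].fInGuestCode)
                    PokeCpu(&paVCpus[i]);
            }
        }
    }

    int main(void)
    {
        VCPUSKETCH aVCpus[NUM_VCPUS] = { {0, false, 0}, {1, true, 0}, {2, false, 0}, {3, true, 0} };
        InvalidatePageOnAllVCpus(aVCpus, 0, 0x7000);
        return 0;
    }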
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r19903 → r19910)

              /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
              STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    -         for (unsigned i=0;i<pVCpu->hwaccm.s.cTlbShootdownPages;i++)
    -             SVMR0InvlpgA(pVCpu->hwaccm.s.aTlbShootdownPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
    +         for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
    +             SVMR0InvlpgA(pVCpu->hwaccm.s.TlbShootdown.aPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
          }
      }
    - pVCpu->hwaccm.s.cTlbShootdownPages = 0;
    + pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
      VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    …
      AssertMsgReturn(pVMCB, ("Invalid pVMCB\n"), VERR_EM_INTERNAL_ERROR);

    - STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatFlushPageManual);
      #if HC_ARCH_BITS == 32
      /* If we get a flush in 64 bits guest mode, then force a full TLB flush. Invlpga takes only 32 bits addresses. */
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r19829 → r19910)

          VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
          VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    -     pVCpu->hwaccm.s.cTlbShootdownPages = 0;
    +     pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
          return;
      }
    …
          /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
          STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    -     for (unsigned i=0;i<pVCpu->hwaccm.s.cTlbShootdownPages;i++)
    +     for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
          {
              /* aTlbShootdownPages contains physical addresses in this case. */
    -         vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.aTlbShootdownPages[i]);
    +         vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
          }
      }
    - pVCpu->hwaccm.s.cTlbShootdownPages = 0;
    + pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
      VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    …
          /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
          STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
    -     for (unsigned i=0;i<pVCpu->hwaccm.s.cTlbShootdownPages;i++)
    -         vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.aTlbShootdownPages[i]);
    +     for (unsigned i=0;i<pVCpu->hwaccm.s.TlbShootdown.cPages;i++)
    +         vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.TlbShootdown.aPages[i]);
      }
      }
    - pVCpu->hwaccm.s.cTlbShootdownPages = 0;
    + pVCpu->hwaccm.s.TlbShootdown.cPages = 0;
      VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
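The HWSVMR0.cpp and HWVMXR0.cpp hunks above are the consumer side of the same mechanism: just before resuming guest execution, each VCPU drains its shootdown queue with per-page hardware flushes (INVLPGA for AMD-V, EPT/VPID flushes for VT-x), resets the count and clears VMCPU_FF_TLB_SHOOTDOWN. A simplified standalone sketch of that drain step, using stand-in names (TLBSHOOTDOWN, FlushOnePage, DrainShootdownQueue) rather than the real VirtualBox APIs:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_TLB_SHOOTDOWN_PAGES 8

    typedef struct
    {
        uint64_t aPages[MAX_TLB_SHOOTDOWN_PAGES];
        unsigned cPages;
    } TLBSHOOTDOWN;

    /* Stand-in for the hardware-assisted per-page flush (INVLPGA / EPT / VPID flush). */
    static void FlushOnePage(uint64_t Addr)
    {
        printf("flush %#llx\n", (unsigned long long)Addr);
    }

    /* Drain the queue just before resuming guest execution, then reset it;
     * the real code also clears the VMCPU_FF_TLB_SHOOTDOWN force-action flag. */
    static void DrainShootdownQueue(TLBSHOOTDOWN *pQueue)
    {
        for (unsigned i = 0; i < pQueue->cPages; i++)
            FlushOnePage(pQueue->aPages[i]);
        pQueue->cPages = 0;
    }

    int main(void)
    {
        TLBSHOOTDOWN Queue = { { 0x1000, 0x2000, 0x3000 }, 3 };
        DrainShootdownQueue(&Queue);
        return 0;
    }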