- Timestamp: May 4, 2009 2:05:45 PM (16 years ago)
- Location: trunk
- Files: 5 edited
Legend:
- Unmodified lines (no marker)
- Added lines (prefixed with +)
- Removed lines (prefixed with -)
trunk/include/VBox/vm.h (r19286 -> r19326)

          /** Inhibit interrupts pending. See EMGetInhibitInterruptsPC(). */
          #define VMCPU_FF_INHIBIT_INTERRUPTS         RT_BIT_32(24)
      +   /** Check for pending TLB shootdown actions. */
      +   #define VMCPU_FF_TLB_SHOOTDOWN              RT_BIT_32(25)
          /** CSAM needs to scan the page that's being executed */
          #define VMCPU_FF_CSAM_SCAN_PAGE             RT_BIT_32(26)
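The new force-action bit is used with the same set/test/clear pattern as the other VMCPU_FF_* flags: a producer raises it when work is queued for a VCPU that is not currently executing guest code, and the ring-0 world-switch code tests and clears it before resuming the guest. A minimal sketch of that pattern, assuming the existing VMCPU_FF_SET / VMCPU_FF_ISPENDING / VMCPU_FF_CLEAR macros from VBox/vm.h (the test and clear calls are the ones visible in the ring-0 hunks below; the two functions here are purely illustrative):

    /* Illustrative only -- not part of this changeset. */
    static void exampleQueueShootdown(PVMCPU pVCpu)
    {
        /* Producer: note that a TLB shootdown is pending for this VCPU. */
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    }

    static void exampleServiceShootdown(PVMCPU pVCpu)
    {
        /* Consumer: service and acknowledge the request before guest execution resumes. */
        if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        {
            /* ... invalidate the queued pages here ... */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
        }
    }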
trunk/src/VBox/VMM/HWACCM.cpp (r19257 -> r19326)

          HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushASID,       "/HWACCM/CPU%d/Flush/TLB/ASID");
          HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatFlushTLBInvlpga, "/HWACCM/CPU%d/Flush/TLB/PhysInvl");
      +   HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown,    "/HWACCM/CPU%d/Flush/TLB/Shootdown");

          HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCOffset,    "/HWACCM/CPU%d/TSC/Offset");
          HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTSCIntercept, "/HWACCM/CPU%d/TSC/Intercept");
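The new /HWACCM/CPU%d/Flush/TLB/Shootdown statistic is registered per virtual CPU during ring-3 init and is bumped once per drained shootdown queue in the ring-0 hunks below. The register/increment pairing, taken directly from this changeset (HWACCM_REG_COUNTER is the registration helper already used throughout this file):

    /* Registration, ring-3 (from the hunk above): */
    HWACCM_REG_COUNTER(&pVCpu->hwaccm.s.StatTlbShootdown, "/HWACCM/CPU%d/Flush/TLB/Shootdown");

    /* Increment, ring-0, once per serviced shootdown request (from the hunks below): */
    STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);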
trunk/src/VBox/VMM/HWACCMInternal.h (r18932 -> r19326)

          /** Maxium resume loops allowed in ring 0 (safety precaution) */
          #define HWACCM_MAX_RESUME_LOOPS             1024
      +
      +   /** Maximum number of page flushes we are willing to remember before considering a full TLB flush. */
      +   #define HWACCM_MAX_TLB_SHOOTDOWN_PAGES      16

          /** Size for the EPT identity page table (1024 4 MB pages to cover the entire address space). */
    ...
          /* Current ASID in use by the VM */
          RTUINT                  uCurrentASID;
      +
      +   /** To keep track of pending TLB shootdown pages. (SMP guest only) */
      +   RTGCPTR                 aTlbShootdownPages[HWACCM_MAX_TLB_SHOOTDOWN_PAGES];
      +   unsigned                cTlbShootdownPages;

          struct
    ...
          STAMCOUNTER             StatFlushASID;
          STAMCOUNTER             StatFlushTLBInvlpga;
      +   STAMCOUNTER             StatTlbShootdown;

          STAMCOUNTER             StatSwitchGuestIrq;
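This changeset contains the consumer side of the mechanism: the bounded per-VCPU queue above and the drain loops in the ring-0 world-switch code below. A hedged sketch of what a producer has to do when it wants another VCPU to invalidate a page (the helper name hwaccmQueueInvalidatePage is hypothetical; the actual queueing site is not part of this diff):

    /* Hypothetical producer sketch: queue one page for a VCPU that is not
       currently executing guest code, falling back to a full TLB flush when
       the bounded queue overflows (HWACCM_MAX_TLB_SHOOTDOWN_PAGES = 16). */
    static void hwaccmQueueInvalidatePage(PVMCPU pVCpu, RTGCPTR GCVirt)
    {
        if (pVCpu->hwaccm.s.cTlbShootdownPages < HWACCM_MAX_TLB_SHOOTDOWN_PAGES)
        {
            /* Room left: remember the page and raise the force-action flag. */
            pVCpu->hwaccm.s.aTlbShootdownPages[pVCpu->hwaccm.s.cTlbShootdownPages++] = GCVirt;
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
        }
        else
        {
            /* Queue full: give up on per-page invalidation and flush everything. */
            pVCpu->hwaccm.s.fForceTLBFlush     = true;
            pVCpu->hwaccm.s.cTlbShootdownPages = 0;
        }
    }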
trunk/src/VBox/VMM/VMMR0/HWSVMR0.cpp (r19288 -> r19326)

          Assert(!pVM->hwaccm.s.svm.fAlwaysFlushTLB || pVCpu->hwaccm.s.fForceTLBFlush);
          pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = pVCpu->hwaccm.s.fForceTLBFlush;
      -   }
      +
      +       if (    !pVM->hwaccm.s.svm.fAlwaysFlushTLB
      +           &&  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
      +       {
      +           /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
      +           STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
      +           for (unsigned i=0;i<pVCpu->hwaccm.s.cTlbShootdownPages;i++)
      +               SVMR0InvlpgA(pVCpu->hwaccm.s.aTlbShootdownPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
      +       }
      +   }
      +   pVCpu->hwaccm.s.cTlbShootdownPages = 0;
      +   VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
      +
          AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
          AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
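Two details of the AMD-V hunk are worth noting: the drain is skipped when fAlwaysFlushTLB is set (the erratum workaround path), since the whole ASID is flushed anyway, and the queue count plus the force-action flag are reset unconditionally afterwards so stale entries cannot be replayed on a later world switch. A condensed sketch of that control flow, with the structure simplified for illustration (pVMCB is the VMCB pointer of the surrounding function; this is not the literal code):

    /* Condensed sketch of the SVM-side decision, assuming the names used in the diff. */
    if (pVCpu->hwaccm.s.fForceTLBFlush || pVM->hwaccm.s.svm.fAlwaysFlushTLB)
    {
        /* Full flush requested: per-page invalidation would be redundant. */
        pVMCB->ctrl.TLBCtrl.n.u1TLBFlush = true;
    }
    else if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    {
        /* Invalidate only the queued guest-virtual pages for the current ASID. */
        for (unsigned i = 0; i < pVCpu->hwaccm.s.cTlbShootdownPages; i++)
            SVMR0InvlpgA(pVCpu->hwaccm.s.aTlbShootdownPages[i], pVMCB->ctrl.TLBCtrl.n.u32ASID);
    }
    /* Always reset, even when nothing was drained, to avoid replaying stale entries. */
    pVCpu->hwaccm.s.cTlbShootdownPages = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);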
trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp (r19288 -> r19326)

          pVCpu->hwaccm.s.idLastCpu = pCpu->idCpu;
      -   pCpu->fFlushTLB           = false;
      +   pCpu->fFlushTLB = false;

          if (pVCpu->hwaccm.s.fForceTLBFlush)
      +   {
              vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, 0);
      +   }
      +   else
      +   if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
      +   {
      +       /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
      +       STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
      +       for (unsigned i=0;i<pVCpu->hwaccm.s.cTlbShootdownPages;i++)
      +       {
      +           /* aTlbShootdownPages contains physical addresses in this case. */
      +           vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.aTlbShootdownPages[i]);
      +       }
      +   }
      +   pVCpu->hwaccm.s.cTlbShootdownPages = 0;
      +   VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

      #ifdef VBOX_WITH_STATISTICS
    ...
          {
              Assert(!pCpu->fFlushTLB);
      -
      -       if (!pCpu->uCurrentASID || !pVCpu->hwaccm.s.uCurrentASID)
      -           pVCpu->hwaccm.s.uCurrentASID = pCpu->uCurrentASID = 1;
      -   }
      +       Assert(pVCpu->hwaccm.s.uCurrentASID && pCpu->uCurrentASID);
      +
      +       if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
      +       {
      +           /* Deal with pending TLB shootdown actions which were queued when we were not executing code. */
      +           STAM_COUNTER_INC(&pVCpu->hwaccm.s.StatTlbShootdown);
      +           for (unsigned i=0;i<pVCpu->hwaccm.s.cTlbShootdownPages;i++)
      +               vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.aTlbShootdownPages[i]);
      +       }
      +   }
      +   pVCpu->hwaccm.s.cTlbShootdownPages = 0;
      +   VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
      +
          AssertMsg(pVCpu->hwaccm.s.cTLBFlushes == pCpu->cTLBFlushes, ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hwaccm.s.cTLBFlushes, pCpu->cTLBFlushes));
          AssertMsg(pCpu->uCurrentASID >= 1 && pCpu->uCurrentASID < pVM->hwaccm.s.uMaxASID, ("cpu%d uCurrentASID = %x\n", pCpu->idCpu, pCpu->uCurrentASID));
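On the VT-x side the same queue is drained through two different primitives, and the meaning of the queued addresses differs: in the EPT branch aTlbShootdownPages holds guest-physical addresses (see the in-line comment in the first hunk), while in the VPID branch they are guest-virtual addresses, as in the SVM path. A hedged sketch of that split; vmxR0FlushEPT and vmxR0FlushVPID are the static helpers visible in the diff, so this wrapper is illustrative only and would have to live inside HWVMXR0.cpp:

    /* Illustrative wrapper: pick the flush primitive matching the paging mode. */
    static void exampleDrainShootdownQueueVmx(PVM pVM, PVMCPU pVCpu, bool fNestedPaging)
    {
        for (unsigned i = 0; i < pVCpu->hwaccm.s.cTlbShootdownPages; i++)
        {
            if (fNestedPaging)
                /* EPT: queued entries are guest-physical addresses. */
                vmxR0FlushEPT(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.aTlbShootdownPages[i]);
            else
                /* VPID: queued entries are guest-virtual addresses. */
                vmxR0FlushVPID(pVM, pVCpu, pVM->hwaccm.s.vmx.enmFlushContext, pVCpu->hwaccm.s.aTlbShootdownPages[i]);
        }
        /* Reset the queue and acknowledge the force-action flag in all cases. */
        pVCpu->hwaccm.s.cTlbShootdownPages = 0;
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    }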