Changeset 26202 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Feb 3, 2010 3:19:36 PM
- svn:sync-xref-src-repo-rev: 57224
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
--- trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r26180)
+++ trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r26202)
@@ -452,8 +452,11 @@
      * Call the worker.
      */
-    pgmLock(pVM);
-    int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault);
-    Assert(PGMIsLockOwner(pVM));
-    pgmUnlock(pVM);
+    bool fLockTaken = false;
+    int rc = PGM_BTH_PFN(Trap0eHandler, pVCpu)(pVCpu, uErr, pRegFrame, pvFault, &fLockTaken);
+    if (fLockTaken)
+    {
+        Assert(PGMIsLockOwner(pVM));
+        pgmUnlock(pVM);
+    }
     if (rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
         rc = VINF_SUCCESS;
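This hunk shows the pattern the changeset introduces: instead of the caller unconditionally taking and releasing the PGM lock around the worker, the worker now decides when to lock and reports ownership back through an out-parameter, so the caller releases only what was actually taken. A minimal sketch of that hand-off follows; the names (worker, take_lock, release_lock, do_cheap_check_without_lock) are hypothetical stand-ins for the real PGM routines, not the actual API:

    #include <stdbool.h>

    static bool do_cheap_check_without_lock(void) { return false; }
    static void take_lock(void)    { /* stands in for pgmLock(pVM) */ }
    static void release_lock(void) { /* stands in for pgmUnlock(pVM) */ }

    /* Worker: takes the lock only once it must touch shared state and
     * reports ownership back through the out-parameter. */
    static int worker(bool *pfLockTaken)
    {
        *pfLockTaken = false;              /* promise nothing until the lock is held */

        if (do_cheap_check_without_lock()) /* e.g. the genuine-guest-fault check */
            return -1;                     /* early exit: the lock was never taken */

        *pfLockTaken = true;               /* flag and lock must stay in sync */
        take_lock();
        /* ... work on shared state; may return while still holding the lock ... */
        return 0;
    }

    int caller(void)
    {
        bool fLockTaken = false;
        int rc = worker(&fLockTaken);
        if (fLockTaken)                    /* release only what the worker took */
            release_lock();
        return rc;
    }

The point of the out-parameter is that the fast path (a guest fault that must be reflected back anyway) never pays for the lock at all.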
--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r26194)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r26202)
@@ -26,8 +26,9 @@
 *******************************************************************************/
 RT_C_DECLS_BEGIN
-PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault);
+PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
 PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
-PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
+PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
+PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
@@ -74,8 +75,11 @@
  * @param   pRegFrame   Trap register frame.
  * @param   pvFault     The fault address.
+ * @param   pfLockTaken PGM lock taken here or not (out)
  */
-PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
+PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
+
+    *pfLockTaken = false;
 
 # if defined(IN_RC) && defined(VBOX_STRICT)
@@ -143,4 +147,21 @@
     const unsigned  iPDSrc = 0;
 # endif /* !PGM_WITH_PAGING */
+
+    /* First check for a genuine guest page fault. */
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDSrc->a[iPDSrc], pvFault);
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
+    if (rc == VINF_EM_RAW_GUEST_TRAP)
+    {
+        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
+                     = &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+        return rc;
+    }
+# endif /* PGM_WITH_PAGING */
+
+    /* Take the big lock now. */
+    *pfLockTaken = true;
+    pgmLock(pVM);
 
     /* Fetch the guest PDE */
@@ -214,17 +235,20 @@
 
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        /*
-         * If we successfully correct the write protection fault due to dirty bit
-         * tracking, or this page fault is a genuine one, then return immediately.
-         */
-        STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-        rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
-        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeCheckPageFault, e);
-        if (    rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT
-            ||  rc == VINF_EM_RAW_GUEST_TRAP)
-        {
-            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
-                         = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
-            LogBird(("Trap0eHandler: returns %s\n", rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? "VINF_SUCCESS" : "VINF_EM_RAW_GUEST_TRAP"));
-            return rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? VINF_SUCCESS : rc;
+        /* Dirty page handling. */
+        if (uErr & X86_TRAP_PF_RW)  /* write fault? */
+        {
+            /*
+             * If we successfully correct the write protection fault due to dirty bit
+             * tracking, then return immediately.
+             */
+            STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
+            rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], &pPDSrc->a[iPDSrc], pvFault);
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
+            if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
+            {
+                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution)
+                             = rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT ? &pVCpu->pgm.s.StatRZTrap0eTime2DirtyAndAccessed : &pVCpu->pgm.s.StatRZTrap0eTime2GuestTrap; });
+                LogBird(("Trap0eHandler: returns VINF_SUCCESS\n"));
+                return VINF_SUCCESS;
+            }
         }
@@ -1992,21 +2016,19 @@
  * @param   pVCpu       The VMCPU handle.
  * @param   uErr        Page fault error code.
- * @param   pPdeDst     Shadow page directory entry.
  * @param   pPdeSrc     Guest page directory entry.
  * @param   GCPtrPage   Guest context page address.
  */
-PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
+PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
 {
     bool fUserLevelFault = !!(uErr & X86_TRAP_PF_US);
     bool fWriteFault = !!(uErr & X86_TRAP_PF_RW);
     bool fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu));
+# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
+    bool fMaybeNXEFault = (uErr & X86_TRAP_PF_ID) && CPUMIsGuestNXEnabled(pVCpu);
+# endif
     unsigned uPageFaultLevel;
    int rc;
     PVM pVM = pVCpu->CTX_SUFF(pVM);
-    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-
-    Assert(PGMIsLockOwner(pVM));
-
-    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
+
     LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
 
@@ -2026,6 +2048,6 @@
         if (    (uErr & X86_TRAP_PF_RSVD)
             ||  !pPml4eSrc->n.u1Present
-            ||  ((uErr & X86_TRAP_PF_ID) && pPml4eSrc->n.u1NoExecute && CPUMIsGuestNXEnabled(pVCpu))
             ||  (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write)
+            ||  (fMaybeNXEFault && pPml4eSrc->n.u1NoExecute)
             ||  (fUserLevelFault && !pPml4eSrc->n.u1User)
            )
@@ -2046,6 +2068,6 @@
             ||  !pPdpeSrc->n.u1Present
 # if PGM_GST_TYPE == PGM_TYPE_AMD64 /* NX, r/w, u/s bits in the PDPE are long mode only */
-            ||  ((uErr & X86_TRAP_PF_ID) && pPdpeSrc->lm.u1NoExecute && CPUMIsGuestNXEnabled(pVCpu))
             ||  (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write)
+            ||  (fMaybeNXEFault && pPdpeSrc->lm.u1NoExecute)
             ||  (fUserLevelFault && !pPdpeSrc->lm.u1User)
 # endif
@@ -2062,8 +2084,8 @@
         if (    (uErr & X86_TRAP_PF_RSVD)
             ||  !pPdeSrc->n.u1Present
+            ||  (fMaybeWriteProtFault && !pPdeSrc->n.u1Write)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-            ||  ((uErr & X86_TRAP_PF_ID) && pPdeSrc->n.u1NoExecute && CPUMIsGuestNXEnabled(pVCpu))
+            ||  (fMaybeNXEFault && pPdeSrc->n.u1NoExecute)
 # endif
-            ||  (fMaybeWriteProtFault && !pPdeSrc->n.u1Write)
             ||  (fUserLevelFault && !pPdeSrc->n.u1User) )
         {
@@ -2096,54 +2118,6 @@
                 /* Mark guest page directory as dirty (BIG page only). */
                 pPdeSrc->b.u1Dirty = 1;
-
-                if (pPdeDst->n.u1Present)
-                {
-                    if (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY)
-                    {
-                        SHWPDE PdeDst = *pPdeDst;
-
-                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-                        Assert(pPdeSrc->b.u1Write);
-
-                        /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-                         *       fault again and take this path to only invalidate the entry.
-                         */
-                        PdeDst.n.u1Write      = 1;
-                        PdeDst.n.u1Accessed   = 1;
-                        PdeDst.au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
-                        ASMAtomicWriteSize(pPdeDst, PdeDst.u);
-                        PGM_INVL_BIG_PG(pVCpu, GCPtrPage);
-                        STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-                        return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
-                    }
-# ifdef IN_RING0
-                    else
-                    /* Check for stale TLB entry; only applies to the SMP guest case. */
-                    if (    pVM->cCpus > 1
-                        &&  pPdeDst->n.u1Write
-                        &&  pPdeDst->n.u1Accessed)
-                    {
-                        PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
-                        if (pShwPage)
-                        {
-                            PSHWPT  pPTDst  = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
-                            PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-                            if (    pPteDst->n.u1Present
-                                &&  pPteDst->n.u1Write)
-                            {
-                                /* Stale TLB entry. */
-                                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
-                                PGM_INVL_PG(pVCpu, GCPtrPage);
-
-                                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-                                return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
-                            }
-                        }
-                    }
-# endif /* IN_RING0 */
-                }
             }
-            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-            return VINF_PGM_NO_DIRTY_BIT_TRACKING;
+            return VINF_SUCCESS;
         }
         /* else: 4KB page table */
@@ -2162,12 +2136,11 @@
             const GSTPTE   PteSrc = *pPteSrc;
             if (   !PteSrc.n.u1Present
+                || (fMaybeWriteProtFault && !PteSrc.n.u1Write)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-                || ((uErr & X86_TRAP_PF_ID) && PteSrc.n.u1NoExecute && CPUMIsGuestNXEnabled(pVCpu))
+                || (fMaybeNXEFault && PteSrc.n.u1NoExecute)
 # endif
-                || (fMaybeWriteProtFault && !PteSrc.n.u1Write)
                 || (fUserLevelFault && !PteSrc.n.u1User)
                )
             {
                 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
                 LogFlow(("CheckPageFault: real page fault at %RGv PteSrc.u=%08x (2)\n", GCPtrPage, PteSrc.u));
@@ -2179,5 +2152,4 @@
                 TRPMSetErrorCode(pVCpu, uErr | X86_TRAP_PF_P); /* page-level protection violation */
 
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
                 return VINF_EM_RAW_GUEST_TRAP;
             }
@@ -2208,114 +2180,7 @@
 
                 pPteSrc->n.u1Dirty = 1;
-
-                if (pPdeDst->n.u1Present)
-                {
-#ifndef IN_RING0
-                    /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
-                     * Our individual shadow handlers will provide more information and force a fatal exit.
-                     */
-                    if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
-                    {
-                        LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
-                        STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-                        return VINF_SUCCESS;
-                    }
-#endif
-                    /*
-                     * Map shadow page table.
-                     */
-                    PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
-                    if (pShwPage)
-                    {
-                        PSHWPT  pPTDst  = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
-                        PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-                        if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
-                        {
-                            if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
-                            {
-                                PPGMPAGE pPage  = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
-                                SHWPTE   PteDst = *pPteDst;
-
-                                LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
-                                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
-
-                                Assert(pPteSrc->n.u1Write);
-
-                                /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
-                                 *       fault again and take this path to only invalidate the entry.
-                                 */
-                                if (RT_LIKELY(pPage))
-                                {
-                                    if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-                                        /* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */
-                                        PteDst.n.u1Write = 0;
-                                    else
-                                    {
-                                        if (   PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
-                                            && PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
-                                        {
-                                            rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, pPteSrc->u & GST_PTE_PG_MASK);
-                                            AssertRC(rc);
-                                        }
-                                        if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
-                                            PteDst.n.u1Write = 1;
-                                        else
-                                            PteDst.n.u1Write = 0;
-                                    }
-                                }
-                                else
-                                    PteDst.n.u1Write = 1;
-
-                                PteDst.n.u1Dirty    = 1;
-                                PteDst.n.u1Accessed = 1;
-                                PteDst.au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
-                                ASMAtomicWriteSize(pPteDst, PteDst.u);
-                                PGM_INVL_PG(pVCpu, GCPtrPage);
-
-                                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-                                return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
-                            }
-# ifdef IN_RING0
-                            else
-                            /* Check for stale TLB entry; only applies to the SMP guest case. */
-                            if (   pVM->cCpus > 1
-                                && pPteDst->n.u1Write == 1
-                                && pPteDst->n.u1Accessed == 1)
-                            {
-                                /* Stale TLB entry. */
-                                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
-                                PGM_INVL_PG(pVCpu, GCPtrPage);
-
-                                STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-                                return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
-                            }
-# endif
-                        }
-                    }
-                    else
-                        AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
-                }
             }
-            /** @todo Optimize accessed bit emulation? */
-# ifdef VBOX_STRICT
-            /*
-             * Sanity check.
-             */
-            else if (   !pPteSrc->n.u1Dirty
-                     && (pPdeSrc->n.u1Write & pPteSrc->n.u1Write)
-                     && pPdeDst->n.u1Present)
-            {
-                PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
-                PSHWPT  pPTDst  = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
-                PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
-                if (    pPteDst->n.u1Present
-                    &&  pPteDst->n.u1Write)
-                    LogFlow(("Writable present page %RGv not marked for dirty bit tracking!!!\n", GCPtrPage));
-            }
-# endif /* VBOX_STRICT */
-            STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
-            return VINF_PGM_NO_DIRTY_BIT_TRACKING;
+            return VINF_SUCCESS;
         }
         AssertRC(rc);
-        STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
         return rc;
@@ -2328,4 +2193,3 @@
          */
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyTrackRealPF));
-        STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
         Log(("CheckPageFault: real page fault at %RGv (%d)\n", GCPtrPage, uPageFaultLevel));
@@ -2354,9 +2218,9 @@
      * Map the guest page table.
      */
-    PGSTPT pPTSrc2;
-    rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc2);
+    PGSTPT pPTSrc;
+    rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
     if (RT_SUCCESS(rc))
     {
-        PGSTPTE pPteSrc = &pPTSrc2->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
+        PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
         const GSTPTE PteSrc = *pPteSrc;
         if (pPteSrc->n.u1Present)
@@ -2367,4 +2231,169 @@
     }
     return VINF_EM_RAW_GUEST_TRAP;
+}
+
+/**
+ * Handle dirty bit tracking faults.
+ *
+ * @returns VBox status code.
+ * @param   pVCpu       The VMCPU handle.
+ * @param   uErr        Page fault error code.
+ * @param   pPdeSrc     Guest page directory entry.
+ * @param   pPdeDst     Shadow page directory entry.
+ * @param   GCPtrPage   Guest context page address.
+ */
+PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
+{
+# if PGM_GST_TYPE == PGM_TYPE_AMD64
+    const bool fBigPagesSupported = true;
+# else
+    const bool fBigPagesSupported = !!(CPUMGetGuestCR4(pVCpu) & X86_CR4_PSE);
+# endif
+    PVM      pVM   = pVCpu->CTX_SUFF(pVM);
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+
+    Assert(PGMIsLockOwner(pVM));
+
+    if (pPdeSrc->b.u1Size && fBigPagesSupported)
+    {
+        if (    pPdeDst->n.u1Present
+            &&  (pPdeDst->u & PGM_PDFLAGS_TRACK_DIRTY))
+        {
+            SHWPDE PdeDst = *pPdeDst;
+
+            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+            Assert(pPdeSrc->b.u1Write);
+
+            /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
+             *       fault again and take this path to only invalidate the entry.
+             */
+            PdeDst.n.u1Write      = 1;
+            PdeDst.n.u1Accessed   = 1;
+            PdeDst.au32[0]       &= ~PGM_PDFLAGS_TRACK_DIRTY;
+            ASMAtomicWriteSize(pPdeDst, PdeDst.u);
+            PGM_INVL_BIG_PG(pVCpu, GCPtrPage);
+            return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
+        }
+# ifdef IN_RING0
+        else
+        /* Check for stale TLB entry; only applies to the SMP guest case. */
+        if (    pVM->cCpus > 1
+            &&  pPdeDst->n.u1Write
+            &&  pPdeDst->n.u1Accessed)
+        {
+            PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
+            if (pShwPage)
+            {
+                PSHWPT  pPTDst  = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
+                PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
+                if (    pPteDst->n.u1Present
+                    &&  pPteDst->n.u1Write)
+                {
+                    /* Stale TLB entry. */
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
+                    PGM_INVL_PG(pVCpu, GCPtrPage);
+                    return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
+                }
+            }
+        }
+# endif /* IN_RING0 */
+        return VINF_PGM_NO_DIRTY_BIT_TRACKING;
+    }
+
+    /*
+     * Map the guest page table.
+     */
+    PGSTPT pPTSrc;
+    int rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
+    if (RT_SUCCESS(rc))
+    {
+        if (pPdeDst->n.u1Present)
+        {
+            PGSTPTE pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
+            const GSTPTE PteSrc = *pPteSrc;
+#ifndef IN_RING0
+            /* Bail out here as pgmPoolGetPageByHCPhys will return NULL and we'll crash below.
+             * Our individual shadow handlers will provide more information and force a fatal exit.
+             */
+            if (MMHyperIsInsideArea(pVM, (RTGCPTR)GCPtrPage))
+            {
+                LogRel(("CheckPageFault: write to hypervisor region %RGv\n", GCPtrPage));
+                return VINF_PGM_NO_DIRTY_BIT_TRACKING;
+            }
+#endif
+            /*
+             * Map shadow page table.
+             */
+            PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, pPdeDst->u & SHW_PDE_PG_MASK);
+            if (pShwPage)
+            {
+                PSHWPT  pPTDst  = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
+                PSHWPTE pPteDst = &pPTDst->a[(GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK];
+                if (pPteDst->n.u1Present)    /** @todo Optimize accessed bit emulation? */
+                {
+                    if (pPteDst->u & PGM_PTFLAGS_TRACK_DIRTY)
+                    {
+                        PPGMPAGE pPage  = pgmPhysGetPage(&pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK);
+                        SHWPTE   PteDst = *pPteDst;
+
+                        LogFlow(("DIRTY page trap addr=%RGv\n", GCPtrPage));
+                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageTrap));
+
+                        Assert(pPteSrc->n.u1Write);
+
+                        /* Note: No need to invalidate this entry on other VCPUs as a stale TLB entry will not harm; write access will simply
+                         *       fault again and take this path to only invalidate the entry.
+                         */
+                        if (RT_LIKELY(pPage))
+                        {
+                            if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+                                /* Assuming write handlers here as the PTE is present (otherwise we wouldn't be here). */
+                                PteDst.n.u1Write = 0;
+                            else
+                            {
+                                if (   PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_WRITE_MONITORED
+                                    && PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
+                                {
+                                    rc = pgmPhysPageMakeWritableUnlocked(pVM, pPage, pPteSrc->u & GST_PTE_PG_MASK);
+                                    AssertRC(rc);
+                                }
+                                if (PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
+                                    PteDst.n.u1Write = 1;
+                                else
+                                    PteDst.n.u1Write = 0;
+                            }
+                        }
+                        else
+                            PteDst.n.u1Write = 1;
+
+                        PteDst.n.u1Dirty    = 1;
+                        PteDst.n.u1Accessed = 1;
+                        PteDst.au32[0]     &= ~PGM_PTFLAGS_TRACK_DIRTY;
+                        ASMAtomicWriteSize(pPteDst, PteDst.u);
+                        PGM_INVL_PG(pVCpu, GCPtrPage);
+                        return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
+                    }
+# ifdef IN_RING0
+                    else
+                    /* Check for stale TLB entry; only applies to the SMP guest case. */
+                    if (   pVM->cCpus > 1
+                        && pPteDst->n.u1Write == 1
+                        && pPteDst->n.u1Accessed == 1)
+                    {
+                        /* Stale TLB entry. */
+                        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyPageStale));
+                        PGM_INVL_PG(pVCpu, GCPtrPage);
+                        return VINF_PGM_HANDLED_DIRTY_BIT_FAULT;    /* restarts the instruction. */
+                    }
+# endif
+                }
+            }
+            else
+                AssertMsgFailed(("pgmPoolGetPageByHCPhys %RGp failed!\n", pPdeDst->u & SHW_PDE_PG_MASK));
+        }
+        return VINF_PGM_NO_DIRTY_BIT_TRACKING;
+    }
+    AssertRC(rc);
+    return rc;
 }
 #endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
@@ -3234,5 +3263,5 @@
 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     /* Check for dirty bit fault */
-    rc = PGM_BTH_NAME(CheckPageFault)(pVCpu, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
+    rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
     if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
         Log(("PGMVerifyAccess: success (dirty)\n"));
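The new CheckDirtyPageFault collects the dirty-bit emulation that CheckPageFault used to interleave with its guest-fault checks: shadow PTEs (and big-page PDEs) for writable-but-not-yet-dirty guest pages are kept write-protected and flagged with PGM_PTFLAGS_TRACK_DIRTY; the first guest write faults, the handler sets the guest dirty bit, re-enables write access in the shadow entry, invalidates the TLB entry, and restarts the instruction. A compact sketch of that mechanism follows; it uses a simplified flat PTE layout and made-up flag positions rather than the real SHWPTE/GSTPTE unions, so treat it as an illustration of the technique, not the PGM code:

    #include <stdint.h>

    /* Illustrative flag layout; the real code uses bit-field unions and
     * PGM_PTFLAGS_TRACK_DIRTY, a software-available PTE bit. */
    #define PTE_PRESENT      (1u << 0)
    #define PTE_WRITE        (1u << 1)
    #define PTE_DIRTY        (1u << 6)
    #define PTE_TRACK_DIRTY  (1u << 9)

    enum { HANDLED_DIRTY_FAULT, NO_DIRTY_TRACKING };

    /* When shadowing a writable guest page whose dirty bit is still clear,
     * map it read-only and remember why. */
    static uint32_t shadow_pte_for(uint32_t gst_pte)
    {
        uint32_t shw = gst_pte & (PTE_PRESENT | PTE_WRITE);
        if ((gst_pte & PTE_WRITE) && !(gst_pte & PTE_DIRTY))
        {
            shw &= ~PTE_WRITE;        /* force a #PF on the first guest write */
            shw |= PTE_TRACK_DIRTY;
        }
        return shw;
    }

    /* On a write fault: if the only problem is our own write protection,
     * record the dirty bit in the guest PTE, unprotect the shadow PTE,
     * invalidate the TLB entry and let the instruction restart. */
    static int handle_write_fault(uint32_t *gst_pte, uint32_t *shw_pte)
    {
        if (!(*shw_pte & PTE_TRACK_DIRTY))
            return NO_DIRTY_TRACKING;     /* genuine fault, handled elsewhere */

        *gst_pte |= PTE_DIRTY;
        *shw_pte  = (*shw_pte | PTE_WRITE | PTE_DIRTY) & ~PTE_TRACK_DIRTY;
        /* a TLB invalidation (invlpg) of the faulting address would go here */
        return HANDLED_DIRTY_FAULT;
    }

Splitting this out also lets Trap0eHandler call it only for write faults (uErr & X86_TRAP_PF_RW), while the cheaper CheckPageFault runs first, before the PGM lock is taken.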