Changeset 31140 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Jul 27, 2010 1:29:43 PM (15 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 2 edited
Legend:
- Lines prefixed with "-" were removed
- Lines prefixed with "+" were added
- Unmodified context lines have no prefix; "…" marks omitted unmodified lines
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
Changes from r31136 to r31140:

 PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
 PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
-PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
-PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
-PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
-PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
+static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
+static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
+static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
 PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
…
 PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
 #endif
-DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys, uint16_t iPte);
 PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
 PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);
…
 # if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
        || PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64) \
-    && PGM_SHW_TYPE != PGM_TYPE_NESTED
+    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
     && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
     int rc;
…
  * @param   uErr        Fault error (X86_TRAP_PF_*).
  */
-PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
+static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
…

 /**
- * Investigate a page fault to identify ones targetted at the guest and to
- * handle write protection page faults caused by dirty bit tracking.
- *
- * This will do detect invalid entries and raise X86_TRAP_PF_RSVD.
- *
- * @returns VBox status code.
- * @param   pVCpu       The VMCPU handle.
- * @param   uErr        Page fault error code.  The X86_TRAP_PF_RSVD flag
- *                      cannot be trusted as it is used for MMIO optimizations.
- * @param   pPdeSrc     Guest page directory entry.
- * @param   GCPtrPage   Guest context page address.
- */
-PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
-{
-    bool        fUserLevelFault      = !!(uErr & X86_TRAP_PF_US);
-    bool        fWriteFault          = !!(uErr & X86_TRAP_PF_RW);
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-    bool        fMaybeNXEFault       = (uErr & X86_TRAP_PF_ID) && GST_IS_NX_ACTIVE(pVCpu);
-# endif
-    bool        fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu));
-    PVM         pVM                  = pVCpu->CTX_SUFF(pVM);
-    int         rc;
-
-    LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
-
-    /*
-     * Note! For PAE it is safe to assume that bad guest physical addresses
-     *       (which returns all FFs) in the translation tables will cause
-     *       #PF(RSVD).  The same will be the case for long mode provided the
-     *       physical address width is less than 52 bits - this we ASSUME.
-     *
-     * Note! No convenient shortcuts here, we have to validate everything!
-     */
-
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-    /*
-     * Real page fault? (PML4E level)
-     */
-    PX86PML4    pPml4Src = pgmGstGetLongModePML4Ptr(pVCpu);
-    if (RT_UNLIKELY(!pPml4Src))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
-
-    PX86PML4E   pPml4eSrc = &pPml4Src->a[(GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK];
-    if (!pPml4eSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 0);
-    if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, *pPml4eSrc)))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
-    if (   (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write)
-        || (fMaybeNXEFault && pPml4eSrc->n.u1NoExecute)
-        || (fUserLevelFault && !pPml4eSrc->n.u1User) )
-        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0);
-
-    /*
-     * Real page fault? (PDPE level)
-     */
-    PX86PDPT    pPdptSrc;
-    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4eSrc->u & X86_PML4E_PG_MASK, &pPdptSrc);
-    if (RT_FAILURE(rc))
-    {
-        AssertMsgReturn(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc), rc);
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-    }
-
-    PX86PDPE    pPdpeSrc = &pPdptSrc->a[(GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64];
-    if (!pPdpeSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
-    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-    if (   (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write)
-        || (fMaybeNXEFault && pPdpeSrc->lm.u1NoExecute)
-        || (fUserLevelFault && !pPdpeSrc->lm.u1User) )
-        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 1);
-
-# elif PGM_GST_TYPE == PGM_TYPE_PAE
-    /*
-     * Real page fault? (PDPE level)
-     */
-    PX86PDPT    pPdptSrc = pgmGstGetPaePDPTPtr(pVCpu);
-    if (RT_UNLIKELY(!pPdptSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-/** @todo Handle bad CR3 address. */
-    PX86PDPE    pPdpeSrc = pgmGstGetPaePDPEPtr(pVCpu, GCPtrPage);
-    if (!pPdpeSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
-    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-# endif /* PGM_GST_TYPE == PGM_TYPE_PAE */
-
-    /*
-     * Real page fault? (PDE level)
-     */
-    if (!pPdeSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 2);
-    bool const fBigPage = pPdeSrc->b.u1Size && GST_IS_PSE_ACTIVE(pVCpu);
-    if (!fBigPage ? !GST_IS_PDE_VALID(pVCpu, *pPdeSrc) : !GST_IS_BIG_PDE_VALID(pVCpu, *pPdeSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 2);
-    if (   (fMaybeWriteProtFault && !pPdeSrc->n.u1Write)
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-        || (fMaybeNXEFault && pPdeSrc->n.u1NoExecute)
-# endif
-        || (fUserLevelFault && !pPdeSrc->n.u1User) )
-        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 2);
-
-    /*
-     * First check the easy case where the page directory has been marked
-     * read-only to track the dirty bit of an emulated BIG page.
-     */
-    if (fBigPage)
-    {
-        /* Mark guest page directory as accessed */
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-        pPml4eSrc->n.u1Accessed = 1;
-        pPdpeSrc->lm.u1Accessed = 1;
-# endif
-        pPdeSrc->b.u1Accessed = 1;
-
-        /* Mark the entry guest PDE dirty it it's a write access. */
-        if (fWriteFault)
-            pPdeSrc->b.u1Dirty = 1;
-    }
-    else
-    {
-        /*
-         * Map the guest page table.
-         */
-        PGSTPT  pPTSrc;
-        PGSTPTE pPteSrc;
-        GSTPTE  PteSrc;
-        rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
-        if (RT_SUCCESS(rc))
-        {
-            pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
-            PteSrc.u = pPteSrc->u;
-        }
-        else if (rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS)
-        {
-            /* All bits in the PTE are set. */
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
-            PteSrc.u = UINT32_MAX;
-# else
-            PteSrc.u = UINT64_MAX;
-# endif
-            pPteSrc = &PteSrc;
-        }
-        else
-        {
-            AssertRC(rc);
-            return rc;
-        }
-
-        /*
-         * Real page fault?
-         */
-        if (!PteSrc.n.u1Present)
-            return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 3);
-        if (!GST_IS_PTE_VALID(pVCpu, PteSrc))
-            return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 3);
-        if (   (fMaybeWriteProtFault && !PteSrc.n.u1Write)
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-            || (fMaybeNXEFault && PteSrc.n.u1NoExecute)
-# endif
-            || (fUserLevelFault && !PteSrc.n.u1User) )
-            return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0);
-
-        LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
-
-        /*
-         * Set the accessed bits in the page directory and the page table.
-         */
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-        pPml4eSrc->n.u1Accessed = 1;
-        pPdpeSrc->lm.u1Accessed = 1;
-# endif
-        pPdeSrc->n.u1Accessed = 1;
-        pPteSrc->n.u1Accessed = 1;
-
-        /*
-         * Set the dirty flag in the PTE if it's a write access.
-         */
-        if (fWriteFault)
-        {
-# ifdef VBOX_WITH_STATISTICS
-            if (!pPteSrc->n.u1Dirty)
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtiedPage));
-            else
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageAlreadyDirty));
-# endif
-
-            pPteSrc->n.u1Dirty = 1;
-        }
-    }
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Handle dirty bit tracking faults.
  *
…
  * @param   GCPtrPage   Guest context page address.
  */
-PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
+static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
…
  * @param   GCPtrPage   GC Pointer of the page that caused the fault
  */
-PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
+static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
…
     Assert(PGMIsLocked(pVM));

-#if (   PGM_GST_TYPE == PGM_TYPE_32BIT\
-     || PGM_GST_TYPE == PGM_TYPE_PAE\
-     || PGM_GST_TYPE == PGM_TYPE_AMD64)\
-    && PGM_SHW_TYPE != PGM_TYPE_NESTED\
-    && PGM_SHW_TYPE != PGM_TYPE_EPT
+#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
+     || PGM_GST_TYPE == PGM_TYPE_PAE \
+     || PGM_GST_TYPE == PGM_TYPE_AMD64) \
+    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
+    && PGM_SHW_TYPE != PGM_TYPE_EPT

     int rc = VINF_SUCCESS;
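
In PGMAllBth.h this changeset gives the per-mode helpers SyncPage, CheckDirtyPageFault and SyncPT internal linkage (static, named through PGM_BTH_NAME()) instead of the PGM_BTH_DECL() style, removes CheckPageFault entirely, and drops the SyncPageWorkerTrackDeref prototype. PGMAllBth.h is a template header that is compiled once per shadow/guest paging-mode pair, so each instantiation now carries its own private copies of these helpers. The sketch below only illustrates that name-mangling pattern; PGMTMPL_NAME and the pgmBth32Bit/pgmBthPae prefixes are invented for the example and are not the real PGM_BTH_NAME expansions (in VirtualBox the per-mode bodies come from re-including the template header with different macro definitions).

    /* Minimal single-file sketch of the "template header" pattern, using a
     * hypothetical PGMTMPL_NAME() macro in place of the real PGM_BTH_NAME(). */
    #include <stdio.h>

    #define PGMTMPL_NAME(name) pgmBth32Bit##name            /* first "paging mode" */
    static int PGMTMPL_NAME(SyncPage)(unsigned uErr)        /* -> pgmBth32BitSyncPage */
    {
        return (int)(uErr & 1);                             /* placeholder body */
    }
    #undef PGMTMPL_NAME

    #define PGMTMPL_NAME(name) pgmBthPae##name              /* second "paging mode" */
    static int PGMTMPL_NAME(SyncPage)(unsigned uErr)        /* -> pgmBthPaeSyncPage */
    {
        return (int)(uErr & 2);                             /* placeholder body */
    }
    #undef PGMTMPL_NAME

    int main(void)
    {
        /* each instantiation is a distinct function with internal linkage */
        printf("%d %d\n", pgmBth32BitSyncPage(3), pgmBthPaeSyncPage(3));
        return 0;
    }

The first hunk above rewrites the forward declarations at the top of the header to match this internal-linkage style.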
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
Changes from r31123 to r31140:

     || PGM_GST_TYPE == PGM_TYPE_PAE \
     || PGM_GST_TYPE == PGM_TYPE_AMD64
-PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
+static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
 #endif
 PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
…
  * @param   pWalk       Where to return the walk result. This is always set.
  */
-PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
+static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
 {
     int rc;
…
     if (pfFlags)
     {
-        /* The RW and US flags are determined via bitwise AND across all levels. */
-        uint64_t fUpperRwUs = (X86_PTE_RW | X86_PTE_US)
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-                            & Walk.Pml4e.u
-                            & Walk.Pdpe.u
-# endif
-                            & Walk.Pde.u;
-        fUpperRwUs |= ~(uint64_t)(X86_PTE_RW | X86_PTE_US);
-
-        /* The RW and US flags are determined via bitwise AND across all levels. */
+        if (!Walk.Core.fBigPage)
+            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
+                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
+                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-        uint32_t fUpperNx = 0
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-                          | Walk.Pml4e.n.u1NoExecute
-                          | Walk.Pdpe.lm.u1NoExecute
-# endif
-                          | Walk.Pde.n.u1NoExecute;
-# endif
-
-        if (!Walk.Core.fBigPage)
-        {
-            *pfFlags = (Walk.Pte.u & ~GST_PTE_PG_MASK) & fUpperRwUs;
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-            if (Walk.Pte.n.u1NoExecute || fUpperNx)
-            {
-                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
-                *pfFlags |= X86_PTE_PAE_NX;
-            }
-# endif
-        }
+                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
+# endif
+                     ;
         else
         {
-            *pfFlags = (  (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
-                        | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT))
-                     & fUpperRwUs;
+            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
+                     | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT)
+                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
+                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-            if (fUpperNx)
-            {
-                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
-                *pfFlags |= X86_PTE_PAE_NX;
-            }
-# endif
+                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
+# endif
+                     ;
         }
     }
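
The PGMAllGst.h hunk drops the fUpperRwUs/fUpperNx accumulators from GetPage and instead uses the effective access bits the page walker already records in Walk.Core (fEffectiveRW, fEffectiveUS, fEffectiveNX). As a rough illustration of how such effective flags are conventionally derived from a multi-level walk, write and user access are granted only if the bit is set at every level, while no-execute applies if it is set at any level. The sketch below is only an illustration under those assumptions; the MYPTWALK structure and the MY_* constants are invented for the example and are not VirtualBox types.

    #include <stdbool.h>
    #include <stdint.h>

    #define MY_X86_PTE_RW   UINT64_C(0x0000000000000002)  /* bit 1: read/write */
    #define MY_X86_PTE_US   UINT64_C(0x0000000000000004)  /* bit 2: user/supervisor */
    #define MY_X86_PTE_NX   UINT64_C(0x8000000000000000)  /* bit 63: no-execute */

    /* Hypothetical walk result for a 4-level (long mode) translation. */
    typedef struct MYPTWALK
    {
        uint64_t uPml4e, uPdpe, uPde, uPte;     /* raw entries gathered by the walk */
        bool     fEffectiveRW, fEffectiveUS, fEffectiveNX;
    } MYPTWALK;

    static void myPtWalkComputeEffective(MYPTWALK *pWalk)
    {
        /* RW and US must be granted by every level of the hierarchy... */
        uint64_t fAnd = pWalk->uPml4e & pWalk->uPdpe & pWalk->uPde & pWalk->uPte;
        /* ...while NX at any single level makes the page non-executable. */
        uint64_t fOr  = pWalk->uPml4e | pWalk->uPdpe | pWalk->uPde | pWalk->uPte;

        pWalk->fEffectiveRW = (fAnd & MY_X86_PTE_RW) != 0;
        pWalk->fEffectiveUS = (fAnd & MY_X86_PTE_US) != 0;
        pWalk->fEffectiveNX = (fOr  & MY_X86_PTE_NX) != 0;
    }

With those booleans precomputed by the walker, GetPage only has to OR X86_PTE_RW, X86_PTE_US and X86_PTE_PAE_NX into *pfFlags conditionally, which is what the added lines in the hunk do for both the 4 KB and the big-page case.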