Changeset 31140 in vbox
- Timestamp: Jul 27, 2010 1:29:43 PM
- Location: trunk/src/VBox/VMM
- Files: 6 edited
--- trunk/src/VBox/VMM/PGM.cpp (r31136)
+++ trunk/src/VBox/VMM/PGM.cpp (r31140)
@@ -2943,5 +2943,4 @@
     pVCpu->pgm.s.pfnR3BthSyncCR3              = pModeData->pfnR3BthSyncCR3;
     Assert(pVCpu->pgm.s.pfnR3BthSyncCR3);
-    pVCpu->pgm.s.pfnR3BthSyncPage             = pModeData->pfnR3BthSyncPage;
     pVCpu->pgm.s.pfnR3BthPrefetchPage         = pModeData->pfnR3BthPrefetchPage;
     pVCpu->pgm.s.pfnR3BthVerifyAccessSyncPage = pModeData->pfnR3BthVerifyAccessSyncPage;
@@ -2955,5 +2954,4 @@
     pVCpu->pgm.s.pfnRCBthInvalidatePage       = pModeData->pfnRCBthInvalidatePage;
     pVCpu->pgm.s.pfnRCBthSyncCR3              = pModeData->pfnRCBthSyncCR3;
-    pVCpu->pgm.s.pfnRCBthSyncPage             = pModeData->pfnRCBthSyncPage;
     pVCpu->pgm.s.pfnRCBthPrefetchPage         = pModeData->pfnRCBthPrefetchPage;
     pVCpu->pgm.s.pfnRCBthVerifyAccessSyncPage = pModeData->pfnRCBthVerifyAccessSyncPage;
@@ -2967,5 +2965,4 @@
     pVCpu->pgm.s.pfnR0BthInvalidatePage       = pModeData->pfnR0BthInvalidatePage;
     pVCpu->pgm.s.pfnR0BthSyncCR3              = pModeData->pfnR0BthSyncCR3;
-    pVCpu->pgm.s.pfnR0BthSyncPage             = pModeData->pfnR0BthSyncPage;
     pVCpu->pgm.s.pfnR0BthPrefetchPage         = pModeData->pfnR0BthPrefetchPage;
     pVCpu->pgm.s.pfnR0BthVerifyAccessSyncPage = pModeData->pfnR0BthVerifyAccessSyncPage;
--- trunk/src/VBox/VMM/PGMBth.h (r31066)
+++ trunk/src/VBox/VMM/PGMBth.h (r31140)
@@ -27,5 +27,4 @@
 PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
 PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
-PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError);
 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uError);
 PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
@@ -54,5 +53,4 @@
     pModeData->pfnR3BthSyncCR3              = PGM_BTH_NAME(SyncCR3);
     pModeData->pfnR3BthInvalidatePage       = PGM_BTH_NAME(InvalidatePage);
-    pModeData->pfnR3BthSyncPage             = PGM_BTH_NAME(SyncPage);
     pModeData->pfnR3BthPrefetchPage         = PGM_BTH_NAME(PrefetchPage);
     pModeData->pfnR3BthVerifyAccessSyncPage = PGM_BTH_NAME(VerifyAccessSyncPage);
@@ -74,7 +72,5 @@
         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(InvalidatePage), rc), rc);
         rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(SyncCR3), &pModeData->pfnRCBthSyncCR3);
-        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncPage), rc), rc);
-        rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(SyncPage), &pModeData->pfnRCBthSyncPage);
-        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncPage), rc), rc);
+        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncCR3), rc), rc);
         rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_BTH_NAME_RC_STR(PrefetchPage), &pModeData->pfnRCBthPrefetchPage);
         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(PrefetchPage), rc), rc);
@@ -98,6 +94,4 @@
         rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(SyncCR3), &pModeData->pfnR0BthSyncCR3);
         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(SyncCR3), rc), rc);
-        rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(SyncPage), &pModeData->pfnR0BthSyncPage);
-        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(SyncPage), rc), rc);
         rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_BTH_NAME_R0_STR(PrefetchPage), &pModeData->pfnR0BthPrefetchPage);
         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(PrefetchPage), rc), rc);
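The RC and R0 halves of the table are filled in differently from the R3 half: the raw-mode and ring-0 workers live in separately loaded modules, so each entry is resolved by its mangled symbol name via PDMR3LdrGetSymbolRC/R0 and checked with AssertMsgRCReturn. Below is a minimal sketch of that lookup-and-assert pattern; the resolver, symbol names, and MODEDATA layout are invented stand-ins, not VirtualBox's real implementation. It also shows why the lookup name and the name in the failure message should come from the same token, which is exactly the copy-paste mismatch corrected in the SyncCR3 assertion above.

#include <stdio.h>
#include <string.h>

/* Hypothetical worker signature and a cut-down mode-data table. */
typedef int (*PFNPGMWORKER)(void);

typedef struct MODEDATA
{
    PFNPGMWORKER pfnSyncCR3;
    PFNPGMWORKER pfnPrefetchPage;
} MODEDATA;

/* Example workers standing in for the per-mode instantiations. */
static int Worker_SyncCR3(void)      { return 0; }
static int Worker_PrefetchPage(void) { return 0; }

/* Toy symbol table; a real loader would search the module's exports. */
static const struct { const char *pszName; PFNPGMWORKER pfn; } g_aSymbols[] =
{
    { "pgmBth32Bit32BitSyncCR3",      Worker_SyncCR3      },
    { "pgmBth32Bit32BitPrefetchPage", Worker_PrefetchPage },
};

/* Resolver mirroring the PDMR3LdrGetSymbol* + AssertMsgRCReturn pattern:
 * fail loudly with the symbol name so a missing worker is caught when the
 * table is built, not when the pointer is first called. */
static int ResolveSymbol(const char *pszSymbol, PFNPGMWORKER *ppfn)
{
    for (size_t i = 0; i < sizeof(g_aSymbols) / sizeof(g_aSymbols[0]); i++)
        if (!strcmp(g_aSymbols[i].pszName, pszSymbol))
        {
            *ppfn = g_aSymbols[i].pfn;
            return 0;
        }
    fprintf(stderr, "%s -> symbol not found\n", pszSymbol); /* same name as looked up */
    return -1;
}

int main(void)
{
    MODEDATA ModeData;
    if (   ResolveSymbol("pgmBth32Bit32BitSyncCR3",      &ModeData.pfnSyncCR3)      != 0
        || ResolveSymbol("pgmBth32Bit32BitPrefetchPage", &ModeData.pfnPrefetchPage) != 0)
        return 1;
    return ModeData.pfnSyncCR3() | ModeData.pfnPrefetchPage();
}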
--- trunk/src/VBox/VMM/PGMInternal.h (r31136)
+++ trunk/src/VBox/VMM/PGMInternal.h (r31140)
@@ -2572,5 +2572,4 @@
     DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
-    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
@@ -2584,5 +2583,4 @@
     DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
-    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
@@ -2596,5 +2594,4 @@
     DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
-    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
@@ -3476,5 +3473,4 @@
     DECLR3CALLBACKMEMBER(int, pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR3CALLBACKMEMBER(int, pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
-    DECLR3CALLBACKMEMBER(int, pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     DECLR3CALLBACKMEMBER(int, pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR3CALLBACKMEMBER(int, pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
@@ -3486,5 +3482,4 @@
     DECLR0CALLBACKMEMBER(int, pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR0CALLBACKMEMBER(int, pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
-    DECLR0CALLBACKMEMBER(int, pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     DECLR0CALLBACKMEMBER(int, pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLR0CALLBACKMEMBER(int, pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
@@ -3496,5 +3491,4 @@
     DECLRCCALLBACKMEMBER(int, pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLRCCALLBACKMEMBER(int, pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
-    DECLRCCALLBACKMEMBER(int, pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
     DECLRCCALLBACKMEMBER(int, pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
     DECLRCCALLBACKMEMBER(int, pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));
@@ -3502,5 +3496,7 @@
     DECLRCCALLBACKMEMBER(int, pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
     DECLRCCALLBACKMEMBER(int, pfnRCBthUnmapCR3,(PVMCPU pVCpu));
+#if 0
     RTRCPTR                         alignment2; /**< structure size alignment. */
+#endif
     /** @} */
 
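The `alignment2` member exists only for structure size alignment, as its comment says: RC pointers (RTRCPTR) are 32-bit while the R3/R0 callback members are host-pointer sized, so the pad is compiled in or, as here, #if 0'd out whenever members come and go and the total size changes parity. A hedged sketch of that padding technique, with invented names and a classic negative-array-size compile-time check (this is not the actual PGMCPU layout):

#include <stdint.h>

/* Invented stand-in: raw-mode context pointers are 32-bit, so a struct
 * mixing them with 64-bit host pointers can end up at a size that is not
 * a multiple of 8; an RTRCPTR-sized pad restores the alignment and gets
 * compiled out (as with the #if 0 above) when the member count changes. */
typedef uint32_t RTRCPTRDEMO;

typedef struct MODEDATADEMO
{
    void       *pfnR3Worker;   /* 8 bytes on a 64-bit host */
    void       *pfnR0Worker;   /* 8 bytes on a 64-bit host */
    RTRCPTRDEMO pfnRCWorker;   /* 4 bytes */
    RTRCPTRDEMO alignment;     /* structure size alignment */
} MODEDATADEMO;

/* Compile-time size check: the array size becomes -1 (an error) if the
 * structure size stops being a multiple of 8. */
typedef char ModeDataSizeCheck[(sizeof(MODEDATADEMO) & 7) == 0 ? 1 : -1];

int main(void) { return 0; }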
--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31136)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31140)
@@ -34,8 +34,7 @@
 PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
 PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
-PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
-PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
-PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
-PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
+static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
+static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
+static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
 PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
@@ -44,5 +43,4 @@
 PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
 #endif
-DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys, uint16_t iPte);
 PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
 PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);
@@ -391,5 +389,5 @@
 # if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
        || PGM_GST_TYPE == PGM_TYPE_PAE   || PGM_GST_TYPE == PGM_TYPE_AMD64) \
-      && PGM_SHW_TYPE != PGM_TYPE_NESTED
+      && PGM_SHW_TYPE != PGM_TYPE_NESTED \
       && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
     int rc;
@@ -1710,5 +1708,5 @@
  * @param   uErr        Fault error (X86_TRAP_PF_*).
  */
-PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
+static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
 {
     PVM      pVM = pVCpu->CTX_SUFF(pVM);
@@ -2274,199 +2272,3 @@
 
 /**
- * Investigate a page fault to identify ones targetted at the guest and to
- * handle write protection page faults caused by dirty bit tracking.
- *
- * This will do detect invalid entries and raise X86_TRAP_PF_RSVD.
- *
- * @returns VBox status code.
- * @param   pVCpu       The VMCPU handle.
- * @param   uErr        Page fault error code.  The X86_TRAP_PF_RSVD flag
- *                      cannot be trusted as it is used for MMIO optimizations.
- * @param   pPdeSrc     Guest page directory entry.
- * @param   GCPtrPage   Guest context page address.
- */
-PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
-{
-    bool fUserLevelFault      = !!(uErr & X86_TRAP_PF_US);
-    bool fWriteFault          = !!(uErr & X86_TRAP_PF_RW);
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-    bool fMaybeNXEFault       = (uErr & X86_TRAP_PF_ID) && GST_IS_NX_ACTIVE(pVCpu);
-# endif
-    bool fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu));
-    PVM  pVM                  = pVCpu->CTX_SUFF(pVM);
-    int  rc;
-
-    LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
-
-    /*
-     * Note! For PAE it is safe to assume that bad guest physical addresses
-     *       (which returns all FFs) in the translation tables will cause
-     *       #PF(RSVD).  The same will be the case for long mode provided the
-     *       physical address width is less than 52 bits - this we ASSUME.
-     *
-     * Note! No convenient shortcuts here, we have to validate everything!
-     */
-
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-    /*
-     * Real page fault? (PML4E level)
-     */
-    PX86PML4 pPml4Src = pgmGstGetLongModePML4Ptr(pVCpu);
-    if (RT_UNLIKELY(!pPml4Src))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
-
-    PX86PML4E pPml4eSrc = &pPml4Src->a[(GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK];
-    if (!pPml4eSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 0);
-    if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, *pPml4eSrc)))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
-    if (   (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write)
-        || (fMaybeNXEFault && pPml4eSrc->n.u1NoExecute)
-        || (fUserLevelFault && !pPml4eSrc->n.u1User) )
-        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0);
-
-    /*
-     * Real page fault? (PDPE level)
-     */
-    PX86PDPT pPdptSrc;
-    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4eSrc->u & X86_PML4E_PG_MASK, &pPdptSrc);
-    if (RT_FAILURE(rc))
-    {
-        AssertMsgReturn(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc), rc);
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-    }
-
-    PX86PDPE pPdpeSrc = &pPdptSrc->a[(GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64];
-    if (!pPdpeSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
-    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-    if (   (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write)
-        || (fMaybeNXEFault && pPdpeSrc->lm.u1NoExecute)
-        || (fUserLevelFault && !pPdpeSrc->lm.u1User) )
-        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 1);
-
-# elif PGM_GST_TYPE == PGM_TYPE_PAE
-    /*
-     * Real page fault? (PDPE level)
-     */
-    PX86PDPT pPdptSrc = pgmGstGetPaePDPTPtr(pVCpu);
-    if (RT_UNLIKELY(!pPdptSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-    /** @todo Handle bad CR3 address. */
-    PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(pVCpu, GCPtrPage);
-    if (!pPdpeSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
-    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
-# endif /* PGM_GST_TYPE == PGM_TYPE_PAE */
-
-    /*
-     * Real page fault? (PDE level)
-     */
-    if (!pPdeSrc->n.u1Present)
-        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 2);
-    bool const fBigPage = pPdeSrc->b.u1Size && GST_IS_PSE_ACTIVE(pVCpu);
-    if (!fBigPage ? !GST_IS_PDE_VALID(pVCpu, *pPdeSrc) : !GST_IS_BIG_PDE_VALID(pVCpu, *pPdeSrc))
-        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 2);
-    if (   (fMaybeWriteProtFault && !pPdeSrc->n.u1Write)
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-        || (fMaybeNXEFault && pPdeSrc->n.u1NoExecute)
-# endif
-        || (fUserLevelFault && !pPdeSrc->n.u1User) )
-        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 2);
-
-    /*
-     * First check the easy case where the page directory has been marked
-     * read-only to track the dirty bit of an emulated BIG page.
-     */
-    if (fBigPage)
-    {
-        /* Mark guest page directory as accessed */
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-        pPml4eSrc->n.u1Accessed = 1;
-        pPdpeSrc->lm.u1Accessed = 1;
-# endif
-        pPdeSrc->b.u1Accessed   = 1;
-
-        /* Mark the entry guest PDE dirty it it's a write access. */
-        if (fWriteFault)
-            pPdeSrc->b.u1Dirty = 1;
-    }
-    else
-    {
-        /*
-         * Map the guest page table.
-         */
-        PGSTPT  pPTSrc;
-        PGSTPTE pPteSrc;
-        GSTPTE  PteSrc;
-        rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
-        if (RT_SUCCESS(rc))
-        {
-            pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
-            PteSrc.u = pPteSrc->u;
-        }
-        else if (rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS)
-        {
-            /* All bits in the PTE are set. */
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
-            PteSrc.u = UINT32_MAX;
-# else
-            PteSrc.u = UINT64_MAX;
-# endif
-            pPteSrc = &PteSrc;
-        }
-        else
-        {
-            AssertRC(rc);
-            return rc;
-        }
-
-        /*
-         * Real page fault?
-         */
-        if (!PteSrc.n.u1Present)
-            return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 3);
-        if (!GST_IS_PTE_VALID(pVCpu, PteSrc))
-            return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 3);
-        if (   (fMaybeWriteProtFault && !PteSrc.n.u1Write)
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
-            || (fMaybeNXEFault && PteSrc.n.u1NoExecute)
-# endif
-            || (fUserLevelFault && !PteSrc.n.u1User) )
-            return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0);
-
-        LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
-
-        /*
-         * Set the accessed bits in the page directory and the page table.
-         */
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-        pPml4eSrc->n.u1Accessed = 1;
-        pPdpeSrc->lm.u1Accessed = 1;
-# endif
-        pPdeSrc->n.u1Accessed = 1;
-        pPteSrc->n.u1Accessed = 1;
-
-        /*
-         * Set the dirty flag in the PTE if it's a write access.
-         */
-        if (fWriteFault)
-        {
-# ifdef VBOX_WITH_STATISTICS
-            if (!pPteSrc->n.u1Dirty)
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtiedPage));
-            else
-                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageAlreadyDirty));
-# endif
-
-            pPteSrc->n.u1Dirty = 1;
-        }
-    }
-    return VINF_SUCCESS;
-}
-
-
-/**
  * Handle dirty bit tracking faults.
@@ -2479,5 +2281,5 @@
  * @param   GCPtrPage   Guest context page address.
  */
-PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
+static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
 {
     PVM pVM = pVCpu->CTX_SUFF(pVM);
@@ -2660,5 +2462,5 @@
  * @param   GCPtrPage   GC Pointer of the page that caused the fault
  */
-PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
+static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
 {
     PVM      pVM = pVCpu->CTX_SUFF(pVM);
@@ -2673,9 +2475,9 @@
     Assert(PGMIsLocked(pVM));
 
-#if (   PGM_GST_TYPE == PGM_TYPE_32BIT\
-     || PGM_GST_TYPE == PGM_TYPE_PAE\
-     || PGM_GST_TYPE == PGM_TYPE_AMD64)\
-    && PGM_SHW_TYPE != PGM_TYPE_NESTED\
-    && PGM_SHW_TYPE != PGM_TYPE_EPT
+#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
+     || PGM_GST_TYPE == PGM_TYPE_PAE \
+     || PGM_GST_TYPE == PGM_TYPE_AMD64) \
+    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
+    && PGM_SHW_TYPE != PGM_TYPE_EPT
 
     int rc = VINF_SUCCESS;
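Making SyncPage, CheckDirtyPageFault and SyncPT static works here because PGMAllBth.h is a "template" header compiled once per guest/shadow paging-mode pair, with PGM_BTH_NAME pasting the active mode pair into every function name: each instantiation gets its own private copy, the names never collide, and only the workers still exported through the mode-data table keep external linkage. A reduced illustration of the token-pasting technique follows; the macro names and mode tags are invented, and the real PGM_BTH_NAME mangling differs:

#include <stdio.h>

/* Two-step expansion so MODE_TAG is expanded before token pasting. */
#define MODE_NAME(name)              MODE_NAME_EXPAND(MODE_TAG, name)
#define MODE_NAME_EXPAND(tag, name)  MODE_NAME_PASTE(tag, name)
#define MODE_NAME_PASTE(tag, name)   pgmBth##tag##name

/* First "inclusion" of the template body: 32-bit guest on 32-bit shadow. */
#define MODE_TAG _32Bit32Bit_
static int MODE_NAME(SyncPage)(void) { return 32; }
#undef MODE_TAG

/* Second "inclusion": PAE guest on PAE shadow. */
#define MODE_TAG _PAEPAE_
static int MODE_NAME(SyncPage)(void) { return 64; }
#undef MODE_TAG

int main(void)
{
    /* Two distinct static functions were generated from one body. */
    printf("%d %d\n", pgmBth_32Bit32Bit_SyncPage(), pgmBth_PAEPAE_SyncPage());
    return 0;
}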
--- trunk/src/VBox/VMM/VMMAll/PGMAllGst.h (r31123)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllGst.h (r31140)
@@ -24,5 +24,5 @@
     || PGM_GST_TYPE == PGM_TYPE_PAE \
     || PGM_GST_TYPE == PGM_TYPE_AMD64
-PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
+static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
 #endif
 PGM_GST_DECL(int, GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
@@ -73,5 +73,5 @@
  * @param   pWalk       Where to return the walk result. This is always set.
  */
-PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
+static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
 {
     int rc;
@@ -285,46 +285,22 @@
     if (pfFlags)
     {
-        /* The RW and US flags are determined via bitwise AND across all levels. */
-        uint64_t fUpperRwUs = (X86_PTE_RW | X86_PTE_US)
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-                            & Walk.Pml4e.u
-                            & Walk.Pdpe.u
-# endif
-                            & Walk.Pde.u;
-        fUpperRwUs |= ~(uint64_t)(X86_PTE_RW | X86_PTE_US);
-
-        /* The RW and US flags are determined via bitwise AND across all levels. */
+        if (!Walk.Core.fBigPage)
+            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US)) /* NX not needed */
+                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
+                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-        uint32_t fUpperNx = 0
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
-                          | Walk.Pml4e.n.u1NoExecute
-                          | Walk.Pdpe.lm.u1NoExecute
-# endif
-                          | Walk.Pde.n.u1NoExecute;
-# endif
-
-        if (!Walk.Core.fBigPage)
-        {
-            *pfFlags = (Walk.Pte.u & ~GST_PTE_PG_MASK) & fUpperRwUs;
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-            if (Walk.Pte.n.u1NoExecute || fUpperNx)
-            {
-                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
-                *pfFlags |= X86_PTE_PAE_NX;
-            }
-# endif
-        }
+                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
+# endif
+                     ;
         else
         {
-            *pfFlags = ( (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
-                       | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT))
-                     & fUpperRwUs;
+            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS)) /* NX not needed */
+                     | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT)
+                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
+                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
-            if (fUpperNx)
-            {
-                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
-                *pfFlags |= X86_PTE_PAE_NX;
-            }
-# endif
+                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
+# endif
+                     ;
         }
     }
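The rewritten flag assembly above reads Walk.Core.fEffectiveRW/US/NX instead of recomputing fUpperRwUs and fUpperNx inline. Per the comment in the removed code, RW and US are the bitwise AND of the corresponding bits at every paging level, while NX forbids execution if set at any level (an OR). A self-contained sketch of that combination rule, with invented struct and function names (only the X86_PTE_* constants match the real headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_PTE_RW     UINT64_C(0x0000000000000002)
#define X86_PTE_US     UINT64_C(0x0000000000000004)
#define X86_PTE_PAE_NX UINT64_C(0x8000000000000000)

/* Invented stand-in for the walk result's core flags. */
typedef struct PTWALKCOREDEMO
{
    bool fEffectiveRW;
    bool fEffectiveUS;
    bool fEffectiveNX;
} PTWALKCOREDEMO;

/* RW/US: the page is writable/user-accessible only if every level allows it.
 * NX: the page is no-execute if any level marks it so. */
static void ComputeEffectiveBits(uint64_t uPml4e, uint64_t uPdpe, uint64_t uPde, uint64_t uPte,
                                 PTWALKCOREDEMO *pCore)
{
    uint64_t const fAnd = uPml4e & uPdpe & uPde & uPte;
    uint64_t const fOr  = uPml4e | uPdpe | uPde | uPte;
    pCore->fEffectiveRW = (fAnd & X86_PTE_RW) != 0;
    pCore->fEffectiveUS = (fAnd & X86_PTE_US) != 0;
    pCore->fEffectiveNX = (fOr  & X86_PTE_PAE_NX) != 0;
}

int main(void)
{
    PTWALKCOREDEMO Core;
    /* The PDE lacks RW: the mapping is effectively read-only even though
     * the PTE itself allows writes. */
    ComputeEffectiveBits(X86_PTE_RW | X86_PTE_US, X86_PTE_RW | X86_PTE_US,
                         X86_PTE_US, X86_PTE_RW | X86_PTE_US, &Core);
    printf("RW=%d US=%d NX=%d\n", Core.fEffectiveRW, Core.fEffectiveUS, Core.fEffectiveNX);
    return 0;
}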
--- trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp (r31123)
+++ trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp (r31140)
@@ -510,5 +510,4 @@
     GEN_CHECK_OFF(PGMCPU, pfnR3BthSyncCR3);
     GEN_CHECK_OFF(PGMCPU, pfnR3BthInvalidatePage);
-    GEN_CHECK_OFF(PGMCPU, pfnR3BthSyncPage);
     GEN_CHECK_OFF(PGMCPU, pfnR3BthPrefetchPage);
     GEN_CHECK_OFF(PGMCPU, pfnR3BthVerifyAccessSyncPage);
@@ -516,8 +515,12 @@
     GEN_CHECK_OFF(PGMCPU, pfnRCBthTrap0eHandler);
     GEN_CHECK_OFF(PGMCPU, pfnRCBthInvalidatePage);
-    GEN_CHECK_OFF(PGMCPU, pfnRCBthSyncPage);
     GEN_CHECK_OFF(PGMCPU, pfnRCBthPrefetchPage);
     GEN_CHECK_OFF(PGMCPU, pfnRCBthVerifyAccessSyncPage);
     GEN_CHECK_OFF(PGMCPU, pfnRCBthAssertCR3);
+    GEN_CHECK_OFF(PGMCPU, pfnR0BthTrap0eHandler);
+    GEN_CHECK_OFF(PGMCPU, pfnR0BthInvalidatePage);
+    GEN_CHECK_OFF(PGMCPU, pfnR0BthPrefetchPage);
+    GEN_CHECK_OFF(PGMCPU, pfnR0BthVerifyAccessSyncPage);
+    GEN_CHECK_OFF(PGMCPU, pfnR0BthAssertCR3);
     GEN_CHECK_OFF(PGMCPU, DisState);
     GEN_CHECK_OFF(PGMCPU, cGuestModeChanges);
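This testcase mirrors PGMCPU member-for-member, which is why dropping the SyncPage pointers and picking up the previously missing R0 entries shows up here as well. The underlying technique is to have differently targeted builds (host compiler vs. raw-mode context compiler) report offsetof() for each member and compare the outputs, so any disagreement about structure layout is caught at build time. A toy version of such an offset-reporting macro follows; it is in the spirit of GEN_CHECK_OFF, not its actual definition:

#include <stddef.h>
#include <stdio.h>

/* Toy struct standing in for PGMCPU. */
typedef struct PGMCPUDEMO
{
    void *pfnR3BthSyncCR3;
    void *pfnR3BthInvalidatePage;
    void *pfnR3BthPrefetchPage;
} PGMCPUDEMO;

/* Print the member name with its offset; running this under both compilers
 * and diffing the output exposes any layout skew between them. */
#define CHECK_OFF(type, member) \
    printf("%-40s %zu\n", #type "." #member, offsetof(type, member))

int main(void)
{
    CHECK_OFF(PGMCPUDEMO, pfnR3BthSyncCR3);
    CHECK_OFF(PGMCPUDEMO, pfnR3BthInvalidatePage);
    CHECK_OFF(PGMCPUDEMO, pfnR3BthPrefetchPage);
    return 0;
}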