VirtualBox

Changeset 31140 in vbox


Timestamp: Jul 27, 2010 1:29:43 PM
Author: vboxsync
Message: PGM: Removed CheckPageFault as it is no longer used. Removed the pfn*SyncPage function pointers since SyncPage is now always called directly. Made a few more internal functions static to encourage compiler optimizations.

Location: trunk/src/VBox/VMM
Files: 6 edited
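
The rationale behind dropping the pfn*SyncPage pointers and marking functions static is the same in both cases: an indirect call through a function-pointer table is opaque to the compiler, whereas a direct call to a function with internal linkage can be inlined and specialized per call site. A minimal stand-alone C sketch of the difference, using hypothetical names rather than the actual PGM code:

    /* Before: dispatch through a per-mode function-pointer table.  The
     * compiler must emit an indirect call and cannot inline the target. */
    typedef struct MODEDATASAMPLE
    {
        int (*pfnSyncPage)(void *pvVCpu, unsigned cPages);
    } MODEDATASAMPLE;

    static int syncViaTable(MODEDATASAMPLE const *pModeData, void *pvVCpu)
    {
        return pModeData->pfnSyncPage(pvVCpu, 1);
    }

    /* After: a file-local static function called directly.  The compiler
     * sees every call site in the translation unit and may inline it. */
    static int sampleSyncPage(void *pvVCpu, unsigned cPages)
    {
        (void)pvVCpu;
        return (int)cPages;               /* placeholder body */
    }

    static int syncDirect(void *pvVCpu)
    {
        return sampleSyncPage(pvVCpu, 1); /* direct, inlinable call */
    }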

  • trunk/src/VBox/VMM/PGM.cpp

    r31136 → r31140

         pVCpu->pgm.s.pfnR3BthSyncCR3              = pModeData->pfnR3BthSyncCR3;
         Assert(pVCpu->pgm.s.pfnR3BthSyncCR3);
    -    pVCpu->pgm.s.pfnR3BthSyncPage             = pModeData->pfnR3BthSyncPage;
         pVCpu->pgm.s.pfnR3BthPrefetchPage         = pModeData->pfnR3BthPrefetchPage;
         pVCpu->pgm.s.pfnR3BthVerifyAccessSyncPage = pModeData->pfnR3BthVerifyAccessSyncPage;

         pVCpu->pgm.s.pfnRCBthInvalidatePage       = pModeData->pfnRCBthInvalidatePage;
         pVCpu->pgm.s.pfnRCBthSyncCR3              = pModeData->pfnRCBthSyncCR3;
    -    pVCpu->pgm.s.pfnRCBthSyncPage             = pModeData->pfnRCBthSyncPage;
         pVCpu->pgm.s.pfnRCBthPrefetchPage         = pModeData->pfnRCBthPrefetchPage;
         pVCpu->pgm.s.pfnRCBthVerifyAccessSyncPage = pModeData->pfnRCBthVerifyAccessSyncPage;

         pVCpu->pgm.s.pfnR0BthInvalidatePage       = pModeData->pfnR0BthInvalidatePage;
         pVCpu->pgm.s.pfnR0BthSyncCR3              = pModeData->pfnR0BthSyncCR3;
    -    pVCpu->pgm.s.pfnR0BthSyncPage             = pModeData->pfnR0BthSyncPage;
         pVCpu->pgm.s.pfnR0BthPrefetchPage         = pModeData->pfnR0BthPrefetchPage;
         pVCpu->pgm.s.pfnR0BthVerifyAccessSyncPage = pModeData->pfnR0BthVerifyAccessSyncPage;
  • trunk/src/VBox/VMM/PGMBth.h

    r31066 → r31140

     PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
     PGM_BTH_DECL(int, SyncCR3)(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
    -PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError);
     PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uError);
     PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);

         pModeData->pfnR3BthSyncCR3              = PGM_BTH_NAME(SyncCR3);
         pModeData->pfnR3BthInvalidatePage       = PGM_BTH_NAME(InvalidatePage);
    -    pModeData->pfnR3BthSyncPage             = PGM_BTH_NAME(SyncPage);
         pModeData->pfnR3BthPrefetchPage         = PGM_BTH_NAME(PrefetchPage);
         pModeData->pfnR3BthVerifyAccessSyncPage = PGM_BTH_NAME(VerifyAccessSyncPage);

             AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(InvalidatePage), rc), rc);
             rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_BTH_NAME_RC_STR(SyncCR3),             &pModeData->pfnRCBthSyncCR3);
    -        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncPage), rc), rc);
    -        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_BTH_NAME_RC_STR(SyncPage),            &pModeData->pfnRCBthSyncPage);
    -        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncPage), rc), rc);
    +        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(SyncCR3), rc), rc);
             rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_BTH_NAME_RC_STR(PrefetchPage),        &pModeData->pfnRCBthPrefetchPage);
             AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_RC_STR(PrefetchPage), rc), rc);

             rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_BTH_NAME_R0_STR(SyncCR3),             &pModeData->pfnR0BthSyncCR3);
             AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(SyncCR3), rc), rc);
    -        rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_BTH_NAME_R0_STR(SyncPage),            &pModeData->pfnR0BthSyncPage);
    -        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(SyncPage), rc), rc);
             rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_BTH_NAME_R0_STR(PrefetchPage),        &pModeData->pfnR0BthPrefetchPage);
             AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_BTH_NAME_R0_STR(PrefetchPage), rc), rc);
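
    For orientation: PGMBth.h is a template header compiled once per shadow/guest paging-mode pair, and PGM_BTH_NAME() pastes a mode-specific prefix onto each template function name; the ring-3 table entries are assigned directly, while the RC and R0 entries are resolved by name out of the respective images with PDMR3LdrGetSymbolRC/R0, as the hunk above shows. A self-contained sketch of the name-pasting pattern, with simplified hypothetical names:

        #include <stdio.h>

        /* Stand-in for one instantiation's PGM_BTH_NAME(): paste a
         * mode-specific prefix onto the template function name. */
        #define BTH_NAME_32BIT(name) pgmBth32Bit##name

        /* A template function as it would look after one inclusion of the
         * template header (placeholder body). */
        static int BTH_NAME_32BIT(SyncCR3)(int cr3)
        {
            return cr3 != 0 ? 0 : -1;
        }

        /* Per-mode table of entry points, filled the same way the diff
         * above fills pModeData->pfnR3Bth*. */
        typedef struct BTHMODEDATA
        {
            int (*pfnBthSyncCR3)(int cr3);
        } BTHMODEDATA;

        int main(void)
        {
            BTHMODEDATA ModeData = { BTH_NAME_32BIT(SyncCR3) };
            printf("SyncCR3 -> %d\n", ModeData.pfnBthSyncCR3(1));
            return 0;
        }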
  • trunk/src/VBox/VMM/PGMInternal.h

    r31136 → r31140

         DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    -    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
         DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));

         DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    -    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
         DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));

         DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    -    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
         DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));

         DECLR3CALLBACKMEMBER(int,       pfnR3BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    -    DECLR3CALLBACKMEMBER(int,       pfnR3BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
         DECLR3CALLBACKMEMBER(int,       pfnR3BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR3CALLBACKMEMBER(int,       pfnR3BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));

         DECLR0CALLBACKMEMBER(int,       pfnR0BthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    -    DECLR0CALLBACKMEMBER(int,       pfnR0BthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
         DECLR0CALLBACKMEMBER(int,       pfnR0BthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLR0CALLBACKMEMBER(int,       pfnR0BthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));

         DECLRCCALLBACKMEMBER(int,       pfnRCBthInvalidatePage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncCR3,(PVMCPU pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal));
    -    DECLRCCALLBACKMEMBER(int,       pfnRCBthSyncPage,(PVMCPU pVCpu, X86PDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uError));
         DECLRCCALLBACKMEMBER(int,       pfnRCBthPrefetchPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage));
         DECLRCCALLBACKMEMBER(int,       pfnRCBthVerifyAccessSyncPage,(PVMCPU pVCpu, RTGCPTR GCPtrPage, unsigned fFlags, unsigned uError));

         DECLRCCALLBACKMEMBER(int,       pfnRCBthMapCR3,(PVMCPU pVCpu, RTGCPHYS GCPhysCR3));
         DECLRCCALLBACKMEMBER(int,       pfnRCBthUnmapCR3,(PVMCPU pVCpu));
    +#if 0
         RTRCPTR                         alignment2; /**< structure size alignment. */
    +#endif
         /** @} */
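
    The newly #if 0'd alignment2 member is padding: PGMCPU is shared between the ring-3, ring-0 and raw-mode builds, so explicit padding members are enabled or disabled to keep the structure size and member offsets identical across all three compilers (tstVMStructRC.cpp below checks exactly that). A rough illustration of the technique, using hypothetical types rather than the real RTRCPTR-based layout:

        #include <stdint.h>

        /* Raw-mode context pointers are always 32-bit, whatever the host. */
        typedef uint32_t RCPTRSAMPLE;

        typedef struct SHAREDSTATESAMPLE
        {
            uint64_t    u64Something;
            RCPTRSAMPLE pfnRCCallback;
            RCPTRSAMPLE alignment0;   /* pads the size to an 8-byte multiple */
        } SHAREDSTATESAMPLE;

        /* Catch layout drift at compile time (C11 _Static_assert). */
        _Static_assert(sizeof(SHAREDSTATESAMPLE) % 8 == 0,
                       "structure size alignment");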
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r31136 → r31140

     PGM_BTH_DECL(int, Trap0eHandler)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, bool *pfLockTaken);
     PGM_BTH_DECL(int, InvalidatePage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);
    -PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
    -PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage);
    -PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
    -PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
    +static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
    +static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
    +static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
     PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPU pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
     PGM_BTH_DECL(int, PrefetchPage)(PVMCPU pVCpu, RTGCPTR GCPtrPage);

     PGM_BTH_DECL(unsigned, AssertCR3)(PVMCPU pVCpu, uint64_t cr3, uint64_t cr4, RTGCPTR GCPtr = 0, RTGCPTR cb = ~(RTGCPTR)0);
     #endif
    -DECLINLINE(void) PGM_BTH_NAME(SyncPageWorkerTrackDeref)(PVMCPU pVCpu, PPGMPOOLPAGE pShwPage, RTHCPHYS HCPhys, uint16_t iPte);
     PGM_BTH_DECL(int, MapCR3)(PVMCPU pVCpu, RTGCPHYS GCPhysCR3);
     PGM_BTH_DECL(int, UnmapCR3)(PVMCPU pVCpu);

     # if  (   PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT \
            || PGM_GST_TYPE == PGM_TYPE_PAE   || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    -    && PGM_SHW_TYPE != PGM_TYPE_NESTED    \
    +    && PGM_SHW_TYPE != PGM_TYPE_NESTED \
         && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT)
         int rc;

      * @param   uErr        Fault error (X86_TRAP_PF_*).
      */
    -PGM_BTH_DECL(int, SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
    +static int PGM_BTH_NAME(SyncPage)(PVMCPU pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr)
     {
         PVM      pVM = pVCpu->CTX_SUFF(pVM);

     
     /**
    - * Investigate a page fault to identify ones targeted at the guest and to
    - * handle write protection page faults caused by dirty bit tracking.
    - *
    - * This will detect invalid entries and raise X86_TRAP_PF_RSVD.
    - *
    - * @returns VBox status code.
    - * @param   pVCpu       The VMCPU handle.
    - * @param   uErr        Page fault error code.  The X86_TRAP_PF_RSVD flag
    - *                      cannot be trusted as it is used for MMIO optimizations.
    - * @param   pPdeSrc     Guest page directory entry.
    - * @param   GCPtrPage   Guest context page address.
    - */
    -PGM_BTH_DECL(int, CheckPageFault)(PVMCPU pVCpu, uint32_t uErr, PGSTPDE pPdeSrc, RTGCPTR GCPtrPage)
    -{
    -    bool        fUserLevelFault      = !!(uErr & X86_TRAP_PF_US);
    -    bool        fWriteFault          = !!(uErr & X86_TRAP_PF_RW);
    -# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
    -    bool        fMaybeNXEFault       =   (uErr & X86_TRAP_PF_ID) && GST_IS_NX_ACTIVE(pVCpu);
    -# endif
    -    bool        fMaybeWriteProtFault = fWriteFault && (fUserLevelFault || CPUMIsGuestR0WriteProtEnabled(pVCpu));
    -    PVM         pVM                  = pVCpu->CTX_SUFF(pVM);
    -    int         rc;
    -
    -    LogFlow(("CheckPageFault: GCPtrPage=%RGv uErr=%#x PdeSrc=%08x\n", GCPtrPage, uErr, pPdeSrc->u));
    -
    -    /*
    -     * Note! For PAE it is safe to assume that bad guest physical addresses
    -     *       (which returns all FFs) in the translation tables will cause
    -     *       #PF(RSVD).  The same will be the case for long mode provided the
    -     *       physical address width is less than 52 bits - this we ASSUME.
    -     *
    -     * Note! No convenient shortcuts here, we have to validate everything!
    -     */
    -
    -# if PGM_GST_TYPE == PGM_TYPE_AMD64
    -    /*
    -     * Real page fault? (PML4E level)
    -     */
    -    PX86PML4    pPml4Src  = pgmGstGetLongModePML4Ptr(pVCpu);
    -    if (RT_UNLIKELY(!pPml4Src))
    -        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
    -
    -    PX86PML4E   pPml4eSrc = &pPml4Src->a[(GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK];
    -    if (!pPml4eSrc->n.u1Present)
    -        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 0);
    -    if (RT_UNLIKELY(!GST_IS_PML4E_VALID(pVCpu, *pPml4eSrc)))
    -        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 0);
    -    if (   (fMaybeWriteProtFault && !pPml4eSrc->n.u1Write)
    -        || (fMaybeNXEFault       &&  pPml4eSrc->n.u1NoExecute)
    -        || (fUserLevelFault      && !pPml4eSrc->n.u1User) )
    -        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0);
    -
    -    /*
    -     * Real page fault? (PDPE level)
    -     */
    -    PX86PDPT pPdptSrc;
    -    rc = PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, pPml4eSrc->u & X86_PML4E_PG_MASK, &pPdptSrc);
    -    if (RT_FAILURE(rc))
    -    {
    -        AssertMsgReturn(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc), rc);
    -        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
    -    }
    -
    -    PX86PDPE pPdpeSrc = &pPdptSrc->a[(GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64];
    -    if (!pPdpeSrc->n.u1Present)
    -        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
    -    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
    -        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
    -    if (   (fMaybeWriteProtFault && !pPdpeSrc->lm.u1Write)
    -        || (fMaybeNXEFault       &&  pPdpeSrc->lm.u1NoExecute)
    -        || (fUserLevelFault      && !pPdpeSrc->lm.u1User) )
    -        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 1);
    -
    -# elif PGM_GST_TYPE == PGM_TYPE_PAE
    -    /*
    -     * Real page fault? (PDPE level)
    -     */
    -    PX86PDPT pPdptSrc = pgmGstGetPaePDPTPtr(pVCpu);
    -    if (RT_UNLIKELY(!pPdptSrc))
    -        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
    -/** @todo Handle bad CR3 address. */
    -    PX86PDPE pPdpeSrc = pgmGstGetPaePDPEPtr(pVCpu, GCPtrPage);
    -    if (!pPdpeSrc->n.u1Present)
    -        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 1);
    -    if (!GST_IS_PDPE_VALID(pVCpu, *pPdpeSrc))
    -        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 1);
    -# endif /* PGM_GST_TYPE == PGM_TYPE_PAE */
    -
    -    /*
    -     * Real page fault? (PDE level)
    -     */
    -    if (!pPdeSrc->n.u1Present)
    -        return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 2);
    -    bool const fBigPage = pPdeSrc->b.u1Size && GST_IS_PSE_ACTIVE(pVCpu);
    -    if (!fBigPage ? !GST_IS_PDE_VALID(pVCpu, *pPdeSrc) : !GST_IS_BIG_PDE_VALID(pVCpu, *pPdeSrc))
    -        return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 2);
    -    if (   (fMaybeWriteProtFault && !pPdeSrc->n.u1Write)
    -# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
    -        || (fMaybeNXEFault       &&  pPdeSrc->n.u1NoExecute)
    -# endif
    -        || (fUserLevelFault      && !pPdeSrc->n.u1User) )
    -        return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 2);
    -
    -    /*
    -     * First check the easy case where the page directory has been marked
    -     * read-only to track the dirty bit of an emulated BIG page.
    -     */
    -    if (fBigPage)
    -    {
    -        /* Mark guest page directory as accessed */
    -# if PGM_GST_TYPE == PGM_TYPE_AMD64
    -        pPml4eSrc->n.u1Accessed = 1;
    -        pPdpeSrc->lm.u1Accessed = 1;
    -# endif
    -        pPdeSrc->b.u1Accessed   = 1;
    -
    -        /* Mark the guest PDE dirty if it's a write access. */
    -        if (fWriteFault)
    -            pPdeSrc->b.u1Dirty = 1;
    -    }
    -    else
    -    {
    -        /*
    -         * Map the guest page table.
    -         */
    -        PGSTPT  pPTSrc;
    -        PGSTPTE pPteSrc;
    -        GSTPTE  PteSrc;
    -        rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
    -        if (RT_SUCCESS(rc))
    -        {
    -            pPteSrc = &pPTSrc->a[(GCPtrPage >> GST_PT_SHIFT) & GST_PT_MASK];
    -            PteSrc.u = pPteSrc->u;
    -        }
    -        else if (rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS)
    -        {
    -            /* All bits in the PTE are set. */
    -# if PGM_GST_TYPE == PGM_TYPE_32BIT
    -            PteSrc.u = UINT32_MAX;
    -# else
    -            PteSrc.u = UINT64_MAX;
    -# endif
    -            pPteSrc = &PteSrc;
    -        }
    -        else
    -        {
    -            AssertRC(rc);
    -            return rc;
    -        }
    -
    -        /*
    -         * Real page fault?
    -         */
    -        if (!PteSrc.n.u1Present)
    -            return PGM_BTH_NAME(CheckPageFaultReturnNP)(pVCpu, uErr, GCPtrPage, 3);
    -        if (!GST_IS_PTE_VALID(pVCpu, PteSrc))
    -            return PGM_BTH_NAME(CheckPageFaultReturnRSVD)(pVCpu, uErr, GCPtrPage, 3);
    -        if (   (fMaybeWriteProtFault && !PteSrc.n.u1Write)
    -# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
    -            || (fMaybeNXEFault       &&  PteSrc.n.u1NoExecute)
    -# endif
    -            || (fUserLevelFault      && !PteSrc.n.u1User) )
    -            return PGM_BTH_NAME(CheckPageFaultReturnProt)(pVCpu, uErr, GCPtrPage, 0);
    -
    -        LogFlow(("CheckPageFault: page fault at %RGv PteSrc.u=%08x\n", GCPtrPage, PteSrc.u));
    -
    -        /*
    -         * Set the accessed bits in the page directory and the page table.
    -         */
    -# if PGM_GST_TYPE == PGM_TYPE_AMD64
    -        pPml4eSrc->n.u1Accessed = 1;
    -        pPdpeSrc->lm.u1Accessed = 1;
    -# endif
    -        pPdeSrc->n.u1Accessed   = 1;
    -        pPteSrc->n.u1Accessed   = 1;
    -
    -        /*
    -         * Set the dirty flag in the PTE if it's a write access.
    -         */
    -        if (fWriteFault)
    -        {
    -# ifdef VBOX_WITH_STATISTICS
    -            if (!pPteSrc->n.u1Dirty)
    -                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,DirtiedPage));
    -            else
    -                STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageAlreadyDirty));
    -# endif
    -
    -            pPteSrc->n.u1Dirty = 1;
    -        }
    -    }
    -    return VINF_SUCCESS;
    -}
    -
    -
    -/**
      * Handle dirty bit tracking faults.
      *

      * @param   GCPtrPage   Guest context page address.
      */
    -PGM_BTH_DECL(int, CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
    +static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPU pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage)
     {
         PVM         pVM   = pVCpu->CTX_SUFF(pVM);

      * @param   GCPtrPage   GC Pointer of the page that caused the fault
      */
    -PGM_BTH_DECL(int, SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
    +static int PGM_BTH_NAME(SyncPT)(PVMCPU pVCpu, unsigned iPDSrc, PGSTPD pPDSrc, RTGCPTR GCPtrPage)
     {
         PVM             pVM      = pVCpu->CTX_SUFF(pVM);

         Assert(PGMIsLocked(pVM));
     
    -#if   (   PGM_GST_TYPE == PGM_TYPE_32BIT \
    -       || PGM_GST_TYPE == PGM_TYPE_PAE    \
    -       || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    -    && PGM_SHW_TYPE != PGM_TYPE_NESTED    \
    -    && PGM_SHW_TYPE != PGM_TYPE_EPT
    +#if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
    +     || PGM_GST_TYPE == PGM_TYPE_PAE \
    +     || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    + && PGM_SHW_TYPE != PGM_TYPE_NESTED \
    + && PGM_SHW_TYPE != PGM_TYPE_EPT
     
         int             rc       = VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r31123 → r31140

      || PGM_GST_TYPE == PGM_TYPE_PAE \
      || PGM_GST_TYPE == PGM_TYPE_AMD64
    -PGM_GST_DECL(int,  Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
    +static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk);
     #endif
     PGM_GST_DECL(int,  GetPage)(PVMCPU pVCpu, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);

      * @param   pWalk       Where to return the walk result. This is always set.
      */
    -PGM_GST_DECL(int, Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
    +static int PGM_GST_NAME(Walk)(PVMCPU pVCpu, RTGCPTR GCPtr, PGSTPTWALK pWalk)
     {
         int rc;

         if (pfFlags)
         {
    -        /* The RW and US flags are determined via bitwise AND across all levels. */
    -        uint64_t fUpperRwUs = (X86_PTE_RW | X86_PTE_US)
    -#  if PGM_GST_TYPE == PGM_TYPE_AMD64
    -                            & Walk.Pml4e.u
    -                            & Walk.Pdpe.u
    -#  endif
    -                            & Walk.Pde.u;
    -        fUpperRwUs |= ~(uint64_t)(X86_PTE_RW | X86_PTE_US);
    -
    -        /* The RW and US flags are determined via bitwise AND across all levels. */
    +        if (!Walk.Core.fBigPage)
    +            *pfFlags = (Walk.Pte.u & ~(GST_PTE_PG_MASK | X86_PTE_RW | X86_PTE_US))                      /* NX not needed */
    +                     | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
    +                     | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
     # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
    -        uint32_t fUpperNx   = 0
    -#  if PGM_GST_TYPE == PGM_TYPE_AMD64
    -                            | Walk.Pml4e.n.u1NoExecute
    -                            | Walk.Pdpe.lm.u1NoExecute
    -#  endif
    -                            | Walk.Pde.n.u1NoExecute;
    -# endif
    -
    -        if (!Walk.Core.fBigPage)
    -        {
    -            *pfFlags = (Walk.Pte.u & ~GST_PTE_PG_MASK) & fUpperRwUs;
    -# if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
    -            if (Walk.Pte.n.u1NoExecute || fUpperNx)
    -            {
    -                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
    -                *pfFlags |= X86_PTE_PAE_NX;
    -            }
    -# endif
    -        }
    +                     | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
    +# endif
    +                     ;
             else
             {
    -            *pfFlags = (  (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PTE_PAT))
    -                        | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT))
    -                     & fUpperRwUs;
    +            *pfFlags = (Walk.Pde.u & ~(GST_PTE_PG_MASK | X86_PDE4M_RW | X86_PDE4M_US | X86_PDE4M_PS))   /* NX not needed */
    +                    | ((Walk.Pde.u & X86_PDE4M_PAT) >> X86_PDE4M_PAT_SHIFT)
    +                    | (Walk.Core.fEffectiveRW ? X86_PTE_RW : 0)
    +                    | (Walk.Core.fEffectiveUS ? X86_PTE_US : 0)
     # if PGM_WITH_NX(PGM_GST_TYPE, PGM_GST_TYPE)
    -            if (fUpperNx)
    -            {
    -                Assert(GST_IS_NX_ACTIVE(pVCpu)); /* should trigger RSVD error otherwise. */
    -                *pfFlags |= X86_PTE_PAE_NX;
    -            }
    -# endif
    +                    | (Walk.Core.fEffectiveNX ? X86_PTE_PAE_NX : 0)
    +# endif
    +                    ;
             }
         }
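
    The rewritten GetPage path no longer re-derives the upper-level RW/US/NX bits itself; it consumes the effective access bits the page walker accumulated (Walk.Core.fEffectiveRW and friends). The combination rule is the standard x86 one: RW and US are ANDed down the hierarchy, NX is ORed. A small illustrative sketch of that accumulation, with a simplified hypothetical structure:

        #include <stdbool.h>
        #include <stdint.h>

        #define X86_PTE_RW UINT64_C(0x0000000000000002)
        #define X86_PTE_US UINT64_C(0x0000000000000004)

        typedef struct WALKCORESAMPLE
        {
            bool fEffectiveRW;  /* writable at every level */
            bool fEffectiveUS;  /* user-accessible at every level */
            bool fEffectiveNX;  /* no-execute at any level */
        } WALKCORESAMPLE;

        /* Fold one paging-structure entry into the effective access bits:
         * any level can revoke write or user access, any level can set NX.
         * Start from { true, true, false } and call once per level walked
         * (PML4E, PDPE, PDE, PTE). */
        static void walkAccumulate(WALKCORESAMPLE *pCore, uint64_t uEntry,
                                   bool fEntryNX)
        {
            pCore->fEffectiveRW = pCore->fEffectiveRW && (uEntry & X86_PTE_RW) != 0;
            pCore->fEffectiveUS = pCore->fEffectiveUS && (uEntry & X86_PTE_US) != 0;
            pCore->fEffectiveNX = pCore->fEffectiveNX || fEntryNX;
        }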
  • trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp

    r31123 → r31140

         GEN_CHECK_OFF(PGMCPU, pfnR3BthSyncCR3);
         GEN_CHECK_OFF(PGMCPU, pfnR3BthInvalidatePage);
    -    GEN_CHECK_OFF(PGMCPU, pfnR3BthSyncPage);
         GEN_CHECK_OFF(PGMCPU, pfnR3BthPrefetchPage);
         GEN_CHECK_OFF(PGMCPU, pfnR3BthVerifyAccessSyncPage);

         GEN_CHECK_OFF(PGMCPU, pfnRCBthTrap0eHandler);
         GEN_CHECK_OFF(PGMCPU, pfnRCBthInvalidatePage);
    -    GEN_CHECK_OFF(PGMCPU, pfnRCBthSyncPage);
         GEN_CHECK_OFF(PGMCPU, pfnRCBthPrefetchPage);
         GEN_CHECK_OFF(PGMCPU, pfnRCBthVerifyAccessSyncPage);
         GEN_CHECK_OFF(PGMCPU, pfnRCBthAssertCR3);
    +    GEN_CHECK_OFF(PGMCPU, pfnR0BthTrap0eHandler);
    +    GEN_CHECK_OFF(PGMCPU, pfnR0BthInvalidatePage);
    +    GEN_CHECK_OFF(PGMCPU, pfnR0BthPrefetchPage);
    +    GEN_CHECK_OFF(PGMCPU, pfnR0BthVerifyAccessSyncPage);
    +    GEN_CHECK_OFF(PGMCPU, pfnR0BthAssertCR3);
         GEN_CHECK_OFF(PGMCPU, DisState);
         GEN_CHECK_OFF(PGMCPU, cGuestModeChanges);
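
    The GEN_CHECK_OFF entries keep this testcase in step with the structure: an offset record is emitted for every member so the 32-bit raw-mode build can be compared against the host build and any layout disagreement flagged. A simplified stand-in for the idea using offsetof (hypothetical macro; the real generator's two-pass comparison is more involved):

        #include <stddef.h>
        #include <stdio.h>

        typedef struct PGMCPUSAMPLE
        {
            int (*pfnR3BthPrefetchPage)(void);
            int (*pfnR0BthTrap0eHandler)(void);
        } PGMCPUSAMPLE;

        /* Print a member's offset; diffing the output of two differently
         * targeted builds exposes any structure layout drift. */
        #define CHECK_OFF(type, member) \
            printf(#type "." #member " = %u\n", (unsigned)offsetof(type, member))

        int main(void)
        {
            CHECK_OFF(PGMCPUSAMPLE, pfnR3BthPrefetchPage);
            CHECK_OFF(PGMCPUSAMPLE, pfnR0BthTrap0eHandler);
            return 0;
        }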