VirtualBox

Changeset 31101 in vbox


Timestamp:
Jul 26, 2010 9:33:19 AM (15 years ago)
Author:
vboxsync
Message:

PGM: Moved the code dealing with access handlers out of the #PF function.
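
As a rough illustration of the refactoring described above, here is a minimal, self-contained C sketch (not VirtualBox code; every name in it is a hypothetical simplification). The access-handler logic is pulled out of the monolithic page-fault handler into a separate helper, leaving the #PF path to classify the fault and delegate:

/* Hypothetical, simplified stand-ins for the real PGM types. */
#include <stdio.h>

typedef enum { FAULT_HANDLED, FAULT_EMULATE } FaultRc;
typedef struct Page { int fHasActiveHandlers; } Page;

/* The extracted helper: everything specific to pages covered by access
   handlers lives here now, instead of inline in the #PF handler. */
static FaultRc handleAccessHandlers(Page *pPage, unsigned uErr)
{
    printf("access-handler path: page=%p uErr=%#x\n", (void *)pPage, uErr);
    return FAULT_HANDLED;
}

/* The #PF handler shrinks to classification plus a tail call. */
static FaultRc pageFaultHandler(Page *pPage, unsigned uErr)
{
    if (pPage->fHasActiveHandlers)
        return handleAccessHandlers(pPage, uErr);   /* moved-out path */
    return FAULT_EMULATE;                           /* all other faults */
}

int main(void)
{
    Page pg = { 1 };
    return pageFaultHandler(&pg, 0x2 /* write fault */) == FAULT_HANDLED ? 0 : 1;
}

In the diff below, PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers) plays the role of the extracted helper.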

File:
1 edited

Legend:

  (no prefix)  Unmodified
  +            Added
  -            Removed
  …            Unchanged lines skipped
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

r31093 → r31101

/**
+ * Deal with a guest page fault.
+ *
+ * @returns Strict VBox status code.
+ * @retval  VINF_EM_RAW_GUEST_TRAP
+ * @retval  VINF_EM_RAW_EMULATE_INSTR
+ *
+ * @param   pVCpu           The current CPU.
+ * @param   uErr            The error code.
+ * @param   pRegFrame       Trap register frame.
+ * @param   pvFault         The faulting guest virtual address.
+ * @param   pPage           The PGM page descriptor for the faulting address.
+ * @param   pGstWalk        The guest page table walk result (paging modes only).
+ */
+static VBOXSTRICTRC PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(PVMCPU pVCpu, RTGCUINT uErr, PCPUMCTXCORE pRegFrame,
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+                                                                RTGCPTR pvFault, PPGMPAGE pPage, PGSTPTWALK pGstWalk)
+# else
+                                                                RTGCPTR pvFault, PPGMPAGE pPage)
+# endif
+{
+    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+# if !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+    GSTPDE const    PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A};
+# endif
+    PVM             pVM         = pVCpu->CTX_SUFF(pVM);
+    int             rc;
+
+    if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
+    {
+        /*
+         * Physical page access handler.
+         */
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        const RTGCPHYS  GCPhysFault = pGstWalk->Core.GCPhys;
+# else
+        const RTGCPHYS  GCPhysFault = (RTGCPHYS)pvFault;
+# endif
+        PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
+        if (pCur)
+        {
+#  ifdef PGM_SYNC_N_PAGES
+            /*
+             * If the region is write protected and we got a page not present fault, then sync
+             * the pages. If the fault was caused by a read, then restart the instruction.
+             * In case of write access continue to the GC write handler.
+             *
+             * ASSUMES that there is only one handler per page or that they have similar write properties.
+             */
+            if (    pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
+                && !(uErr & X86_TRAP_PF_P))
+            {
+#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+                rc = PGM_BTH_NAME(SyncPage)(pVCpu, pGstWalk->Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#   else
+                rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#   endif
+                if (    RT_FAILURE(rc)
+                    || !(uErr & X86_TRAP_PF_RW)
+                    || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
+                {
+                    AssertRC(rc);
+                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
+                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
+                    return rc;
+                }
+            }
+#  endif
+
+            AssertMsg(   pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
+                      || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
+                      ("Unexpected trap for physical handler: %08X (phys=%08x) pPage=%R[pgmpage] uErr=%X, enum=%d\n",
+                       pvFault, GCPhysFault, pPage, uErr, pCur->enmType));
+
+# if defined(IN_RC) || defined(IN_RING0) /** @todo remove this */
+            if (pCur->CTX_SUFF(pfnHandler))
+            {
+                PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+#  ifdef IN_RING0
+                PFNPGMR0PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
+#  else
+                PFNPGMRCPHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
+#  endif
+                bool  fLeaveLock = (pfnHandler != pPool->CTX_SUFF(pfnAccessHandler));
+                void *pvUser = pCur->CTX_SUFF(pvUser);
+
+                STAM_PROFILE_START(&pCur->Stat, h);
+                if (fLeaveLock)
+                    pgmUnlock(pVM); /** @todo Not entirely safe. */
+
+                rc = pfnHandler(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
+                if (fLeaveLock)
+                    pgmLock(pVM);
+#  ifdef VBOX_WITH_STATISTICS
+                pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
+                if (pCur)
+                    STAM_PROFILE_STOP(&pCur->Stat, h);
+#  else
+                pCur = NULL;    /* might be invalid by now. */
+#  endif
+
+            }
+            else
+# endif /* IN_RC || IN_RING0 */
+                rc = VINF_EM_RAW_EMULATE_INSTR;
+
+            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersPhysical);
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndPhys; });
+            return rc;
+        }
+    }
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+    else
+    {
+#  ifdef PGM_SYNC_N_PAGES
+        /*
+         * If the region is write protected and we got a page not present fault, then sync
+         * the pages. If the fault was caused by a read, then restart the instruction.
+         * In case of write access continue to the GC write handler.
+         */
+        if (    PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
+            && !(uErr & X86_TRAP_PF_P))
+        {
+            rc = PGM_BTH_NAME(SyncPage)(pVCpu, pGstWalk->Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
+            if (    RT_FAILURE(rc)
+                ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
+                ||  !(uErr & X86_TRAP_PF_RW))
+            {
+                AssertRC(rc);
+                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
+                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
+                return rc;
+            }
+        }
+#  endif
+        /*
+         * OK, it's a virtual page access handler.
+         *
+         * Since it's faster to search by address, we'll do that first
+         * and then retry by GCPhys if that fails.
+         */
+        /** @todo r=bird: perhaps we should consider looking up by physical address directly now?
+         * r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be
+         *        out of sync, because the page was changed without us noticing it (not-present -> present
+         *        without invlpg or mov cr3, xxx).
+         */
+        PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
+        if (pCur)
+        {
+            AssertMsg(!(pvFault - pCur->Core.Key < pCur->cb)
+                      || (     pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
+                           || !(uErr & X86_TRAP_PF_P)
+                           || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
+                      ("Unexpected trap for virtual handler: %RGv (phys=%RGp) pPage=%R[pgmpage] uErr=%X, enum=%d\n",
+                       pvFault, pGstWalk->Core.GCPhys, pPage, uErr, pCur->enmType));
+
+            if (    pvFault - pCur->Core.Key < pCur->cb
+                &&  (    uErr & X86_TRAP_PF_RW
+                     ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
+            {
+#   ifdef IN_RC
+                STAM_PROFILE_START(&pCur->Stat, h);
+                pgmUnlock(pVM);
+                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
+                pgmLock(pVM);
+                STAM_PROFILE_STOP(&pCur->Stat, h);
+#   else
+                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
+#   endif
+                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtual);
+                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
+                return rc;
+            }
+            /* Unhandled part of a monitored page */
+        }
+        else
+        {
+            /* Check by physical address. */
+            unsigned iPage;
+            rc = pgmHandlerVirtualFindByPhysAddr(pVM, pGstWalk->Core.GCPhys, &pCur, &iPage);
+            Assert(RT_SUCCESS(rc) || !pCur);
+            if (    pCur
+                &&  (   uErr & X86_TRAP_PF_RW
+                     || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
+            {
+                Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == (pGstWalk->Core.GCPhys & X86_PTE_PAE_PG_MASK));
+#   ifdef IN_RC
+                RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
+                Assert(off < pCur->cb);
+                STAM_PROFILE_START(&pCur->Stat, h);
+                pgmUnlock(pVM);
+                rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
+                pgmLock(pVM);
+                STAM_PROFILE_STOP(&pCur->Stat, h);
+#   else
+                rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
+#   endif
+                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtualByPhys);
+                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
+                return rc;
+            }
+        }
+    }
+#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+
+    /*
+     * There is a handled area of the page, but this fault doesn't belong to it.
+     * We must emulate the instruction.
+     *
+     * To avoid crashing (non-fatally) in the interpreter and going back to the
+     * recompiler, we first check whether this was a page-not-present fault for
+     * a page with only write access handlers. Restart the instruction if it
+     * wasn't a write access.
+     */
+    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersUnhandled);
+
+    if (    !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
+        &&  !(uErr & X86_TRAP_PF_P))
+    {
+#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        rc = PGM_BTH_NAME(SyncPage)(pVCpu, pGstWalk->Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#  else
+        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
+#  endif
+        if (    RT_FAILURE(rc)
+            ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
+            ||  !(uErr & X86_TRAP_PF_RW))
+        {
+            AssertRC(rc);
+            STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
+            return rc;
+        }
+    }
+
+    /** @todo This particular case can cause quite a lot of overhead; e.g. the
+     *        early stage of kernel booting in Ubuntu 6.06 writes to an
+     *        unhandled part of the LDT page several million times. */
+    rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
+    LogFlow(("PGM: PGMInterpretInstruction -> rc=%d pPage=%R[pgmpage]\n", rc, pPage));
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndUnhandled; });
+    return rc;
+}
+
+
+/**
 * #PF Handler for raw-mode guest execution.
 *
…
    if (RT_FAILURE_NP(rc))
        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
+
+    /* Assert some GstWalk sanity. */
#   if PGM_GST_TYPE == PGM_TYPE_AMD64
    AssertMsg(GstWalk.Pml4e.u == GstWalk.pPml4e->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pml4e.u, (uint64_t)GstWalk.pPml4e->u));
…
    AssertMsg(GstWalk.Pde.u == GstWalk.pPde->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u));
    AssertMsg(GstWalk.Core.fBigPage || GstWalk.Pte.u == GstWalk.pPte->u, ("%RX64 %RX64\n", (uint64_t)GstWalk.Pte.u, (uint64_t)GstWalk.pPte->u));
+    Assert(GstWalk.Core.fSucceeded);

    if (uErr & (X86_TRAP_PF_RW | X86_TRAP_PF_US | X86_TRAP_PF_ID))
…
            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerGuestFault)(pVCpu, &GstWalk, uErr));
    }
-
+#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
+
+#  ifdef PGM_WITH_MMIO_OPTIMIZATIONS
+    /*
+     * If it is a reserved bit fault we know that it is an MMIO or access
+     * handler related fault and can skip the dirty page stuff below.
+     */
+    if (uErr & X86_TRAP_PF_RSVD)
+    {
+        Assert(uErr & X86_TRAP_PF_P);
+        PPGMPAGE pPage;
+/** @todo Only all physical access handlers here, so optimize further. */
+#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        rc = pgmPhysGetPageEx(&pVM->pgm.s, GstWalk.Core.GCPhys, &pPage);
+        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage,
+                                                                                 &GstWalk));
+#   else
+        rc = pgmPhysGetPageEx(&pVM->pgm.s, (RTGCPHYS)pvFault, &pPage);
+        if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+            return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage));
+#   endif
+    }
+#  endif /* PGM_WITH_MMIO_OPTIMIZATIONS */
+
+#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    /*
     * Set the accessed and dirty flags.
…
              ("%RX64 %RX64 pPte=%p pPde=%p Pte=%RX64\n", (uint64_t)GstWalk.Pde.u, (uint64_t)GstWalk.pPde->u, GstWalk.pPte, GstWalk.pPde, (uint64_t)GstWalk.pPte->u));
#  else  /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-    GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A};
+    GSTPDE const PdeSrcDummy = { X86_PDE_P | X86_PDE_US | X86_PDE_RW | X86_PDE_A}; /** @todo eliminate this */
#  endif /* !PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */

…
#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
    const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
-
    PX86PDPAE       pPDDst;
#   if PGM_GST_TYPE == PGM_TYPE_PAE
…
    rc = pgmShwSyncPaePDPtr(pVCpu, pvFault, X86_PDPE_P, &pPDDst);       /* RW, US and A are reserved in PAE mode. */
#   endif
-    if (rc != VINF_SUCCESS)
-    {
-        AssertRC(rc);
-        return rc;
-    }
-    Assert(pPDDst);
+    AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_4);

#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
…
    rc = pgmShwSyncLongModePDPtr(pVCpu, pvFault, GstWalk.Pml4e.u, GstWalk.Pdpe.u, &pPDDst);
#   endif
-    if (rc != VINF_SUCCESS)
-    {
-        AssertRC(rc);
-        return rc;
-    }
-    Assert(pPDDst);
+    AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_4);

#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
    const unsigned  iPDDst = ((pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK);
    PEPTPD          pPDDst;
-
    rc = pgmShwGetEPTPDPtr(pVCpu, pvFault, NULL, &pPDDst);
-    if (rc != VINF_SUCCESS)
-    {
-        AssertRC(rc);
-        return rc;
-    }
+    AssertMsgReturn(rc == VINF_SUCCESS, ("rc=%Rrc\n", rc), RT_FAILURE_NP(rc) ? rc : VERR_INTERNAL_ERROR_4);
+#  endif
    Assert(pPDDst);
-#  endif

#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    /* Dirty page handling. */
+    /*
+     * Dirty page handling.
+     *
+     * If we successfully correct the write protection fault due to dirty bit
+     * tracking, then return immediately.
+     */
    if (uErr & X86_TRAP_PF_RW)  /* write fault? */
    {
-        /*
-         * If we successfully correct the write protection fault due to dirty bit
-         * tracking, then return immediately.
-         */
        STAM_PROFILE_START(&pVCpu->pgm.s.CTX_MID_Z(Stat,DirtyBitTracking), a);
        rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, &pPDDst->a[iPDDst], GstWalk.pPde, pvFault);
…
#  endif
    if (    !(uErr & X86_TRAP_PF_P) /* not set means page not present instead of page protection violation */
-        &&  !pPDDst->a[iPDDst].n.u1Present
-        )
+        &&  !pPDDst->a[iPDDst].n.u1Present)
    {
        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2SyncPT; });
…
     * in page tables which the guest believes to be present.
     */
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    RTGCPHYS GCPhys = GstWalk.Core.GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK;
…
        LogFlow(("PGM #PF: pgmPhysGetPageEx(%RGp) failed with %Rrc\n", GCPhys, rc));
        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersInvalid);
-        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
        return VINF_EM_RAW_EMULATE_INSTR;
    }

    /*
-     * Any handlers?
+     * Any handlers for this page?
     */
    if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-    {
-        if (PGM_PAGE_HAS_ANY_PHYSICAL_HANDLERS(pPage))
-        {
-            /*
-             * Physical page access handler.
-             */
-            const RTGCPHYS  GCPhysFault = GCPhys | (pvFault & PAGE_OFFSET_MASK);
-            PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
-            if (pCur)
-            {
-#  ifdef PGM_SYNC_N_PAGES
-                /*
-                 * If the region is write protected and we got a page not present fault, then sync
-                 * the pages. If the fault was caused by a read, then restart the instruction.
-                 * In case of write access continue to the GC write handler.
-                 *
-                 * ASSUMES that there is only one handler per page or that they have similar write properties.
-                 */
-                if (    pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
-                    && !(uErr & X86_TRAP_PF_P))
-                {
-#   if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#   else
-                    rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#   endif
-                    if (    RT_FAILURE(rc)
-                        || !(uErr & X86_TRAP_PF_RW)
-                        || rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE)
-                    {
-                        AssertRC(rc);
-                        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
-                        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
-                        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
-                        return rc;
-                    }
-                }
-#  endif
-
-                AssertMsg(   pCur->enmType != PGMPHYSHANDLERTYPE_PHYSICAL_WRITE
-                          || (pCur->enmType == PGMPHYSHANDLERTYPE_PHYSICAL_WRITE && (uErr & X86_TRAP_PF_RW)),
-                          ("Unexpected trap for physical handler: %08X (phys=%08x) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));
-
-# if defined(IN_RC) || defined(IN_RING0)
-                if (pCur->CTX_SUFF(pfnHandler))
-                {
-                    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
-#  ifdef IN_RING0
-                    PFNPGMR0PHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
-#  else
-                    PFNPGMRCPHYSHANDLER pfnHandler = pCur->CTX_SUFF(pfnHandler);
-#  endif
-                    bool  fLeaveLock = (pfnHandler != pPool->CTX_SUFF(pfnAccessHandler));
-                    void *pvUser = pCur->CTX_SUFF(pvUser);
-
-                    STAM_PROFILE_START(&pCur->Stat, h);
-                    if (fLeaveLock)
-                        pgmUnlock(pVM); /* @todo: Not entirely safe. */
-
-                    rc = pfnHandler(pVM, uErr, pRegFrame, pvFault, GCPhysFault, pvUser);
-                    if (fLeaveLock)
-                        pgmLock(pVM);
-#  ifdef VBOX_WITH_STATISTICS
-                    pCur = (PPGMPHYSHANDLER)RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault);
-                    if (pCur)
-                        STAM_PROFILE_STOP(&pCur->Stat, h);
-#  else
-                    pCur = NULL;    /* might be invalid by now. */
-#  endif
-
-                }
-                else
-# endif
-                    rc = VINF_EM_RAW_EMULATE_INSTR;
-
-                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersPhysical);
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
-                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndPhys; });
-                return rc;
-            }
-        }
-#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        else
-        {
-#  ifdef PGM_SYNC_N_PAGES
-            /*
-             * If the region is write protected and we got a page not present fault, then sync
-             * the pages. If the fault was caused by a read, then restart the instruction.
-             * In case of write access continue to the GC write handler.
-             */
-            if (    PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < PGM_PAGE_HNDL_PHYS_STATE_ALL
-                && !(uErr & X86_TRAP_PF_P))
-            {
-                rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
-                if (    RT_FAILURE(rc)
-                    ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
-                    ||  !(uErr & X86_TRAP_PF_RW))
-                {
-                    AssertRC(rc);
-                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
-                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
-                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndVirt; });
-                    return rc;
-                }
-            }
-#  endif
-            /*
-             * Ok, it's an virtual page access handler.
-             *
-             * Since it's faster to search by address, we'll do that first
-             * and then retry by GCPhys if that fails.
-             */
-            /** @todo r=bird: perhaps we should consider looking up by physical address directly now? */
-            /** @note r=svl: true, but lookup on virtual address should remain as a fallback as phys & virt trees might be out of sync, because the
-              *              page was changed without us noticing it (not-present -> present without invlpg or mov cr3, xxx)
-              */
-            PPGMVIRTHANDLER pCur = (PPGMVIRTHANDLER)RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault);
-            if (pCur)
-            {
-                AssertMsg(!(pvFault - pCur->Core.Key < pCur->cb)
-                          || (     pCur->enmType != PGMVIRTHANDLERTYPE_WRITE
-                               || !(uErr & X86_TRAP_PF_P)
-                               || (pCur->enmType == PGMVIRTHANDLERTYPE_WRITE && (uErr & X86_TRAP_PF_RW))),
-                          ("Unexpected trap for virtual handler: %RGv (phys=%RGp) pPage=%R[pgmpage] uErr=%X, enum=%d\n", pvFault, GCPhys, pPage, uErr, pCur->enmType));
-
-                if (    pvFault - pCur->Core.Key < pCur->cb
-                    &&  (    uErr & X86_TRAP_PF_RW
-                         ||  pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
-                {
-#   ifdef IN_RC
-                    STAM_PROFILE_START(&pCur->Stat, h);
-                    pgmUnlock(pVM);
-                    rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, pvFault - pCur->Core.Key);
-                    pgmLock(pVM);
-                    STAM_PROFILE_STOP(&pCur->Stat, h);
-#   else
-                    rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
-#   endif
-                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtual);
-                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
-                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
-                    return rc;
-                }
-                /* Unhandled part of a monitored page */
-            }
-            else
-            {
-               /* Check by physical address. */
-                unsigned iPage;
-                rc = pgmHandlerVirtualFindByPhysAddr(pVM, GCPhys + (pvFault & PAGE_OFFSET_MASK), &pCur, &iPage);
-                Assert(RT_SUCCESS(rc) || !pCur);
-                if (    pCur
-                    &&  (   uErr & X86_TRAP_PF_RW
-                         || pCur->enmType != PGMVIRTHANDLERTYPE_WRITE ) )
-                {
-                    Assert((pCur->aPhysToVirt[iPage].Core.Key & X86_PTE_PAE_PG_MASK) == GCPhys);
-#   ifdef IN_RC
-                    RTGCPTR off = (iPage << PAGE_SHIFT) + (pvFault & PAGE_OFFSET_MASK) - (pCur->Core.Key & PAGE_OFFSET_MASK);
-                    Assert(off < pCur->cb);
-                    STAM_PROFILE_START(&pCur->Stat, h);
-                    pgmUnlock(pVM);
-                    rc = pCur->CTX_SUFF(pfnHandler)(pVM, uErr, pRegFrame, pvFault, pCur->Core.Key, off);
-                    pgmLock(pVM);
-                    STAM_PROFILE_STOP(&pCur->Stat, h);
-#   else
-                    rc = VINF_EM_RAW_EMULATE_INSTR; /** @todo for VMX */
-#   endif
-                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtualByPhys);
-                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
-                    STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
-                    return rc;
-                }
-            }
-        }
-#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-
-        /*
-         * There is a handled area of the page, but this fault doesn't belong to it.
-         * We must emulate the instruction.
-         *
-         * To avoid crashing (non-fatal) in the interpreter and go back to the recompiler
-         * we first check if this was a page-not-present fault for a page with only
-         * write access handlers. Restart the instruction if it wasn't a write access.
-         */
-        STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersUnhandled);
-
-        if (    !PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)
-            &&  !(uErr & X86_TRAP_PF_P))
-        {
-#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-            rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#  else
-            rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrcDummy, pvFault, PGM_SYNC_NR_PAGES, uErr);
-#  endif
-            if (    RT_FAILURE(rc)
-                ||  rc == VINF_PGM_SYNCPAGE_MODIFIED_PDE
-                ||  !(uErr & X86_TRAP_PF_RW))
-            {
-                AssertRC(rc);
-                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersOutOfSync);
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
-                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2OutOfSyncHndPhys; });
-                return rc;
-            }
-        }
-
-        /** @todo This particular case can cause quite a lot of overhead. E.g. early stage of kernel booting in Ubuntu 6.06
-         *        It's writing to an unhandled part of the LDT page several million times.
-         */
-        rc = PGMInterpretInstruction(pVM, pVCpu, pRegFrame, pvFault);
-        LogFlow(("PGM: PGMInterpretInstruction -> rc=%d pPage=%R[pgmpage]\n", rc, pPage));
-        STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
-        STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndUnhandled; });
-        return rc;
-    } /* if any kind of handler */
-
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, &GstWalk));
+# else
+        return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage));
+# endif
+
+    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);

#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
…
#   endif
                STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eHandlersVirtualUnmarked);
-                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
                STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndVirt; });
                return rc;
…
    }
#  endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);

    /*
…
     * Check it for page out-of-sync situation.
     */
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
    if (!(uErr & X86_TRAP_PF_P))
    {
…
            LogFlow(("PGM: PGMInterpretInstruction balloon -> rc=%d pPage=%R[pgmpage]\n", rc, pPage));
            STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_MID_Z(Stat,PageOutOfSyncBallloon));
-            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeHandlers, b);
+            STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
            STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2HndUnhandled; });
            return rc;
…
                    AssertMsg(rc == VINF_PGM_SYNC_CR3 || RT_FAILURE(rc), ("%Rrc\n", rc));
                    STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
+/// @todo STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2MakeWritable; });
                    return rc;
                }
…
                    STAM_COUNTER_INC(&pVCpu->pgm.s.StatRZTrap0eWPEmulToR3);
                STAM_PROFILE_STOP(&pVCpu->pgm.s.StatRZTrap0eTimeOutOfSync, c);
+/// @todo STAM_STATS({ pVCpu->pgm.s.CTX_SUFF(pStatTrap0eAttribution) = &pVCpu->pgm.s.StatRZTrap0eTime2WPEmulation; });
                return rc;
            }