VirtualBox

Changeset 36009 in vbox for trunk/src/VBox/VMM


Timestamp:
Feb 17, 2011 10:15:02 AM (14 years ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
70091
Message:

PGM: Fixed large pages and write monitoring (live snapshot). Added checks for PGM_PAGE_PDE_TYPE_PDE_DISABLED in a few places where only PGM_PAGE_PDE_TYPE_PDE was checked for (might have missed some).
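In practice the fix boils down to accepting both PDE type values wherever the code asks "is this page part of a 2 MB large page?": a page can belong to an active large page (PGM_PAGE_PDE_TYPE_PDE) or to one that was temporarily broken up for write monitoring, e.g. during a live snapshot (PGM_PAGE_PDE_TYPE_PDE_DISABLED). A minimal sketch of that pattern follows, using only macros that appear in the diffs below; the helper name is hypothetical and not part of the changeset:

    /* Hypothetical helper illustrating the check pattern applied by this changeset;
       assumes the PGM internal headers (PGMInternal.h / PGMInline.h) are in scope. */
    DECLINLINE(bool) pgmPageIsPartOfLargePage(PPGMPAGE pPage)
    {
        unsigned const uPdeType = PGM_PAGE_GET_PDE_TYPE(pPage);
        return uPdeType == PGM_PAGE_PDE_TYPE_PDE            /* active 2 MB large page */
            || uPdeType == PGM_PAGE_PDE_TYPE_PDE_DISABLED;  /* large page broken up, e.g. for write monitoring */
    }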

Location:
trunk/src/VBox/VMM
Files:
9 edited

Legend:

      Unmodified (context, no prefix)
    + Added
    - Removed
    … Lines omitted between hunks
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r35346 r36009  
      VMMDECL(void) PGMSetLargePageUsage(PVM pVM, bool fUseLargePages)
      {
    -       pVM->fUseLargePages = fUseLargePages;
    +     pVM->fUseLargePages = fUseLargePages;
      }

  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r35754 r36009  
          if (BTH_IS_NP_ACTIVE(pVM))
          {
    +         /* Check if we allocated a big page before for this 2 MB range. */
              PPGMPAGE pPage;
    -
    -         /* Check if we allocated a big page before for this 2 MB range. */
              rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPtrPage & X86_PDE2M_PAE_PG_MASK, &pPage);
              if (RT_SUCCESS(rc))
              {
                  RTHCPHYS HCPhys = NIL_RTHCPHYS;
    -
                  if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
                  {
    …
                  {
                      /* Recheck the entire 2 MB range to see if we can use it again as a large page. */
    -                 rc = pgmPhysIsValidLargePage(pVM, GCPtrPage, pPage);
    +                 rc = pgmPhysRecheckLargePage(pVM, GCPtrPage, pPage);
                      if (RT_SUCCESS(rc))
                      {
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r35346 r36009  

      # ifdef PGM_WITH_LARGE_PAGES
    +     /*
    +      * Try allocate a large page if applicable.
    +      */
          if (    PGMIsUsingLargePages(pVM)
              &&  PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
    …

      #ifdef PGM_WITH_LARGE_PAGES
    +
      /**
       * Replace a 2 MB range of zero pages with new pages that we can write to.
    …
          Assert(PGMIsUsingLargePages(pVM));

    -     PPGMPAGE pPage;
    -     int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
    +     PPGMPAGE pFirstPage;
    +     int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pFirstPage);
          if (    RT_SUCCESS(rc)
    -         &&  PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
    -     {
    -         unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
    +         &&  PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM)
    +     {
    +         unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pFirstPage);

              /* Don't call this function for already allocated pages. */
              Assert(uPDEType != PGM_PAGE_PDE_TYPE_PDE);

    -         if  (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
    -              && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
    -         {
    +         if (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
    +             && PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ZERO)
    +         {
    +             /* Lazy approach: check all pages in the 2 MB range.
    +              * The whole range must be ram and unallocated. */
    +             GCPhys = GCPhysBase;
                  unsigned iPage;
    -
    -             GCPhys = GCPhysBase;
    -
    -             /* Lazy approach: check all pages in the 2 MB range.
    -              * The whole range must be ram and unallocated
    -              */
                  for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
                  {
    -                 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
    +                 PPGMPAGE pSubPage;
    +                 rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pSubPage);
                      if  (   RT_FAILURE(rc)
    -                      || PGM_PAGE_GET_TYPE(pPage)  != PGMPAGETYPE_RAM        /* Anything other than ram implies monitoring. */
    -                      || PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ZERO)   /* allocated, monitored or shared means we can't use a large page here */
    +                     || PGM_PAGE_GET_TYPE(pSubPage)  != PGMPAGETYPE_RAM      /* Anything other than ram implies monitoring. */
    +                     || PGM_PAGE_GET_STATE(pSubPage) != PGM_PAGE_STATE_ZERO) /* Allocated, monitored or shared means we can't use a large page here */
                      {
    -                     LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_STATE(pPage), rc));
    +                     LogFlow(("Found page %RGp with wrong attributes (type=%d; state=%d); cancel check. rc=%d\n", GCPhys, PGM_PAGE_GET_TYPE(pSubPage), PGM_PAGE_GET_STATE(pSubPage), rc));
                          break;
                      }
    -                 Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
    +                 Assert(PGM_PAGE_GET_PDE_TYPE(pSubPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
                      GCPhys += PAGE_SIZE;
                  }
    -             /* Fetch the start page of the 2 MB range again. */
    -             rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
    -             AssertRC(rc);   /* can't fail */
    -
                  if (iPage != _2M/PAGE_SIZE)
                  {
                      /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
                      STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
    -                 PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
    +                 PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PT);
                      return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
                  }
    -             else
    +
    +             /*
    +              * Do the allocation.
    +              */
    + # ifdef IN_RING3
    +             rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
    + # else
    +             rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
    + # endif
    +             if (RT_SUCCESS(rc))
                  {
    - # ifdef IN_RING3
    -                 rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
    - # else
    -                 rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
    - # endif
    -                 if (RT_SUCCESS(rc))
    -                 {
    -                     Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    -                     STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageAlloc);
    -                     return VINF_SUCCESS;
    -                 }
    -                 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
    -
    -                 /* If we fail once, it most likely means the host's memory is too fragmented; don't bother trying again. */
    -                 PGMSetLargePageUsage(pVM, false);
    -                 return rc;
    +                 Assert(PGM_PAGE_GET_STATE(pFirstPage) == PGM_PAGE_STATE_ALLOCATED);
    +                 pVM->pgm.s.cLargePages++;
    +                 return VINF_SUCCESS;
                  }
    +
    +             /* If we fail once, it most likely means the host's memory is too
    +                fragmented; don't bother trying again. */
    +             LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
    +             PGMSetLargePageUsage(pVM, false);
    +             return rc;
              }
          }
          return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
      }
    +

      /**
    …
       * @param   pLargePage  Page structure of the base page
       */
    - int pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
    - {
    -     unsigned i;
    -
    + int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage)
    + {
          STAM_REL_COUNTER_INC(&pVM->pgm.s.StatLargePageRecheck);

    …
              ||  PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
          {
    -         LogFlow(("pgmPhysIsValidLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
    +         LogFlow(("pgmPhysRecheckLargePage: checks failed for base page %x %x %x\n", PGM_PAGE_GET_STATE(pLargePage), PGM_PAGE_GET_TYPE(pLargePage), PGM_PAGE_GET_HNDL_PHYS_STATE(pLargePage)));
              return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
          }
    …
          STAM_PROFILE_START(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,IsValidLargePage), a);
          /* Check all remaining pages in the 2 MB range. */
    +     unsigned i;
          GCPhys += PAGE_SIZE;
          for (i = 1; i < _2M/PAGE_SIZE; i++)
    …
                  ||  PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE)
              {
    -             LogFlow(("pgmPhysIsValidLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
    +             LogFlow(("pgmPhysRecheckLargePage: checks failed for page %d; %x %x %x\n", i, PGM_PAGE_GET_STATE(pPage), PGM_PAGE_GET_TYPE(pPage), PGM_PAGE_GET_HNDL_PHYS_STATE(pPage)));
                  break;
              }
    …
          {
              PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE);
    -         Log(("pgmPhysIsValidLargePage: page %RGp can be reused!\n", GCPhys - _2M));
    +         pVM->pgm.s.cLargePagesDisabled--;
    +         Log(("pgmPhysRecheckLargePage: page %RGp can be reused!\n", GCPhys - _2M));
              return VINF_SUCCESS;
          }
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r35346 r36009  
          if (PGM_PAGE_GET_PDE_TYPE(pPhysPage) == PGM_PAGE_PDE_TYPE_PDE)
          {
    -         PPGMPAGE pPhysBase;
              RTGCPHYS GCPhysBase = GCPhysPage & X86_PDE2M_PAE_PG_MASK;
    -
              GCPhysPage &= X86_PDE_PAE_PG_MASK;

              /* Fetch the large page base. */
    +         PPGMPAGE pLargePage;
              if (GCPhysBase != GCPhysPage)
              {
    -             pPhysBase = pgmPhysGetPage(&pVM->pgm.s, GCPhysBase);
    -             AssertFatal(pPhysBase);
    +             pLargePage = pgmPhysGetPage(&pVM->pgm.s, GCPhysBase);
    +             AssertFatal(pLargePage);
              }
              else
    -             pPhysBase = pPhysPage;
    +             pLargePage = pPhysPage;

              Log(("pgmPoolTrackUpdateGCPhys: update large page PDE for %RGp (%RGp)\n", GCPhysBase, GCPhysPage));

    -         if (PGM_PAGE_GET_PDE_TYPE(pPhysBase) == PGM_PAGE_PDE_TYPE_PDE)
    +         if (PGM_PAGE_GET_PDE_TYPE(pLargePage) == PGM_PAGE_PDE_TYPE_PDE)
              {
                  /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
    -             PGM_PAGE_SET_PDE_TYPE(pPhysBase, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    +             PGM_PAGE_SET_PDE_TYPE(pLargePage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    +             pVM->pgm.s.cLargePagesDisabled++;

                  /* Update the base as that *only* that one has a reference and there's only one PDE to clear. */
    -             rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pPhysBase, fFlushPTEs, pfFlushTLBs);
    +             rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pLargePage, fFlushPTEs, pfFlushTLBs);

                  *pfFlushTLBs = true;
  • trunk/src/VBox/VMM/VMMR3/PGM.cpp

    r35696 r36009  
          STAM_REL_REG(pVM, &pPGM->cBalloonedPages,                    STAMTYPE_U32,     "/PGM/Page/cBalloonedPages",          STAMUNIT_COUNT,     "The number of ballooned pages.");
          STAM_REL_REG(pVM, &pPGM->cHandyPages,                        STAMTYPE_U32,     "/PGM/Page/cHandyPages",              STAMUNIT_COUNT,     "The number of handy pages (not included in cAllPages).");
    +     STAM_REL_REG(pVM, &pPGM->cLargePages,                        STAMTYPE_U32,     "/PGM/Page/cLargePages",              STAMUNIT_COUNT,     "The number of large pages allocated (includes disabled).");
    +     STAM_REL_REG(pVM, &pPGM->cLargePagesDisabled,                STAMTYPE_U32,     "/PGM/Page/cLargePagesDisabled",      STAMUNIT_COUNT,     "The number of disabled large pages.");
          STAM_REL_REG(pVM, &pPGM->cRelocations,                       STAMTYPE_COUNTER, "/PGM/cRelocations",                  STAMUNIT_OCCURENCES,"Number of hypervisor relocations.");
          STAM_REL_REG(pVM, &pPGM->ChunkR3Map.c,                       STAMTYPE_U32,     "/PGM/ChunkR3Map/c",                  STAMUNIT_COUNT,     "Number of mapped chunks.");
    …
          STAM_REL_REG(pVM, &pPGM->cUnmappedChunks,                    STAMTYPE_U32,     "/PGM/ChunkR3Map/Unmapped",           STAMUNIT_COUNT,     "Number of times we unmapped a chunk.");

    -     STAM_REL_REG(pVM, &pPGM->StatLargePageAlloc,                 STAMTYPE_COUNTER, "/PGM/LargePage/Alloc",               STAMUNIT_OCCURENCES, "The number of large pages we've used.");
          STAM_REL_REG(pVM, &pPGM->StatLargePageReused,                STAMTYPE_COUNTER, "/PGM/LargePage/Reused",              STAMUNIT_OCCURENCES, "The number of times we've reused a large page.");
          STAM_REL_REG(pVM, &pPGM->StatLargePageRefused,               STAMTYPE_COUNTER, "/PGM/LargePage/Refused",             STAMUNIT_OCCURENCES, "The number of times we couldn't use a large page.");
  • trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp

    r35346 r36009  
                  {
                      /*
    -                     * A RAM page.
    -                     */
    +                  * A RAM page.
    +                  */
                      switch (PGM_PAGE_GET_STATE(pPage))
                      {
    -                 case PGM_PAGE_STATE_ALLOCATED:
    -                     /** @todo Optimize this: Don't always re-enable write
    -                         * monitoring if the page is known to be very busy. */
    -                     if (PGM_PAGE_IS_WRITTEN_TO(pPage))
    -                     {
    -                         PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
    -                         /* Remember this dirty page for the next (memory) sync. */
    -                         PGM_PAGE_SET_FT_DIRTY(pPage);
    -                     }
    -
    -                     PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_WRITE_MONITORED);
    -                     pVM->pgm.s.cMonitoredPages++;
    -                     break;
    -
    -                 case PGM_PAGE_STATE_SHARED:
    -                     AssertFailed();
    -                     break;
    -
    -                 case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
    -                 default:
    -                     break;
    +                     case PGM_PAGE_STATE_ALLOCATED:
    +                         /** @todo Optimize this: Don't always re-enable write
    +                          * monitoring if the page is known to be very busy. */
    +                         if (PGM_PAGE_IS_WRITTEN_TO(pPage))
    +                         {
    +                             PGM_PAGE_CLEAR_WRITTEN_TO(pPage);
    +                             /* Remember this dirty page for the next (memory) sync. */
    +                             PGM_PAGE_SET_FT_DIRTY(pPage);
    +                         }
    +
    +                         pgmPhysPageWriteMonitor(pVM, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
    +                         break;
    +
    +                     case PGM_PAGE_STATE_SHARED:
    +                         AssertFailed();
    +                         break;
    +
    +                     case PGM_PAGE_STATE_WRITE_MONITORED:    /* nothing to change. */
    +                     default:
    +                         break;
                      }
                  }
    …
                      {
                          case PGMPAGETYPE_RAM:
    -                         /* Do not replace pages part of a 2 MB continuous range with zero pages, but zero them instead. */
    -                         if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
    +                         /* Do not replace pages part of a 2 MB continuous range
    +                            with zero pages, but zero them instead. */
    +                         if (   PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE
    +                             || PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED)
                              {
                                  void *pvPage;
    …
                                  ASMMemZeroPage(pvPage);
                              }
    -                         else
    -                         if (PGM_PAGE_IS_BALLOONED(pPage))
    +                         else if (PGM_PAGE_IS_BALLOONED(pPage))
                              {
                                  /* Turn into a zero page; the balloon status is lost when the VM reboots. */
                                  PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_ZERO);
                              }
    -                         else
    -                         if (!PGM_PAGE_IS_ZERO(pPage))
    +                         else if (!PGM_PAGE_IS_ZERO(pPage))
                              {
                                  rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, pRam->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
    …
              return VMSetError(pVM, VERR_PGM_PHYS_NOT_RAM, RT_SRC_POS, "GCPhys=%RGp type=%d", GCPhys, PGM_PAGE_GET_TYPE(pPage));
          }
    -     Assert(PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE);
    +
    +     /** @todo What about ballooning of large pages??! */
    +     Assert(   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
    +            && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED);

          if (    PGM_PAGE_IS_ZERO(pPage)
  • trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp

    r35346 r36009  
                                          }

    -                                     PGM_PAGE_SET_STATE(&pCur->aPages[iPage], PGM_PAGE_STATE_WRITE_MONITORED);
    -                                     pVM->pgm.s.cMonitoredPages++;
    +                                     pgmPhysPageWriteMonitor(pVM, &pCur->aPages[iPage],
    +                                                             pCur->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT));
                                          paLSPages[iPage].fWriteMonitored        = 1;
                                          paLSPages[iPage].fWriteMonitoredJustNow = 1;
    …
                              /* Free it only if it's not part of a previously
                                 allocated large page (no need to clear the page). */
    -                         else if (PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE)
    +                         else if (   PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE
    +                                  && PGM_PAGE_GET_PDE_TYPE(pPage) != PGM_PAGE_PDE_TYPE_PDE_DISABLED)
                              {
                                  rc = pgmPhysFreePage(pVM, pReq, &cPendingPages, pPage, GCPhys);
  • trunk/src/VBox/VMM/include/PGMInline.h

    r35346 r36009  

      #endif /* !IN_RC */
    +
    +
    + /**
    +  * Enables write monitoring for an allocated page.
    +  *
    +  * The caller is responsible for updating the shadow page tables.
    +  *
    +  * @param   pVM         The VM handle.
    +  * @param   pPage       The page to write monitor.
    +  * @param   GCPhysPage  The address of the page.
    +  */
    + DECLINLINE(void) pgmPhysPageWriteMonitor(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhysPage)
    + {
    +     Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    +     Assert(PGMIsLockOwner(pVM));
    +
    +     PGM_PAGE_SET_STATE(pPage, PGM_PAGE_STATE_WRITE_MONITORED);
    +     pVM->pgm.s.cMonitoredPages++;
    +
    +     /* Large pages must disabled. */
    +     if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE)
    +     {
    +         PPGMPAGE pFirstPage = pgmPhysGetPage(&pVM->pgm.s, GCPhysPage & X86_PDE2M_PAE_PG_MASK);
    +         AssertFatal(pFirstPage);
    +         if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE)
    +         {
    +             PGM_PAGE_SET_PDE_TYPE(pFirstPage, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    +             pVM->pgm.s.cLargePagesDisabled++;
    +         }
    +         else
    +             Assert(PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE_DISABLED);
    +     }
    + }

  • trunk/src/VBox/VMM/include/PGMInternal.h

    r35696 r36009  
          uint32_t                        cMappedChunks;      /**< Number of times we mapped a chunk. */
          uint32_t                        cUnmappedChunks;    /**< Number of times we unmapped a chunk. */
    +     uint32_t                        cLargePages;        /**< The number of large pages. */
    +     uint32_t                        cLargePagesDisabled;/**< The number of disabled large pages. */
      /*    uint32_t                        aAlignment4[1]; */

    …
          STAMCOUNTER                     cRelocations;

    -     STAMCOUNTER                     StatLargePageAlloc;                 /**< The number of large pages we've allocated.*/
          STAMCOUNTER                     StatLargePageReused;                /**< The number of large pages we've reused.*/
          STAMCOUNTER                     StatLargePageRefused;               /**< The number of times we couldn't use a large page.*/
    …
      int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
      int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
    - int             pgmPhysIsValidLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
    + int             pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage);
      int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
      int             pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);