VirtualBox

Changeset 26625 in vbox


Timestamp: Feb 18, 2010 10:39:28 AM
Author:    vboxsync
Message:   Large page code cleanup
Location:  trunk
Files:     4 edited
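
In short: the 2 MB range validation that was open-coded in the EPT SyncPT path of PGMAllBth.h moves into pgmPhysAllocLargePage(), which gains an RTHCPHYS *pHCPhys output parameter and a dedicated failure status, VERR_PGM_INVALID_LARGE_PAGE_RANGE. The following caller-side sketch of the new contract is illustrative only (it assumes pVM and a 2 MB-aligned GCPhys2M are in scope; the fallback handling in the comments is an interpretation of the diff, not code from the changeset):

    /* Sketch of the reworked contract of pgmPhysAllocLargePage(). */
    RTHCPHYS HCPhys;
    int rc = pgmPhysAllocLargePage(pVM, GCPhys2M, &HCPhys);
    if (RT_SUCCESS(rc))
    {
        /* The whole 2 MB range is backed by one large page; HCPhys is its
         * host-physical base and can be installed into the shadow PDE. */
    }
    else if (rc == VERR_PGM_INVALID_LARGE_PAGE_RANGE)
    {
        /* The range is not all unallocated RAM; fall back to 4 KB pages. */
    }
    else
    {
        /* The large-handy-page allocation itself failed; rc carries the reason. */
    }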

  • trunk/include/VBox/err.h

    r26176 r26625
     /** PGM pool flush pending - return to ring 3. */
     #define VINF_PGM_POOL_FLUSH_PENDING             (1644)
    +/** Unable to use the range for a large page. */
    +#define VERR_PGM_INVALID_LARGE_PAGE_RANGE       (-1645)
     /** @} */

  • trunk/src/VBox/VMM/PGMInternal.h

    r26622 r26625

     int             pgmPhysAllocPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys);
    -int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys);
    +int             pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS *pHCPhys);
     int             pgmPhysPageLoadIntoTlb(PPGM pPGM, RTGCPHYS GCPhys);
     int             pgmPhysPageLoadIntoTlbWithPage(PPGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r26621 r26625

     # if (PGM_SHW_TYPE == PGM_TYPE_EPT) && (HC_ARCH_BITS == 64) && defined(RT_OS_WINDOWS) && defined(DEBUG_sandervl)
    -    PPGMPAGE pPage;
    -    rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPtrPage & SHW_PDE_PG_MASK, &pPage);
    -    if (    RT_SUCCESS(rc)
    -        &&  PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM)
    -    {
    -        RTHCPHYS HCPhys = NIL_RTHCPHYS;
    -        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
    -
    -        if  (uPDEType == PGM_PAGE_PDE_TYPE_PDE)
    -        {
    -            /* Previously allocated 2 MB range can be reused. */
    -            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    -            HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    -        }
    -        else
    -        if  (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
    -             && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
    -        {
    -            RTGCPHYS GCPhysBase = GCPtrPage & SHW_PDE_PG_MASK;
    -            RTGCPHYS GCPhys = GCPhysBase;
    -            unsigned iPage;
    -
    -            /* Lazy approach: check all pages in the 2 MB range.
    -             * The whole range must be ram and unallocated
    -             */
    -            for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
    -            {
    -                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
    -                if  (   RT_FAILURE(rc)
    -                     || PGM_PAGE_GET_TYPE(pPage)  != PGMPAGETYPE_RAM
    -                     || PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
    -                {
    -                    LogFlow(("Found page with wrong attributes; cancel check. rc=%d\n", rc));
    -                    break;
    -                }
    -                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
    -                GCPhys += PAGE_SIZE;
    -            }
    -            /* Fetch the start page of the 2 MB range again. */
    -            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
    -            AssertRC(rc);   /* can't fail */
    -
    -            if (iPage != _2M/PAGE_SIZE)
    -            {
    -                /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
    -                STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
    -                PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
    -            }
    -            else
    -            {
    -                rc = pgmPhysAllocLargePage(pVM, GCPhysBase);
    -                if (RT_SUCCESS(rc))
    -                {
    -                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    -                    HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    -                    STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageUsed);
    -                }
    -                else
    -                    LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
    -            }
    -        }
    -
    -        if (HCPhys != NIL_RTHCPHYS)
    -        {
    +    {
    +        RTHCPHYS HCPhys;
    +        rc = pgmPhysAllocLargePage(pVM, GCPtrPage & SHW_PDE_PG_MASK, &HCPhys);
    +        if (RT_SUCCESS(rc))
    +        {
                 PdeDst.u &= X86_PDE_AVL_MASK;
                 PdeDst.u |= HCPhys;
    …
                 STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
                 return VINF_SUCCESS;
    +
             }
    +        else
    +            LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
         }
     # endif
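
Design note: the net effect in PGMAllBth.h is that the ~60-line open-coded range check, still guarded by the DEBUG_sandervl-only #if above, collapses into a single pgmPhysAllocLargePage() call that hands back the host-physical base on success. The call passes GCPtrPage & SHW_PDE_PG_MASK as the start of the range; as a rough illustration only (the value of SHW_PDE_PG_MASK is not shown in this changeset), a 2 MB-aligned base is obtained by clearing the low 21 bits of an address:

    /* Illustration only: 2 MB alignment of a guest-physical address.
     * 0x12345678 & ~(0x200000 - 1) == 0x12200000                      */
    RTGCPHYS GCPhysBase = GCPhys & ~(RTGCPHYS)(_2M - 1);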
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r26616 r26625

      * @param   pVM         The VM address.
      * @param   GCPhys      The address of the page.
    + * @param   pHCPhys     Pointer to HC physical address (out)
      *
      * @remarks Must be called from within the PGM critical section. It may
      *          nip back to ring-3/0 in some cases.
    - *
    - * @remarks This function shouldn't really fail, however if it does
    - *          it probably means we've screwed up the size of handy pages and/or
    - *          the low-water mark. Or, that some device I/O is causing a lot of
    - *          pages to be allocated while while the host is in a low-memory
    - *          condition. This latter should be handled elsewhere and in a more
    - *          controlled manner, it's on the @bugref{3170} todo list...
    - */
    -int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys)
    -{
    -    LogFlow(("pgmPhysAllocLargePage: %RGp\n", GCPhys));
    + */
    +int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys, RTHCPHYS *pHCPhys)
    +{
    +    RTGCPHYS GCPhysBase = GCPhys & X86_PDE_PAE_PG_MASK_FULL;
    +    LogFlow(("pgmPhysAllocLargePage: %RGp base %RGp\n", GCPhys, GCPhysBase));

         /*
    …
         Assert(PGMIsLocked(pVM));
         Assert((GCPhys & X86_PD_PAE_MASK) == 0);
    -
    +    AssertPtr(pHCPhys);
    +
    +    PPGMPAGE pPage;
    +    int rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
    +    if (    RT_SUCCESS(rc)
    +        &&  PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
    +    {
    +        RTHCPHYS HCPhys = NIL_RTHCPHYS;
    +        unsigned uPDEType = PGM_PAGE_GET_PDE_TYPE(pPage);
    +
    +        if  (uPDEType == PGM_PAGE_PDE_TYPE_PDE)
    +        {
    +            /* Previously allocated 2 MB range can be reused. */
    +            Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    +
    +            *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    +            return VINF_SUCCESS;
    +        }
    +        else
    +        if  (   uPDEType == PGM_PAGE_PDE_TYPE_DONTCARE
    +             && PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ZERO)
    +        {
    +            unsigned iPage;
    +
    +            GCPhys = GCPhysBase;
    +
    +            /* Lazy approach: check all pages in the 2 MB range.
    +             * The whole range must be ram and unallocated
    +             */
    +            for (iPage = 0; iPage < _2M/PAGE_SIZE; iPage++)
    +            {
    +                rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhys, &pPage);
    +                if  (   RT_FAILURE(rc)
    +                     || PGM_PAGE_GET_TYPE(pPage)  != PGMPAGETYPE_RAM
    +                     || PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED)
    +                {
    +                    LogFlow(("Found page with wrong attributes; cancel check. rc=%d\n", rc));
    +                    break;
    +                }
    +                Assert(PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_DONTCARE);
    +                GCPhys += PAGE_SIZE;
    +            }
    +            /* Fetch the start page of the 2 MB range again. */
    +            rc = pgmPhysGetPageEx(&pVM->pgm.s, GCPhysBase, &pPage);
    +            AssertRC(rc);   /* can't fail */
    +
    +            if (iPage != _2M/PAGE_SIZE)
    +            {
    +                /* Failed. Mark as requiring a PT so we don't check the whole thing again in the future. */
    +                STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageRefused);
    +                PGM_PAGE_SET_PDE_TYPE(pPage, PGM_PAGE_PDE_TYPE_PT);
    +                return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
    +            }
    +            else
    +            {
     #ifdef IN_RING3
    -    int rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhys);
    +                rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
     #else
    -    int rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhys);
    +                rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
     #endif
    -    return rc;
    +                if (RT_SUCCESS(rc))
    +                {
    +                    Assert(PGM_PAGE_GET_STATE(pPage) == PGM_PAGE_STATE_ALLOCATED);
    +                    *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage);
    +                    STAM_COUNTER_INC(&pVM->pgm.s.StatLargePageUsed);
    +                    return VINF_SUCCESS;
    +                }
    +                LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
    +                return rc;
    +            }
    +        }
    +    }
    +    return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
     }

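
The new body of pgmPhysAllocLargePage() accepts a 2 MB range only when the scan loop runs to completion, i.e. iPage reaches _2M/PAGE_SIZE (512 pages of 4 KB). The following self-contained sketch of that scan-and-count idiom uses a hypothetical per-page predicate standing in for the pgmPhysGetPageEx()/PGM_PAGE_* checks; it is not code from the changeset:

    #include <stdbool.h>

    #define SKETCH_PAGE_SIZE   4096u                                  /* 4 KB page            */
    #define SKETCH_RANGE_SIZE  (2u * 1024u * 1024u)                   /* _2M                  */
    #define SKETCH_PAGE_COUNT  (SKETCH_RANGE_SIZE / SKETCH_PAGE_SIZE) /* 512 pages            */

    /* Accept the range only if every page passes; one bad page breaks out
     * early and the final count test rejects the whole 2 MB range.        */
    static bool sketchRangeIsUsable(bool (*pfnPageOk)(unsigned iPage))
    {
        unsigned iPage;
        for (iPage = 0; iPage < SKETCH_PAGE_COUNT; iPage++)
            if (!pfnPageOk(iPage))
                break;
        return iPage == SKETCH_PAGE_COUNT; /* mirrors the iPage != _2M/PAGE_SIZE test */
    }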