VirtualBox

Changeset 31178 in vbox


Timestamp: Jul 28, 2010 5:21:13 PM (15 years ago)
Author: vboxsync
svn:sync-xref-src-repo-rev: 64165
Message: PGM: Some more preparatory cleanup.
Location: trunk/src/VBox/VMM
Files: 6 edited
Legend: unchanged lines are prefixed with a space, added lines with '+', removed lines with '-'.
  • trunk/src/VBox/VMM/PGMInline.h (r31170 → r31178)

  *
  * @returns See PGMDynMapGCPage.
+ * @param   pVM         The VM handle.
  * @param   pVCpu       The current CPU.
  * @param   GCPhys      The guest physical address of the page.
  * @param   ppv         Where to store the mapping address.
  */
-DECLINLINE(int) pgmR0DynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
+DECLINLINE(int) pgmR0DynMapGCPageV2Inlined(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
 {
     STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
     AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
+
+    /*
+     * Get the ram range.
+     */
+    PPGMRAMRANGE    pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
+    RTGCPHYS        off  = GCPhys - pRam->GCPhys;
+    if (RT_UNLIKELY(off >= pRam->cb
+        /** @todo   || page state stuff */))
+    {
+        /* This case is not counted into StatR0DynMapGCPageInl. */
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
+        return PGMDynMapGCPage(pVM, GCPhys, ppv);
+    }
+
+    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
+    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
+
+    /*
+     * pgmR0DynMapHCPageInlined with out stats.
+     */
+    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
+    Assert(!(HCPhys & PAGE_OFFSET_MASK));
+    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
+
+    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
+    unsigned    iEntry  = pSet->aiHashTable[iHash];
+    if (    iEntry < pSet->cEntries
+        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
+    {
+        *ppv = pSet->aEntries[iEntry].pvPage;
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
+    }
+    else
+    {
+        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
+        pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
+    }
+
+    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
+    return VINF_SUCCESS;
+}
+
+
+/**
+ * Inlined version of the ring-0 version of PGMDynMapGCPage that optimizes
+ * access to pages already in the set.
+ *
+ * @returns See PGMDynMapGCPage.
+ * @param   pVCpu       The current CPU.
+ * @param   GCPhys      The guest physical address of the page.
+ * @param   ppv         Where to store the mapping address.
+ */
+DECLINLINE(int) pgmR0DynMapGCPageInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
+{
+    return pgmR0DynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv);
+}
+
+
+/**
+ * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
+ * access to pages already in the set.
+ *
+ * @returns See PGMDynMapGCPage.
+ * @param   pVCpu       The current CPU.
+ * @param   HCPhys      The physical address of the page.
+ * @param   ppv         Where to store the mapping address.
+ */
+DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
+{
+    STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapGCPageInl, a);
 
     /*
…
         /* This case is not counted into StatR0DynMapGCPageInl. */
         STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
-        return PGMDynMapGCPage(pVM, GCPhys, ppv);
-    }
-
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamHits);
-
-    /*
-     * pgmR0DynMapHCPageInlined with out stats.
-     */
-    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
-    Assert(!(HCPhys & PAGE_OFFSET_MASK));
-    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
-
-    unsigned    iHash   = PGMMAPSET_HASH(HCPhys);
-    unsigned    iEntry  = pSet->aiHashTable[iHash];
-    if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys)
-    {
-        *ppv = pSet->aEntries[iEntry].pvPage;
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlHits);
-    }
-    else
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlMisses);
-        pgmR0DynMapHCPageCommon(pSet, HCPhys, ppv);
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInl, a);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Inlined version of the ring-0 version of PGMDynMapGCPageOff that optimizes
- * access to pages already in the set.
- *
- * @returns See PGMDynMapGCPage.
- * @param   pVCpu       The current CPU.
- * @param   HCPhys      The physical address of the page.
- * @param   ppv         Where to store the mapping address.
- */
-DECLINLINE(int) pgmR0DynMapGCPageOffInlined(PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv)
-{
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatR0DynMapGCPageInl, a);
-
-    /*
-     * Get the ram range.
-     */
-    PVM             pVM  = pVCpu->CTX_SUFF(pVM);
-    PPGMRAMRANGE    pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);
-    RTGCPHYS        off  = GCPhys - pRam->GCPhys;
-    if (RT_UNLIKELY(off >= pRam->cb
-        /** @todo   || page state stuff */))
-    {
-        /* This case is not counted into StatR0DynMapGCPageInl. */
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatR0DynMapGCPageInlRamMisses);
         return PGMDynMapGCPageOff(pVM, GCPhys, ppv);
     }
…
     }
     AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
+}
+
+/**
+ * Maps the page into current context (RC and maybe R0).
+ *
+ * @returns pointer to the mapping.
+ * @param   pVM         Pointer to the PGM instance data.
+ * @param   pVCpu       The current CPU.
+ * @param   pPage       The page.
+ */
+DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVM pVM, PVMCPU pVCpu, PPGMPOOLPAGE pPage)
+{
+    if (pPage->idx >= PGMPOOL_IDX_FIRST)
+    {
+        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
+        void *pv;
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+        Assert(pVCpu == VMMGetCpu(pVM));
+        pgmR0DynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv);
+# else
+        PGMDynMapHCPage(pVM, pPage->Core.Key, &pv);
+# endif
+        return pv;
+    }
+    AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
 }
 
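For orientation, a minimal caller sketch (hypothetical, not part of the changeset; it assumes ring-0 code where pVM, pVCpu and GCPhys are already in scope):

    void *pv;
    /* Callers that already hold both handles can use the new V2 inline directly
       and skip the pVCpu->CTX_SUFF(pVM) lookup done by the thin wrapper: */
    int rc  = pgmR0DynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, &pv);
    /* The old single-handle form still works and simply forwards to the V2 variant: */
    int rc2 = pgmR0DynMapGCPageInlined(pVCpu, GCPhys, &pv);
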
  • trunk/src/VBox/VMM/PGMInternal.h (r31170 → r31178)

  *
  * @returns VBox status code.
+ * @param   pVM         The VM handle.
+ * @param   pVCpu       The current CPU.
+ * @param   HCPhys      The HC physical address to map to a virtual one.
+ * @param   ppv         Where to store the virtual address. No need to cast
+ *                      this.
+ *
+ * @remark  In RC this uses PGMDynMapHCPage(), so it will consume of the small
+ *          page window employeed by that function. Be careful.
+ * @remark  There is no need to assert on the result.
+ */
+#ifdef IN_RC
+# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
+     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
+#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
+     pgmR0DynMapHCPageInlined(pVCpu, HCPhys, (void **)(ppv))
+#else
+# define PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhys, ppv) \
+     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
+#endif
+
+/** @def PGM_GCPHYS_2_PTR_V2
+ * Maps a GC physical page address to a virtual address.
+ *
+ * @returns VBox status code.
  * @param   pVM     The VM handle.
- * @param   HCPhys  The HC physical address to map to a virtual one.
+ * @param   pVCpu   The current CPU.
+ * @param   GCPhys  The GC physical address to map to a virtual one.
  * @param   ppv     Where to store the virtual address. No need to cast this.
  *
- * @remark  In GC this uses PGMGCDynMapHCPage(), so it will consume of the
+ * @remark  In GC this uses PGMGCDynMapGCPage(), so it will consume of the
  *          small page window employeed by that function. Be careful.
  * @remark  There is no need to assert on the result.
  */
 #ifdef IN_RC
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
-     PGMDynMapHCPage(pVM, HCPhys, (void **)(ppv))
+# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
-     pgmR0DynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, (void **)(ppv))
+# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+     pgmR0DynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv))
 #else
-# define PGM_HCPHYS_2_PTR(pVM, HCPhys, ppv) \
-     MMPagePhys2PageEx(pVM, HCPhys, (void **)(ppv))
+# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
 #endif
 
…
  * @remark  There is no need to assert on the result.
  */
-#ifdef IN_RC
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
-     PGMDynMapGCPage(pVM, GCPhys, (void **)(ppv))
-#elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
-     pgmR0DynMapGCPageInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv))
-#else
-# define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) \
-     PGMPhysGCPhys2R3Ptr(pVM, GCPhys, 1 /* one page only */, (PRTR3PTR)(ppv)) /** @todo this isn't asserting, use PGMRamGCPhys2HCPtr! */
-#endif
+#define PGM_GCPHYS_2_PTR(pVM, GCPhys, ppv) PGM_GCPHYS_2_PTR_V2(pVM, VMMGetCpu(pVM), GCPhys, ppv)
 
 /** @def PGM_GCPHYS_2_PTR_BY_VMCPU
…
  * @remark  There is no need to assert on the result.
  */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \
-     pgmR0DynMapGCPageInlined(pVCpu, GCPhys, (void **)(ppv))
-#else
-# define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) \
-     PGM_GCPHYS_2_PTR((pVCpu)->CTX_SUFF(pVM), GCPhys, ppv)
-#endif
+#define PGM_GCPHYS_2_PTR_BY_VMCPU(pVCpu, GCPhys, ppv) PGM_GCPHYS_2_PTR_V2((pVCpu)->CTX_SUFF(pVM), pVCpu, GCPhys, ppv)
 
 /** @def PGM_GCPHYS_2_PTR_EX
…
  */
 #if defined(IN_RC)
-# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageInlined((pVM), (pPage))
+# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
 #elif defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
-# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageInlined((pVM), (pPage))
+# define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   pgmPoolMapPageV2Inlined((pVM), (pVCpu), (pPage))
 #else
 # define PGMPOOL_PAGE_2_PTR_V2(pVM, pVCpu, pPage)   PGMPOOL_PAGE_2_PTR((pVM), (pPage))
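As a quick illustration of the reworked macros (a sketch only, assuming a ring-0 caller with pVM and pVCpu in scope; GCPhys and pv are placeholder names), the legacy form now funnels through the V2 macro:

    void *pv;
    /* Legacy form: looks up the current VMCPU itself via VMMGetCpu(pVM) ... */
    int rc  = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pv);
    /* ... while callers that already track pVCpu pass it explicitly: */
    int rc2 = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pv);
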
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r31170 → r31178)

                  */
                 PGSTPT pPTSrc;
-                int rc = PGM_GCPHYS_2_PTR(pVM, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
+                int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, PdeSrc.u & GST_PDE_PG_MASK, &pPTSrc);
                 if (RT_SUCCESS(rc))
                 {
…
      */
     PGSTPT pPTSrc;
-    int rc = PGM_GCPHYS_2_PTR(pVM, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
+    int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, pPdeSrc->u & GST_PDE_PG_MASK, &pPTSrc);
     if (RT_FAILURE(rc))
     {
…
                         */
                        const GSTPT *pPTSrc;
-                        rc = PGM_GCPHYS_2_PTR(pVM, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
+                        rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhysGst & ~(RTGCPHYS)(PAGE_SIZE - 1), &pPTSrc);
                        if (RT_FAILURE(rc))
                        {
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h (r31140 → r31178)

                  */
                 PGSTPT pPT;
-                int rc = PGM_GCPHYS_2_PTR(pVM, Pde.u & GST_PDE_PG_MASK, &pPT);
+                int rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, Pde.u & GST_PDE_PG_MASK, &pPT);
                 if (RT_SUCCESS(rc))
                 {
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r31170 → r31178)

             /** @todo we should check reserved bits ... */
             void *pvSrc;
-            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys, &pvSrc);
+            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, &pvSrc);
             switch (rc)
             {
…
             AssertMsgFailed(("cb=%d cb1=%d cb2=%d GCPtrSrc=%RGv\n", cb, cb1, cb2, GCPtrSrc));
             void *pvSrc1;
-            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys1, &pvSrc1);
+            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys1, &pvSrc1);
             switch (rc)
             {
…
 
             void *pvSrc2;
-            rc = PGM_GCPHYS_2_PTR(pVM, GCPhys2, &pvSrc2);
+            rc = PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys2, &pvSrc2);
             switch (rc)
             {
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h (r31167 → r31178)

     /* PDPT */
     PX86PDPT        pPDPT;
-    int rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
+    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
     if (RT_FAILURE(rc))
         return rc;
…
     /* PD */
     PX86PDPAE       pPd;
-    rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
+    rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
     if (RT_FAILURE(rc))
         return rc;
…
     if (!(Pde.u & PGM_PDFLAGS_MAPPING))
     {
-        int rc2 = PGM_HCPHYS_2_PTR(pVM, Pde.u & SHW_PDE_PG_MASK, &pPT);
+        int rc2 = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
         if (RT_FAILURE(rc2))
             return rc2;
…
         /* PDPT */
         PX86PDPT        pPDPT;
-        rc = PGM_HCPHYS_2_PTR(pVM, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
+        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pml4e.u & X86_PML4E_PG_MASK, &pPDPT);
         if (RT_FAILURE(rc))
             return rc;
…
         /* PD */
         PX86PDPAE       pPd;
-        rc = PGM_HCPHYS_2_PTR(pVM, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
+        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pdpe.u & X86_PDPE_PG_MASK, &pPd);
         if (RT_FAILURE(rc))
             return rc;
…
          */
         PSHWPT          pPT;
-        rc = PGM_HCPHYS_2_PTR(pVM, Pde.u & SHW_PDE_PG_MASK, &pPT);
+        rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, Pde.u & SHW_PDE_PG_MASK, &pPT);
         if (RT_FAILURE(rc))
             return rc;
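A minimal sketch of the updated PGM_HCPHYS_2_PTR call shape (hypothetical surrounding code; HCPhysPT is a placeholder for a host-physical page-table address):

    PSHWPT pPT;
    int rc = PGM_HCPHYS_2_PTR(pVM, pVCpu, HCPhysPT, &pPT);  /* pVCpu is now passed explicitly */
    if (RT_FAILURE(rc))
        return rc;
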