VirtualBox

Changeset 19872 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
May 20, 2009 3:05:53 PM (16 years ago)
Author:
vboxsync
Message:

Cleaned up

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r19869 r19872  
    38073807    PGMPOOL_UNLOCK_PTR(pPool->CTX_SUFF(pVM), pvShw);
    38083808}
    3809 
    38103809#endif /* PGMPOOL_WITH_USER_TRACKING */
     3810
/**
 * Flushes a pool page.
 *
 * This moves the page to the free list after removing all user references to it.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS on success.
 * @param   pPool       The pool.
 * @param   pPage       The shadow page to flush.  (The old doc said 'HCPhys';
 *                      the parameter list actually takes the page pointer.)
 */
int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
{
    PVM pVM = pPool->CTX_SUFF(pVM);

    int rc = VINF_SUCCESS;
    STAM_PROFILE_START(&pPool->StatFlushPage, f);
    LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n",
             pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));

    /*
     * Quietly reject any attempts at flushing any of the special root pages.
     */
    if (pPage->idx < PGMPOOL_IDX_FIRST)
    {
        AssertFailed(); /* can no longer happen */
        Log(("pgmPoolFlushPage: special root page, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
        return VINF_SUCCESS;
    }

    pgmLock(pVM);

    /*
     * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
     */
    if (pgmPoolIsPageLocked(&pVM->pgm.s, pPage))
    {
        /* Only CR3-root style page kinds may legitimately be locked here;
           anything else trips the assertion below. */
        AssertMsg(   pPage->enmKind == PGMPOOLKIND_64BIT_PML4
                  || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
                  || pPage->enmKind == PGMPOOLKIND_PAE_PDPT_FOR_32BIT
                  || pPage->enmKind == PGMPOOLKIND_32BIT_PD
                  || pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD
                  || pPage->enmKind == PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD
                  || pPage->enmKind == PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD
                  || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
                  || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
                  ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
        Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
        pgmUnlock(pVM);
        return VINF_SUCCESS;
    }

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    /* Start a subset so we won't run out of mapping space. */
    PVMCPU pVCpu = VMMGetCpu(pVM);
    uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
#endif

    /*
     * Mark the page as being in need of a ASMMemZeroPage().
     * (pgmPoolAlloc checks fZeroed and re-zeroes the page before reuse.)
     */
    pPage->fZeroed = false;

#ifdef PGMPOOL_WITH_USER_TRACKING
    /*
     * Clear the page: drop all user references, then dereference the
     * guest pages it shadows.
     */
    pgmPoolTrackClearPageUsers(pPool, pPage);
    STAM_PROFILE_START(&pPool->StatTrackDeref,a);
    pgmPoolTrackDeref(pPool, pPage);
    STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
#endif

#ifdef PGMPOOL_WITH_CACHE
    /*
     * Flush it from the cache.
     */
    pgmPoolCacheFlushPage(pPool, pPage);
#endif /* PGMPOOL_WITH_CACHE */

#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    /* Heavy stuff done. */
    PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
#endif

#ifdef PGMPOOL_WITH_MONITORING
    /*
     * Deregistering the monitoring.  This is the only step that can change
     * the status code returned to the caller.
     */
    if (pPage->fMonitored)
        rc = pgmPoolMonitorFlush(pPool, pPage);
#endif

    /*
     * Free the page: push it onto the free list and reset its identity.
     */
    Assert(pPage->iNext == NIL_PGMPOOL_IDX);
    pPage->iNext = pPool->iFreeHead;
    pPool->iFreeHead = pPage->idx;
    pPage->enmKind = PGMPOOLKIND_FREE;
    pPage->GCPhys = NIL_RTGCPHYS;
    pPage->fReusedFlushPending = false;

    pPool->cUsedPages--;
    pgmUnlock(pVM);
    STAM_PROFILE_STOP(&pPool->StatFlushPage, f);
    return rc;
}
     3918
     3919
     3920/**
     3921 * Frees a usage of a pool page.
     3922 *
     3923 * The caller is responsible to updating the user table so that it no longer
     3924 * references the shadow page.
     3925 *
     3926 * @param   pPool       The pool.
     3927 * @param   HCPhys      The HC physical address of the shadow page.
     3928 * @param   iUser       The shadow page pool index of the user table.
     3929 * @param   iUserTable  The index into the user table (shadowed).
     3930 */
     3931void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
     3932{
     3933    PVM pVM = pPool->CTX_SUFF(pVM);
     3934
     3935    STAM_PROFILE_START(&pPool->StatFree, a);
     3936    LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%s} iUser=%#x iUserTable=%#x\n",
     3937             pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), iUser, iUserTable));
     3938    Assert(pPage->idx >= PGMPOOL_IDX_FIRST);
     3939    pgmLock(pVM);
     3940#ifdef PGMPOOL_WITH_USER_TRACKING
     3941    pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
     3942#endif
     3943#ifdef PGMPOOL_WITH_CACHE
     3944    if (!pPage->fCached)
     3945#endif
     3946        pgmPoolFlushPage(pPool, pPage);
     3947    pgmUnlock(pVM);
     3948    STAM_PROFILE_STOP(&pPool->StatFree, a);
     3949}
     3950
     3951
     3952/**
     3953 * Makes one or more free page free.
     3954 *
     3955 * @returns VBox status code.
     3956 * @retval  VINF_SUCCESS on success.
     3957 * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
     3958 *
     3959 * @param   pPool       The pool.
     3960 * @param   enmKind     Page table kind
     3961 * @param   iUser       The user of the page.
     3962 */
     3963static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
     3964{
     3965    LogFlow(("pgmPoolMakeMoreFreePages: iUser=%#x\n", iUser));
     3966
     3967    /*
     3968     * If the pool isn't full grown yet, expand it.
     3969     */
     3970    if (    pPool->cCurPages < pPool->cMaxPages
     3971#if defined(IN_RC)
     3972        /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
     3973        &&  enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
     3974        &&  (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD)
     3975#endif
     3976        )
     3977    {
     3978        STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
     3979#ifdef IN_RING3
     3980        int rc = PGMR3PoolGrow(pPool->pVMR3);
     3981#else
     3982        int rc = CTXALLMID(VMM, CallHost)(pPool->CTX_SUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0);
     3983#endif
     3984        if (RT_FAILURE(rc))
     3985            return rc;
     3986        STAM_PROFILE_ADV_RESUME(&pPool->StatAlloc, a);
     3987        if (pPool->iFreeHead != NIL_PGMPOOL_IDX)
     3988            return VINF_SUCCESS;
     3989    }
     3990
     3991#ifdef PGMPOOL_WITH_CACHE
     3992    /*
     3993     * Free one cached page.
     3994     */
     3995    return pgmPoolCacheFreeOne(pPool, iUser);
     3996#else
     3997    /*
     3998     * Flush the pool.
     3999     *
     4000     * If we have tracking enabled, it should be possible to come up with
     4001     * a cheap replacement strategy...
     4002     */
     4003    /* @todo This path no longer works (CR3 root pages will be flushed)!! */
     4004    AssertCompileFailed();
     4005    Assert(!CPUMIsGuestInLongMode(pVM));
     4006    pgmPoolFlushAllInt(pPool);
     4007    return VERR_PGM_POOL_FLUSHED;
     4008#endif
     4009}
     4010
     4011
/**
 * Allocates a page from the pool.
 *
 * This page may actually be a cached page and not in need of any processing
 * on the callers part.
 *
 * @returns VBox status code.
 * @retval  VINF_SUCCESS if a NEW page was allocated.
 * @retval  VINF_PGM_CACHED_PAGE if a CACHED page was returned.
 * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
 * @param   pVM         The VM handle.
 * @param   GCPhys      The GC physical address of the page we're gonna shadow.
 *                      For 4MB and 2MB PD entries, it's the first address the
 *                      shadow PT is covering.
 * @param   enmKind     The kind of mapping.
 * @param   iUser       The shadow page pool index of the user table.
 * @param   iUserTable  The index into the user table (shadowed).
 * @param   ppPage      Where to store the pointer to the page. NULL is stored here on failure.
 */
int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
{
    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
    LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%s iUser=%#x iUserTable=%#x\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable));
    *ppPage = NULL;
    /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
     *  (TRPMR3SyncIDT) because of FF priority. Try fix that?
     *  Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL)); */

    pgmLock(pVM);

#ifdef PGMPOOL_WITH_CACHE
    /* Try satisfy the request from the cache first; on a hit we're done. */
    if (pPool->fCacheEnabled)
    {
        int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, iUser, iUserTable, ppPage);
        if (RT_SUCCESS(rc2))
        {
            pgmUnlock(pVM);
            STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
            LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
            return rc2;
        }
    }
#endif

    /*
     * Allocate a new one.
     */
    int         rc = VINF_SUCCESS;
    uint16_t    iNew = pPool->iFreeHead;
    if (iNew == NIL_PGMPOOL_IDX)
    {
        /* Free list empty: grow the pool or evict a cached page. */
        rc = pgmPoolMakeMoreFreePages(pPool, enmKind, iUser);
        if (RT_FAILURE(rc))
        {
            pgmUnlock(pVM);
            Log(("pgmPoolAlloc: returns %Rrc (Free)\n", rc));
            STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
            return rc;
        }
        iNew = pPool->iFreeHead;
        AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_INTERNAL_ERROR);
    }

    /* unlink the free head */
    PPGMPOOLPAGE pPage = &pPool->aPages[iNew];
    pPool->iFreeHead = pPage->iNext;
    pPage->iNext = NIL_PGMPOOL_IDX;

    /*
     * Initialize it.
     */
    pPool->cUsedPages++;                /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
    pPage->enmKind = enmKind;
    pPage->GCPhys = GCPhys;
    pPage->fSeenNonGlobal = false;      /* Set this to 'true' to disable this feature. */
    pPage->fMonitored = false;
    pPage->fCached = false;
    pPage->fReusedFlushPending = false;
#ifdef PGMPOOL_WITH_MONITORING
    pPage->cModifications = 0;
    pPage->iModifiedNext = NIL_PGMPOOL_IDX;
    pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
#else
    pPage->fCR3Mix = false;
#endif
#ifdef PGMPOOL_WITH_USER_TRACKING
    pPage->cPresent = 0;
    pPage->iFirstPresent = ~0;

    /*
     * Insert into the tracking and cache. If this fails, free the page.
     */
    int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
    if (RT_FAILURE(rc3))
    {
        /* Roll back: return the page to the free list before bailing out. */
        pPool->cUsedPages--;
        pPage->enmKind = PGMPOOLKIND_FREE;
        pPage->GCPhys = NIL_RTGCPHYS;
        pPage->iNext = pPool->iFreeHead;
        pPool->iFreeHead = pPage->idx;
        pgmUnlock(pVM);
        STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
        Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
        return rc3;
    }
#endif /* PGMPOOL_WITH_USER_TRACKING */

    /*
     * Commit the allocation, clear the page and return.
     */
#ifdef VBOX_WITH_STATISTICS
    if (pPool->cUsedPages > pPool->cUsedPagesHigh)
        pPool->cUsedPagesHigh = pPool->cUsedPages;
#endif

    /* Zero the page unless it was already zeroed when flushed (fZeroed). */
    if (!pPage->fZeroed)
    {
        STAM_PROFILE_START(&pPool->StatZeroPage, z);
        void *pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
        ASMMemZeroPage(pv);
        STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
    }

    *ppPage = pPage;
    pgmUnlock(pVM);
    LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
             rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
    STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
    return rc;
}
     4143
     4144
     4145/**
     4146 * Frees a usage of a pool page.
     4147 *
     4148 * @param   pVM         The VM handle.
     4149 * @param   HCPhys      The HC physical address of the shadow page.
     4150 * @param   iUser       The shadow page pool index of the user table.
     4151 * @param   iUserTable  The index into the user table (shadowed).
     4152 */
     4153void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
     4154{
     4155    LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
     4156    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     4157    pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
     4158}
     4159
     4160/**
     4161 * Internal worker for finding a 'in-use' shadow page give by it's physical address.
     4162 *
     4163 * @returns Pointer to the shadow page structure.
     4164 * @param   pPool       The pool.
     4165 * @param   HCPhys      The HC physical address of the shadow page.
     4166 */
     4167PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
     4168{
     4169    PVM pVM = pPool->CTX_SUFF(pVM);
     4170
     4171    /*
     4172     * Look up the page.
     4173     */
     4174    pgmLock(pVM);
     4175    PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
     4176    pgmUnlock(pVM);
     4177
     4178    AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
     4179    return pPage;
     4180}
     4181
     4182
    38114183#ifdef IN_RING3
    38124184/**
     
    38174189 *
    38184190 * @param   pPool       The pool.
    3819  *
    3820  * @remark Only used during reset now, we might want to rename and/or move it.
    3821  */
    3822 static void pgmPoolFlushAllInt(PPGMPOOL pPool)
    3823 {
    3824     PVM pVM = pPool->CTX_SUFF(pVM);
     4191 */
     4192void pgmR3PoolReset(PVM pVM)
     4193{
     4194    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    38254195
    38264196    Assert(PGMIsLockOwner(pVM));
     
    38414211     * including the root page.
    38424212     */
    3843     /** @todo Need to synchronize this across all VCPUs! */
    3844     Assert(pVM->cCPUs == 1);
    38454213    for (unsigned i=0;i<pVM->cCPUs;i++)
    38464214    {
     
    39984366
    39994367    STAM_PROFILE_STOP(&pPool->StatFlushAllInt, a);
    4000 }
    4001 
    4002 #endif /* IN_RING3 */
    4003 
    4004 /**
    4005  * Flushes a pool page.
    4006  *
    4007  * This moves the page to the free list after removing all user references to it.
    4008  *
    4009  * @returns VBox status code.
    4010  * @retval  VINF_SUCCESS on success.
    4011  * @param   pPool       The pool.
    4012  * @param   HCPhys      The HC physical address of the shadow page.
    4013  */
    4014 int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage)
    4015 {
    4016     PVM pVM = pPool->CTX_SUFF(pVM);
    4017 
    4018     int rc = VINF_SUCCESS;
    4019     STAM_PROFILE_START(&pPool->StatFlushPage, f);
    4020     LogFlow(("pgmPoolFlushPage: pPage=%p:{.Key=%RHp, .idx=%d, .enmKind=%s, .GCPhys=%RGp}\n",
    4021              pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), pPage->GCPhys));
    4022 
    4023     /*
    4024      * Quietly reject any attempts at flushing any of the special root pages.
    4025      */
    4026     if (pPage->idx < PGMPOOL_IDX_FIRST)
    4027     {
    4028         AssertFailed(); /* can no longer happen */
    4029         Log(("pgmPoolFlushPage: special root page, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
    4030         return VINF_SUCCESS;
    4031     }
    4032 
    4033     pgmLock(pVM);
    4034 
    4035     /*
    4036      * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
    4037      */
    4038     if (pgmPoolIsPageLocked(&pVM->pgm.s, pPage))
    4039     {
    4040         AssertMsg(   pPage->enmKind == PGMPOOLKIND_64BIT_PML4
    4041                   || pPage->enmKind == PGMPOOLKIND_PAE_PDPT
    4042                   || pPage->enmKind == PGMPOOLKIND_PAE_PDPT_FOR_32BIT
    4043                   || pPage->enmKind == PGMPOOLKIND_32BIT_PD
    4044                   || pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD
    4045                   || pPage->enmKind == PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD
    4046                   || pPage->enmKind == PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD
    4047                   || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
    4048                   || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    4049                   ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pVM)), pPage->Core.Key, pPage->enmKind));
    4050         Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
    4051         pgmUnlock(pVM);
    4052         return VINF_SUCCESS;
    4053     }
    4054 
    4055 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4056     /* Start a subset so we won't run out of mapping space. */
    4057     PVMCPU pVCpu = VMMGetCpu(pVM);
    4058     uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
    4059 #endif
    4060 
    4061     /*
    4062      * Mark the page as being in need of a ASMMemZeroPage().
    4063      */
    4064     pPage->fZeroed = false;
    4065 
    4066 #ifdef PGMPOOL_WITH_USER_TRACKING
    4067     /*
    4068      * Clear the page.
    4069      */
    4070     pgmPoolTrackClearPageUsers(pPool, pPage);
    4071     STAM_PROFILE_START(&pPool->StatTrackDeref,a);
    4072     pgmPoolTrackDeref(pPool, pPage);
    4073     STAM_PROFILE_STOP(&pPool->StatTrackDeref,a);
    4074 #endif
    4075 
    4076 #ifdef PGMPOOL_WITH_CACHE
    4077     /*
    4078      * Flush it from the cache.
    4079      */
    4080     pgmPoolCacheFlushPage(pPool, pPage);
    4081 #endif /* PGMPOOL_WITH_CACHE */
    4082 
    4083 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4084     /* Heavy stuff done. */
    4085     PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
    4086 #endif
    4087 
    4088 #ifdef PGMPOOL_WITH_MONITORING
    4089     /*
    4090      * Deregistering the monitoring.
    4091      */
    4092     if (pPage->fMonitored)
    4093         rc = pgmPoolMonitorFlush(pPool, pPage);
    4094 #endif
    4095 
    4096     /*
    4097      * Free the page.
    4098      */
    4099     Assert(pPage->iNext == NIL_PGMPOOL_IDX);
    4100     pPage->iNext = pPool->iFreeHead;
    4101     pPool->iFreeHead = pPage->idx;
    4102     pPage->enmKind = PGMPOOLKIND_FREE;
    4103     pPage->GCPhys = NIL_RTGCPHYS;
    4104     pPage->fReusedFlushPending = false;
    4105 
    4106     pPool->cUsedPages--;
    4107     pgmUnlock(pVM);
    4108     STAM_PROFILE_STOP(&pPool->StatFlushPage, f);
    4109     return rc;
    4110 }
    4111 
    4112 
    4113 /**
    4114  * Frees a usage of a pool page.
    4115  *
    4116  * The caller is responsible for updating the user table so that it no longer
    4117  * references the shadow page.
    4118  *
    4119  * @param   pPool       The pool.
    4120  * @param   HCPhys      The HC physical address of the shadow page.
    4121  * @param   iUser       The shadow page pool index of the user table.
    4122  * @param   iUserTable  The index into the user table (shadowed).
    4123  */
    4124 void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable)
    4125 {
    4126     PVM pVM = pPool->CTX_SUFF(pVM);
    4127 
    4128     STAM_PROFILE_START(&pPool->StatFree, a);
    4129     LogFlow(("pgmPoolFreeByPage: pPage=%p:{.Key=%RHp, .idx=%d, enmKind=%s} iUser=%#x iUserTable=%#x\n",
    4130              pPage, pPage->Core.Key, pPage->idx, pgmPoolPoolKindToStr(pPage->enmKind), iUser, iUserTable));
    4131     Assert(pPage->idx >= PGMPOOL_IDX_FIRST);
    4132     pgmLock(pVM);
    4133 #ifdef PGMPOOL_WITH_USER_TRACKING
    4134     pgmPoolTrackFreeUser(pPool, pPage, iUser, iUserTable);
    4135 #endif
    4136 #ifdef PGMPOOL_WITH_CACHE
    4137     if (!pPage->fCached)
    4138 #endif
    4139         pgmPoolFlushPage(pPool, pPage);
    4140     pgmUnlock(pVM);
    4141     STAM_PROFILE_STOP(&pPool->StatFree, a);
    4142 }
    4143 
    4144 
    4145 /**
    4146  * Makes one or more pages free.
    4147  *
    4148  * @returns VBox status code.
    4149  * @retval  VINF_SUCCESS on success.
    4150  * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
    4151  *
    4152  * @param   pPool       The pool.
    4153  * @param   enmKind     Page table kind
    4154  * @param   iUser       The user of the page.
    4155  */
    4156 static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser)
    4157 {
    4158     LogFlow(("pgmPoolMakeMoreFreePages: iUser=%#x\n", iUser));
    4159 
    4160     /*
    4161      * If the pool isn't full grown yet, expand it.
    4162      */
    4163     if (    pPool->cCurPages < pPool->cMaxPages
    4164 #if defined(IN_RC)
    4165         /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
    4166         &&  enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
    4167         &&  (enmKind < PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD || enmKind > PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD)
    4168 #endif
    4169         )
    4170     {
    4171         STAM_PROFILE_ADV_SUSPEND(&pPool->StatAlloc, a);
    4172 #ifdef IN_RING3
    4173         int rc = PGMR3PoolGrow(pPool->pVMR3);
    4174 #else
    4175         int rc = CTXALLMID(VMM, CallHost)(pPool->CTX_SUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0);
    4176 #endif
    4177         if (RT_FAILURE(rc))
    4178             return rc;
    4179         STAM_PROFILE_ADV_RESUME(&pPool->StatAlloc, a);
    4180         if (pPool->iFreeHead != NIL_PGMPOOL_IDX)
    4181             return VINF_SUCCESS;
    4182     }
    4183 
    4184 #ifdef PGMPOOL_WITH_CACHE
    4185     /*
    4186      * Free one cached page.
    4187      */
    4188     return pgmPoolCacheFreeOne(pPool, iUser);
    4189 #else
    4190     /*
    4191      * Flush the pool.
    4192      *
    4193      * If we have tracking enabled, it should be possible to come up with
    4194      * a cheap replacement strategy...
    4195      */
    4196     /* @todo This path no longer works (CR3 root pages will be flushed)!! */
    4197     AssertCompileFailed();
    4198     Assert(!CPUMIsGuestInLongMode(pVM));
    4199     pgmPoolFlushAllInt(pPool);
    4200     return VERR_PGM_POOL_FLUSHED;
    4201 #endif
    4202 }
    4203 
    4204 
    4205 /**
    4206  * Allocates a page from the pool.
    4207  *
    4208  * This page may actually be a cached page and not in need of any processing
    4209  * on the callers part.
    4210  *
    4211  * @returns VBox status code.
    4212  * @retval  VINF_SUCCESS if a NEW page was allocated.
    4213  * @retval  VINF_PGM_CACHED_PAGE if a CACHED page was returned.
    4214  * @retval  VERR_PGM_POOL_FLUSHED if the pool was flushed.
    4215  * @param   pVM         The VM handle.
    4216  * @param   GCPhys      The GC physical address of the page we're gonna shadow.
    4217  *                      For 4MB and 2MB PD entries, it's the first address the
    4218  *                      shadow PT is covering.
    4219  * @param   enmKind     The kind of mapping.
    4220  * @param   iUser       The shadow page pool index of the user table.
    4221  * @param   iUserTable  The index into the user table (shadowed).
    4222  * @param   ppPage      Where to store the pointer to the page. NULL is stored here on failure.
    4223  */
    4224 int pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage)
    4225 {
    4226     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    4227     STAM_PROFILE_ADV_START(&pPool->StatAlloc, a);
    4228     LogFlow(("pgmPoolAlloc: GCPhys=%RGp enmKind=%s iUser=%#x iUserTable=%#x\n", GCPhys, pgmPoolPoolKindToStr(enmKind), iUser, iUserTable));
    4229     *ppPage = NULL;
    4230     /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
    4231      *  (TRPMR3SyncIDT) because of FF priority. Try fix that?
    4232      *  Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_GLOBAL_SYNC_CLEAR_PGM_POOL)); */
    4233 
    4234     pgmLock(pVM);
    4235 
    4236 #ifdef PGMPOOL_WITH_CACHE
    4237     if (pPool->fCacheEnabled)
    4238     {
    4239         int rc2 = pgmPoolCacheAlloc(pPool, GCPhys, enmKind, iUser, iUserTable, ppPage);
    4240         if (RT_SUCCESS(rc2))
    4241         {
    4242             pgmUnlock(pVM);
    4243             STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
    4244             LogFlow(("pgmPoolAlloc: cached returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d}\n", rc2, *ppPage, (*ppPage)->Core.Key, (*ppPage)->idx));
    4245             return rc2;
    4246         }
    4247     }
    4248 #endif
    4249 
    4250     /*
    4251      * Allocate a new one.
    4252      */
    4253     int         rc = VINF_SUCCESS;
    4254     uint16_t    iNew = pPool->iFreeHead;
    4255     if (iNew == NIL_PGMPOOL_IDX)
    4256     {
    4257         rc = pgmPoolMakeMoreFreePages(pPool, enmKind, iUser);
    4258         if (RT_FAILURE(rc))
    4259         {
    4260             pgmUnlock(pVM);
    4261             Log(("pgmPoolAlloc: returns %Rrc (Free)\n", rc));
    4262             STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
    4263             return rc;
    4264         }
    4265         iNew = pPool->iFreeHead;
    4266         AssertReleaseReturn(iNew != NIL_PGMPOOL_IDX, VERR_INTERNAL_ERROR);
    4267     }
    4268 
    4269     /* unlink the free head */
    4270     PPGMPOOLPAGE pPage = &pPool->aPages[iNew];
    4271     pPool->iFreeHead = pPage->iNext;
    4272     pPage->iNext = NIL_PGMPOOL_IDX;
    4273 
    4274     /*
    4275      * Initialize it.
    4276      */
    4277     pPool->cUsedPages++;                /* physical handler registration / pgmPoolTrackFlushGCPhysPTsSlow requirement. */
    4278     pPage->enmKind = enmKind;
    4279     pPage->GCPhys = GCPhys;
    4280     pPage->fSeenNonGlobal = false;      /* Set this to 'true' to disable this feature. */
    4281     pPage->fMonitored = false;
    4282     pPage->fCached = false;
    4283     pPage->fReusedFlushPending = false;
    4284 #ifdef PGMPOOL_WITH_MONITORING
    4285     pPage->cModifications = 0;
    4286     pPage->iModifiedNext = NIL_PGMPOOL_IDX;
    4287     pPage->iModifiedPrev = NIL_PGMPOOL_IDX;
    4288 #else
    4289     pPage->fCR3Mix = false;
    4290 #endif
    4291 #ifdef PGMPOOL_WITH_USER_TRACKING
    4292     pPage->cPresent = 0;
    4293     pPage->iFirstPresent = ~0;
    4294 
    4295     /*
    4296      * Insert into the tracking and cache. If this fails, free the page.
    4297      */
    4298     int rc3 = pgmPoolTrackInsert(pPool, pPage, GCPhys, iUser, iUserTable);
    4299     if (RT_FAILURE(rc3))
    4300     {
    4301         pPool->cUsedPages--;
    4302         pPage->enmKind = PGMPOOLKIND_FREE;
    4303         pPage->GCPhys = NIL_RTGCPHYS;
    4304         pPage->iNext = pPool->iFreeHead;
    4305         pPool->iFreeHead = pPage->idx;
    4306         pgmUnlock(pVM);
    4307         STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
    4308         Log(("pgmPoolAlloc: returns %Rrc (Insert)\n", rc3));
    4309         return rc3;
    4310     }
    4311 #endif /* PGMPOOL_WITH_USER_TRACKING */
    4312 
    4313     /*
    4314      * Commit the allocation, clear the page and return.
    4315      */
    4316 #ifdef VBOX_WITH_STATISTICS
    4317     if (pPool->cUsedPages > pPool->cUsedPagesHigh)
    4318         pPool->cUsedPagesHigh = pPool->cUsedPages;
    4319 #endif
    4320 
    4321     if (!pPage->fZeroed)
    4322     {
    4323         STAM_PROFILE_START(&pPool->StatZeroPage, z);
    4324         void *pv = PGMPOOL_PAGE_2_PTR(pVM, pPage);
    4325         ASMMemZeroPage(pv);
    4326         STAM_PROFILE_STOP(&pPool->StatZeroPage, z);
    4327     }
    4328 
    4329     *ppPage = pPage;
    4330     pgmUnlock(pVM);
    4331     LogFlow(("pgmPoolAlloc: returns %Rrc *ppPage=%p:{.Key=%RHp, .idx=%d, .fCached=%RTbool, .fMonitored=%RTbool}\n",
    4332              rc, pPage, pPage->Core.Key, pPage->idx, pPage->fCached, pPage->fMonitored));
    4333     STAM_PROFILE_ADV_STOP(&pPool->StatAlloc, a);
    4334     return rc;
    4335 }
    4336 
    4337 
    4338 /**
    4339  * Frees a usage of a pool page.
    4340  *
    4341  * @param   pVM         The VM handle.
    4342  * @param   HCPhys      The HC physical address of the shadow page.
    4343  * @param   iUser       The shadow page pool index of the user table.
    4344  * @param   iUserTable  The index into the user table (shadowed).
    4345  */
    4346 void pgmPoolFree(PVM pVM, RTHCPHYS HCPhys, uint16_t iUser, uint32_t iUserTable)
    4347 {
    4348     LogFlow(("pgmPoolFree: HCPhys=%RHp iUser=%#x iUserTable=%#x\n", HCPhys, iUser, iUserTable));
    4349     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    4350     pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, HCPhys), iUser, iUserTable);
    4351 }
    4352 
    4353 /**
    4354  * Internal worker for finding an 'in-use' shadow page given by its physical address.
    4355  *
    4356  * @returns Pointer to the shadow page structure.
    4357  * @param   pPool       The pool.
    4358  * @param   HCPhys      The HC physical address of the shadow page.
    4359  */
    4360 PPGMPOOLPAGE pgmPoolGetPage(PPGMPOOL pPool, RTHCPHYS HCPhys)
    4361 {
    4362     PVM pVM = pPool->CTX_SUFF(pVM);
    4363 
    4364     /*
    4365      * Look up the page.
    4366      */
    4367     pgmLock(pVM);
    4368     PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, HCPhys & X86_PTE_PAE_PG_MASK);
    4369     pgmUnlock(pVM);
    4370 
    4371     AssertFatalMsg(pPage && pPage->enmKind != PGMPOOLKIND_FREE, ("HCPhys=%RHp pPage=%p idx=%d\n", HCPhys, pPage, (pPage) ? pPage->idx : 0));
    4372     return pPage;
    4373 }
    4374 
    4375 
    4376 #ifdef IN_RING3
    4377 /**
    4378  * Flushes the entire cache.
    4379  *
    4380  * It will assert a global CR3 flush (FF) and assumes the caller is aware of this
    4381  * and execute this CR3 flush.
    4382  *
    4383  * @param   pPool       The pool.
    4384  */
    4385 void pgmPoolFlushAll(PVM pVM)
    4386 {
    4387     LogFlow(("pgmPoolFlushAll:\n"));
    4388     pgmPoolFlushAllInt(pVM->pgm.s.CTX_SUFF(pPool));
    43894368}
    43904369#endif /* IN_RING3 */
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette