VirtualBox

Changeset 16300 in vbox


Ignore:
Timestamp:
Jan 28, 2009 12:06:35 PM (16 years ago)
Author:
vboxsync
Message:

More paging updates

Location:
trunk/src/VBox/VMM
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/PGM.cpp

    r16203 r16300  
    34123412    }
    34133413
     3414#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    34143415    /** @todo This is a bug!
    34153416     *
     
    34283429        pgmPoolFlushAll(pVM);
    34293430    }
     3431#endif
    34303432
    34313433    /*
  • trunk/src/VBox/VMM/PGMGst.h

    r16260 r16300  
    284284}
    285285
     286
     287#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    286288
    287289#if PGM_GST_TYPE == PGM_TYPE_32BIT
     
    404406
    405407#endif /* PAE */
    406 
     408#endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
  • trunk/src/VBox/VMM/PGMMap.cpp

    r16203 r16300  
    4040*   Internal Functions                                                         *
    4141*******************************************************************************/
    42 static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE);
     42static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
    4343static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
    4444static int  pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
    4545static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
    46 
     46#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     47static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
     48static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
     49#endif
    4750
    4851
     
    253256             */
    254257            MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
    255             pgmR3MapClearPDEs(&pVM->pgm.s, pCur, pCur->GCPtr >> X86_PD_SHIFT);
     258            pgmR3MapClearPDEs(pVM, pCur, pCur->GCPtr >> X86_PD_SHIFT);
    256259            MMHyperFree(pVM, pCur);
    257260
     
    601604         * Relocate the page table(s).
    602605         */
    603         pgmR3MapClearPDEs(&pVM->pgm.s, pCur, iPDOld);
     606        pgmR3MapClearPDEs(pVM, pCur, iPDOld);
    604607        pgmR3MapSetPDEs(pVM, pCur, iPDNew);
    605608
     
    892895
    893896/**
    894  * Clears all PDEs involved with the mapping.
    895  *
    896  * @param   pPGM        Pointer to the PGM instance data.
     897 * Clears all PDEs involved with the mapping in the shadow and intermediate page tables.
     898 *
     899 * @param   pVM         The VM handle.
    897900 * @param   pMap        Pointer to the mapping in question.
    898901 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
    899902 */
    900 static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE)
     903static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
    901904{
    902905    unsigned i = pMap->cPTs;
     906
     907#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     908    pgmR3MapClearShadowPDEs(pVM, pMap, iOldPDE);
     909#endif
     910
    903911    iOldPDE += i;
    904912    while (i-- > 0)
     
    909917         * 32-bit.
    910918         */
    911         pPGM->pInterPD->a[iOldPDE].u        = 0;
     919        pVM->pgm.s.pInterPD->a[iOldPDE].u        = 0;
    912920#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    913         pPGM->pShw32BitPdR3->a[iOldPDE].u   = 0;
     921        pVM->pgm.s.pShw32BitPdR3->a[iOldPDE].u   = 0;
    914922#endif
    915923        /*
    916924         * PAE.
    917925         */
    918         const unsigned iPD = iOldPDE / 256;
     926        const unsigned iPD = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
    919927        unsigned iPDE = iOldPDE * 2 % 512;
    920         pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
     928        pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
    921929#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    922         pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;
     930        pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
    923931#endif
    924932        iPDE++;
    925         pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
     933        AssertFatal(iPDE < 512);
     934        pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
    926935#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    927         pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;
     936        pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
    928937
    929938        /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
    930         pPGM->pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
    931 #endif
    932     }
    933 }
    934 
    935 
    936 /**
    937  * Sets all PDEs involved with the mapping.
     939        pVM->pgm.s.pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
     940#endif
     941    }
     942}
     943
     944
     945#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     946/**
     947 * Clears all PDEs involved with the mapping in the shadow page table.
     948 *
     949 * @param   pVM         The VM handle.
     950 * @param   pMap        Pointer to the mapping in question.
     951 * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
     952 */
     953static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
     954{
     955    unsigned i = pMap->cPTs;
     956    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);
     957
     958    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
     959        return;
     960
     961    iOldPDE += i;
     962    while (i-- > 0)
     963    {
     964        iOldPDE--;
     965
     966        switch(enmShadowMode)
     967        {
     968        case PGMMODE_32_BIT:
     969        {
     970            PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
     971            AssertFatal(pShw32BitPd);
     972
     973            pShw32BitPd->a[iOldPDE].u   = 0;
     974            break;
     975        }
     976
     977        case PGMMODE_PAE:
     978        case PGMMODE_PAE_NX:
     979        {
     980            PX86PDPT  pPdpt = NULL;
     981            PX86PDPAE pShwPaePd = NULL;
     982
     983            const unsigned iPD = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
     984            unsigned iPDE = iOldPDE * 2 % 512;
     985            pPdpt     = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
     986            pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
     987            AssertFatal(pShwPaePd);
     988
     989            pShwPaePd->a[iPDE].u = 0;
     990
     991            iPDE++;
     992            AssertFatal(iPDE < 512);
     993
     994            pShwPaePd->a[iPDE].u = 0;
     995            /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
     996            pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
     997            break;
     998        }
     999        }
     1000    }
     1001}
     1002#endif
     1003
     1004/**
     1005 * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.
    9381006 *
    9391007 * @param   pVM         The VM handle.
     
    9471015    Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s) || PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);
    9481016
     1017#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     1018    pgmR3MapSetShadowPDEs(pVM, pMap, iNewPDE);
     1019#endif
     1020
    9491021    /*
    9501022     * Init the page tables and insert them into the page directories.
     
    9601032         */
    9611033#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    962         if (   pgmMapAreMappingsEnabled(&pVM->pgm.s)
    963             && pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present)
     1034        if (    pgmMapAreMappingsEnabled(&pVM->pgm.s)
     1035            &&  pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present)
     1036        {
     1037            Assert(!(pPGM->pShw32BitPdR3->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
    9641038            pgmPoolFree(pVM, pPGM->pShw32BitPdR3->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
     1039        }
    9651040#endif
    9661041        X86PDE Pde;
     
    9801055        if (   pgmMapAreMappingsEnabled(&pVM->pgm.s)
    9811056            && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
     1057        {
     1058            Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    9821059            pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
     1060        }
    9831061#endif
    9841062        X86PDEPAE PdePae0;
     
    9901068#endif
    9911069        iPDE++;
     1070        AssertFatal(iPDE < 512);
    9921071#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    9931072        if (   pgmMapAreMappingsEnabled(&pVM->pgm.s)
    9941073            && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
     1074        {
     1075            Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    9951076            pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
     1077        }
    9961078#endif
    9971079        X86PDEPAE PdePae1;
     
    10101092}
    10111093
     1094#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     1095/**
     1096 * Sets all PDEs involved with the mapping in the shadow page table.
     1097 *
     1098 * @param   pVM         The VM handle.
     1099 * @param   pMap        Pointer to the mapping in question.
     1100 * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
     1101 */
     1102static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
     1103{
     1104    PPGM    pPGM = &pVM->pgm.s;
     1105    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
     1106
     1107    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
     1108        return;
     1109
     1110    Assert(enmShadowMode <= PGMMODE_PAE_NX);
     1111
     1112    /*
     1113     * Init the page tables and insert them into the page directories.
     1114     */
     1115    unsigned i = pMap->cPTs;
     1116    iNewPDE += i;
     1117    while (i-- > 0)
     1118    {
     1119        iNewPDE--;
     1120
     1121        switch(enmShadowMode)
     1122        {
     1123        case PGMMODE_32_BIT:
     1124        {
     1125            PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
     1126            AssertFatal(pShw32BitPd);
     1127
     1128            if (pShw32BitPd->a[iNewPDE].n.u1Present)
     1129            {
     1130                Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
     1131                pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.pShwPageCR3R3->idx, iNewPDE);
     1132            }
     1133
     1134            X86PDE Pde;
     1135            /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
     1136            Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
     1137            pShw32BitPd->a[iNewPDE]   = Pde;
     1138            break;
     1139        }
     1140
     1141        case PGMMODE_PAE:
     1142        case PGMMODE_PAE_NX:
     1143        {
     1144            PX86PDPT  pShwPdpt;
     1145            PX86PDPAE pShwPaePd;
     1146            const unsigned iPdPt = iNewPDE / 256;
     1147            unsigned iPDE = iNewPDE * 2 % 512;
     1148
     1149            pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
     1150            Assert(pShwPdpt);
     1151            pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
     1152            AssertFatal(pShwPaePd);
     1153
     1154            PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
     1155            AssertFatal(pPoolPagePde);
     1156
     1157            if (pShwPaePd->a[iPDE].n.u1Present)
     1158            {
     1159                Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
     1160                pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
     1161            }
     1162
     1163            X86PDEPAE PdePae0;
     1164            PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
     1165            pShwPaePd->a[iPDE] = PdePae0;
     1166
     1167            /* 2nd 2 MB PDE of the 4 MB region */
     1168            iPDE++;
     1169            AssertFatal(iPDE < 512);
     1170
     1171            if (pShwPaePd->a[iPDE].n.u1Present)
     1172            {
     1173                Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
     1174                pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
     1175            }
     1176
     1177            X86PDEPAE PdePae1;
     1178            PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
     1179            pShwPaePd->a[iPDE] = PdePae1;
     1180
     1181            /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
     1182            pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
     1183        }
     1184        }
     1185    }
     1186}
     1187#endif
    10121188
    10131189/**
     
    10301206     * Relocate the page table(s).
    10311207     */
    1032     pgmR3MapClearPDEs(&pVM->pgm.s, pMapping, iPDOld);
     1208    pgmR3MapClearPDEs(pVM, pMapping, iPDOld);
    10331209    pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
    10341210
     
    11951371        unsigned  iPDSrc;
    11961372        PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
     1373
     1374#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     1375        /* It would be annoying to have to deal with a PD that isn't (yet) present in the guest PDPT. */
     1376        if (!pPDSrc)
     1377            continue;
     1378#endif
    11971379
    11981380        /*
     
    13501532        return VINF_SUCCESS;
    13511533
    1352     PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    1353     Assert(enmGuestMode <= PGMMODE_PAE_NX);
    1354 
    13551534    /*
    13561535     * Iterate mappings.
    13571536     */
    1358     if (enmGuestMode == PGMMODE_32_BIT)
    1359     {
    1360         /*
    1361          * Resolve the page directory.
    1362          */
    1363         PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
    1364         Assert(pPD);
    1365 
    1366         for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    1367         {
    1368             unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
    1369             unsigned iPT = pCur->cPTs;
    1370             while (iPT-- > 0)
    1371                 pPD->a[iPDE + iPT].u = 0;
    1372         }
    1373     }
    1374     else if (   enmGuestMode == PGMMODE_PAE
    1375              || enmGuestMode == PGMMODE_PAE_NX)
    1376     {
    1377         for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    1378         {
    1379             RTGCPTR   GCPtr = pCur->GCPtr;
    1380             unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    1381 
    1382             unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
    1383             while (iPT-- > 0)
    1384             {
    1385                 PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
    1386                 pPDE->u = 0;
    1387 
    1388                 GCPtr += (1 << X86_PD_PAE_SHIFT);
    1389             }
    1390         }
    1391     }
    1392     else
    1393         AssertFailed();
     1537    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
     1538    {
     1539        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
     1540
     1541        pgmR3MapSetShadowPDEs(pVM, pCur, iPDE);
     1542    }
    13941543
    13951544    return VINF_SUCCESS;
     
    14101559        return VINF_SUCCESS;
    14111560
    1412     PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
    1413     Assert(enmGuestMode <= PGMMODE_PAE_NX);
    1414 
    14151561    /*
    14161562     * Iterate mappings.
    14171563     */
    1418     if (enmGuestMode == PGMMODE_32_BIT)
    1419     {
    1420         /*
    1421          * Resolve the page directory.
    1422          */
    1423         PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
    1424         Assert(pPD);
    1425 
    1426         for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    1427         {
    1428             unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
    1429             unsigned iPT = pCur->cPTs;
    1430             while (iPT-- > 0)
    1431                 pPD->a[iPDE + iPT].u = 0;
    1432         }
    1433     }
    1434     else if (   enmGuestMode == PGMMODE_PAE
    1435              || enmGuestMode == PGMMODE_PAE_NX)
    1436     {
    1437         for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
    1438         {
    1439             RTGCPTR   GCPtr = pCur->GCPtr;
    1440 
    1441             unsigned  iPT = pCur->cb >> X86_PD_PAE_SHIFT;
    1442             while (iPT-- > 0)
    1443             {
    1444                 PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
    1445                 pPDE->u = 0;
    1446 
    1447                 GCPtr += (1 << X86_PD_PAE_SHIFT);
    1448             }
    1449         }
    1450 
    1451         /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entries. (legacy PAE guest mode) */
    1452         PX86PDPT pPdpt = (PX86PDPT)pVM->pgm.s.pShwPageCR3R3;
    1453         for (unsigned i=0;i<X86_PG_PAE_PDPE_ENTRIES;i++)
    1454             pPdpt->a[i].u &= ~PGM_PLXFLAGS_MAPPING;
    1455     }
    1456     else
    1457         AssertFailed();
    1458 
     1564    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
     1565    {
     1566        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
     1567
     1568        pgmR3MapClearShadowPDEs(pVM, pCur, iPDE);
     1569    }
    14591570    return VINF_SUCCESS;
    14601571}
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r16232 r16300  
    28492849# ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    28502850        case PGMPOOLKIND_32BIT_PD:
     2851            Assert(iUserTable < X86_PG_ENTRIES);
     2852            break;
    28512853# else
    28522854        case PGMPOOLKIND_ROOT_32BIT_PD:
    2853 # endif
    28542855            Assert(iUserTable < X86_PG_ENTRIES);
    28552856            Assert(!(u.pau32[iUserTable] & PGM_PDFLAGS_MAPPING));
    28562857            break;
     2858# endif
    28572859# if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    28582860        case PGMPOOLKIND_ROOT_PAE_PD:
     
    34353437#endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
    34363438
    3437 /**
    3438  * Clear references to shadowed pages in a PAE (legacy or 64 bits) page directory.
     3439
     3440#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     3441/**
     3442 * Clear references to shadowed pages in a 32 bits page directory.
    34393443 *
    34403444 * @param   pPool       The pool.
     
    34423446 * @param   pShwPD      The shadow page directory (mapping of the page).
    34433447 */
     3448DECLINLINE(void) pgmPoolTrackDerefPD(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PD pShwPD)
     3449{
     3450    for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
     3451    {
     3452        if (    pShwPD->a[i].n.u1Present
     3453            &&  !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
     3454           )
     3455        {
     3456            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PG_MASK);
     3457            if (pSubPage)
     3458                pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
     3459            else
     3460                AssertFatalMsgFailed(("%x\n", pShwPD->a[i].u & X86_PDE_PG_MASK));
     3461            /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
     3462        }
     3463    }
     3464}
     3465#endif
     3466
     3467/**
     3468 * Clear references to shadowed pages in a PAE (legacy or 64 bits) page directory.
     3469 *
     3470 * @param   pPool       The pool.
     3471 * @param   pPage       The page.
     3472 * @param   pShwPD      The shadow page directory (mapping of the page).
     3473 */
    34443474DECLINLINE(void) pgmPoolTrackDerefPDPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPAE pShwPD)
    34453475{
    34463476    for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
    34473477    {
    3448         if (pShwPD->a[i].n.u1Present)
     3478        if (    pShwPD->a[i].n.u1Present
     3479#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     3480            &&  !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
     3481#endif
     3482           )
    34493483        {
    34503484            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK);
     
    34703504    for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
    34713505    {
    3472         if (pShwPDPT->a[i].n.u1Present)
     3506        if (    pShwPDPT->a[i].n.u1Present
     3507#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     3508            &&  !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
     3509#endif
     3510           )
    34733511        {
    34743512            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
     
    36603698            break;
    36613699
     3700#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     3701        case PGMPOOLKIND_32BIT_PD:
     3702            pgmPoolTrackDerefPD(pPool, pPage, (PX86PD)pvShw);
     3703            break;
     3704
     3705        case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
     3706        case PGMPOOLKIND_PAE_PDPT:
     3707#endif
    36623708        case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
    36633709        case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle · Support · Privacy / Do Not Sell My Info · Terms of Use · Trademark Policy · Automated Access Etiquette