Changeset 16300 in vbox
- Timestamp: Jan 28, 2009 12:06:35 PM
- Location: trunk/src/VBox/VMM
- Files: 4 edited
trunk/src/VBox/VMM/PGM.cpp
r16203 → r16300

     }
 
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     /** @todo This is a bug!
      *
…
         pgmPoolFlushAll(pVM);
     }
+#endif
 
     /*
trunk/src/VBox/VMM/PGMGst.h
r16260 → r16300

 }
 
+
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
 
 #if PGM_GST_TYPE == PGM_TYPE_32BIT
…
 
 #endif /* PAE */
+#endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
trunk/src/VBox/VMM/PGMMap.cpp
r16203 → r16300

 *   Internal Functions                                                        *
 *******************************************************************************/
-static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE);
+static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
 static void pgmR3MapSetPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
 static int  pgmR3MapIntermediateCheckOne(PVM pVM, uintptr_t uAddress, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
 static void pgmR3MapIntermediateDoOne(PVM pVM, uintptr_t uAddress, RTHCPHYS HCPhys, unsigned cPages, PX86PT pPTDefault, PX86PTPAE pPTPaeDefault);
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE);
+static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
+#endif
…
      */
     MMHyperFree(pVM, pCur->aPTs[0].pPTR3);
-    pgmR3MapClearPDEs(&pVM->pgm.s, pCur, pCur->GCPtr >> X86_PD_SHIFT);
+    pgmR3MapClearPDEs(pVM, pCur, pCur->GCPtr >> X86_PD_SHIFT);
     MMHyperFree(pVM, pCur);
…
      * Relocate the page table(s).
      */
-    pgmR3MapClearPDEs(&pVM->pgm.s, pCur, iPDOld);
+    pgmR3MapClearPDEs(pVM, pCur, iPDOld);
     pgmR3MapSetPDEs(pVM, pCur, iPDNew);
…
 /**
- * Clears all PDEs involved with the mapping.
- *
- * @param   pPGM        Pointer to the PGM instance data.
+ * Clears all PDEs involved with the mapping in the shadow and intermediate page tables.
+ *
+ * @param   pVM         The VM handle.
  * @param   pMap        Pointer to the mapping in question.
  * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
  */
-static void pgmR3MapClearPDEs(PPGM pPGM, PPGMMAPPING pMap, unsigned iOldPDE)
+static void pgmR3MapClearPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
 {
     unsigned i = pMap->cPTs;
+
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    pgmR3MapClearShadowPDEs(pVM, pMap, iOldPDE);
+#endif
+
     iOldPDE += i;
     while (i-- > 0)
…
         /*
          * 32-bit.
          */
-        pPGM->pInterPD->a[iOldPDE].u = 0;
+        pVM->pgm.s.pInterPD->a[iOldPDE].u = 0;
 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
-        pPGM->pShw32BitPdR3->a[iOldPDE].u = 0;
+        pVM->pgm.s.pShw32BitPdR3->a[iOldPDE].u = 0;
 #endif
         /*
          * PAE.
          */
-        const unsigned iPD = iOldPDE / 256;
+        const unsigned iPD = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
         unsigned iPDE = iOldPDE * 2 % 512;
-        pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
+        pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
-        pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;
+        pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
 #endif
         iPDE++;
-        pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
+        AssertFatal(iPDE < 512);
+        pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
-        pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;
+        pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
 
         /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
-        pPGM->pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
+        pVM->pgm.s.pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
 #endif
     }
 }
 
 
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+/**
+ * Clears all PDEs involved with the mapping in the shadow page table.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pMap        Pointer to the mapping in question.
+ * @param   iOldPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
+ */
+static void pgmR3MapClearShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iOldPDE)
+{
+    unsigned i = pMap->cPTs;
+    PGMMODE  enmShadowMode = PGMGetShadowMode(pVM);
+
+    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
+        return;
+
+    iOldPDE += i;
+    while (i-- > 0)
+    {
+        iOldPDE--;
+
+        switch(enmShadowMode)
+        {
+            case PGMMODE_32_BIT:
+            {
+                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
+                AssertFatal(pShw32BitPd);
+
+                pShw32BitPd->a[iOldPDE].u = 0;
+                break;
+            }
+
+            case PGMMODE_PAE:
+            case PGMMODE_PAE_NX:
+            {
+                PX86PDPT  pPdpt     = NULL;
+                PX86PDPAE pShwPaePd = NULL;
+
+                const unsigned iPD = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
+                unsigned iPDE = iOldPDE * 2 % 512;
+                pPdpt     = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
+                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
+                AssertFatal(pShwPaePd);
+
+                pShwPaePd->a[iPDE].u = 0;
+
+                iPDE++;
+                AssertFatal(iPDE < 512);
+
+                pShwPaePd->a[iPDE].u = 0;
+                /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
+                pPdpt->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
+                break;
+            }
+        }
+    }
+}
+#endif
+
 /**
- * Sets all PDEs involved with the mapping.
+ * Sets all PDEs involved with the mapping in the shadow and intermediate page tables.
  *
  * @param   pVM         The VM handle.
…
     Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s) || PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);
 
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    pgmR3MapSetShadowPDEs(pVM, pMap, iNewPDE);
+#endif
+
     /*
      * Init the page tables and insert them into the page directories.
…
          */
 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
         if (    pgmMapAreMappingsEnabled(&pVM->pgm.s)
             &&  pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present)
+        {
+            Assert(!(pPGM->pShw32BitPdR3->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
             pgmPoolFree(pVM, pPGM->pShw32BitPdR3->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
+        }
 #endif
         X86PDE Pde;
…
 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
         if (    pgmMapAreMappingsEnabled(&pVM->pgm.s)
             &&  pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
+        {
+            Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
             pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
+        }
 #endif
         X86PDEPAE PdePae0;
…
 #endif
         iPDE++;
+        AssertFatal(iPDE < 512);
 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
         if (    pgmMapAreMappingsEnabled(&pVM->pgm.s)
             &&  pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
+        {
+            Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
             pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
+        }
 #endif
         X86PDEPAE PdePae1;
…
     }
 }
 
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+/**
+ * Sets all PDEs involved with the mapping in the shadow page table.
+ *
+ * @param   pVM         The VM handle.
+ * @param   pMap        Pointer to the mapping in question.
+ * @param   iNewPDE     The index of the 32-bit PDE corresponding to the base of the mapping.
+ */
+static void pgmR3MapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE)
+{
+    PPGM    pPGM = &pVM->pgm.s;
+    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
+
+    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
+        return;
+
+    Assert(enmShadowMode <= PGMMODE_PAE_NX);
+
+    /*
+     * Init the page tables and insert them into the page directories.
+     */
+    unsigned i = pMap->cPTs;
+    iNewPDE += i;
+    while (i-- > 0)
+    {
+        iNewPDE--;
+
+        switch(enmShadowMode)
+        {
+            case PGMMODE_32_BIT:
+            {
+                PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
+                AssertFatal(pShw32BitPd);
+
+                if (pShw32BitPd->a[iNewPDE].n.u1Present)
+                {
+                    Assert(!(pShw32BitPd->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
+                    pgmPoolFree(pVM, pShw32BitPd->a[iNewPDE].u & X86_PDE_PG_MASK, pVM->pgm.s.pShwPageCR3R3->idx, iNewPDE);
+                }
+
+                X86PDE Pde;
+                /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
+                Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
+                pShw32BitPd->a[iNewPDE] = Pde;
+                break;
+            }
+
+            case PGMMODE_PAE:
+            case PGMMODE_PAE_NX:
+            {
+                PX86PDPT  pShwPdpt;
+                PX86PDPAE pShwPaePd;
+                const unsigned iPdPt = iNewPDE / 256;
+                unsigned iPDE = iNewPDE * 2 % 512;
+
+                pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
+                Assert(pShwPdpt);
+                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
+                AssertFatal(pShwPaePd);
+
+                PPGMPOOLPAGE pPoolPagePde = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK);
+                AssertFatal(pPoolPagePde);
+
+                if (pShwPaePd->a[iPDE].n.u1Present)
+                {
+                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
+                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
+                }
+
+                X86PDEPAE PdePae0;
+                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
+                pShwPaePd->a[iPDE] = PdePae0;
+
+                /* 2nd 2 MB PDE of the 4 MB region */
+                iPDE++;
+                AssertFatal(iPDE < 512);
+
+                if (pShwPaePd->a[iPDE].n.u1Present)
+                {
+                    Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
+                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePde->idx, iNewPDE);
+                }
+
+                X86PDEPAE PdePae1;
+                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
+                pShwPaePd->a[iPDE] = PdePae1;
+
+                /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
+                pShwPdpt->a[iPdPt].u |= PGM_PLXFLAGS_MAPPING;
+            }
+        }
+    }
+}
+#endif
 
 /**
…
      * Relocate the page table(s).
      */
-    pgmR3MapClearPDEs(&pVM->pgm.s, pMapping, iPDOld);
+    pgmR3MapClearPDEs(pVM, pMapping, iPDOld);
     pgmR3MapSetPDEs(pVM, pMapping, iPDNew);
…
             unsigned iPDSrc;
             PX86PDPAE pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, (RTGCPTR32)iPDPTE << X86_PDPT_SHIFT, &iPDSrc, NULL);
+
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+            /* It would be annoying to have to deal with a PD that isn't (yet) present in the guest PDPT. */
+            if (!pPDSrc)
+                continue;
+#endif
 
             /*
…
         return VINF_SUCCESS;
 
-    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
-    Assert(enmGuestMode <= PGMMODE_PAE_NX);
-
     /*
      * Iterate mappings.
      */
-    if (enmGuestMode == PGMMODE_32_BIT)
-    {
-        /*
-         * Resolve the page directory.
-         */
-        PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
-        Assert(pPD);
-
-        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
-        {
-            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
-            unsigned iPT = pCur->cPTs;
-            while (iPT-- > 0)
-                pPD->a[iPDE + iPT].u = 0;
-        }
-    }
-    else if (   enmGuestMode == PGMMODE_PAE
-             || enmGuestMode == PGMMODE_PAE_NX)
-    {
-        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
-        {
-            RTGCPTR  GCPtr = pCur->GCPtr;
-            unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-
-            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
-            while (iPT-- > 0)
-            {
-                PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
-                pPDE->u = 0;
-
-                GCPtr += (1 << X86_PD_PAE_SHIFT);
-            }
-        }
-    }
-    else
-        AssertFailed();
+    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+    {
+        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+
+        pgmR3MapSetShadowPDEs(pVM, pCur, iPDE);
+    }
 
     return VINF_SUCCESS;
…
         return VINF_SUCCESS;
 
-    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
-    Assert(enmGuestMode <= PGMMODE_PAE_NX);
-
     /*
      * Iterate mappings.
      */
-    if (enmGuestMode == PGMMODE_32_BIT)
-    {
-        /*
-         * Resolve the page directory.
-         */
-        PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
-        Assert(pPD);
-
-        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
-        {
-            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
-            unsigned iPT = pCur->cPTs;
-            while (iPT-- > 0)
-                pPD->a[iPDE + iPT].u = 0;
-        }
-    }
-    else if (   enmGuestMode == PGMMODE_PAE
-             || enmGuestMode == PGMMODE_PAE_NX)
-    {
-        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
-        {
-            RTGCPTR GCPtr = pCur->GCPtr;
-
-            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
-            while (iPT-- > 0)
-            {
-                PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
-                pPDE->u = 0;
-
-                GCPtr += (1 << X86_PD_PAE_SHIFT);
-            }
-        }
-
-        /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entries. (legacy PAE guest mode) */
-        PX86PDPT pPdpt = (PX86PDPT)pVM->pgm.s.pShwPageCR3R3;
-        for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
-            pPdpt->a[i].u &= ~PGM_PLXFLAGS_MAPPING;
-    }
-    else
-        AssertFailed();
-
+    for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+    {
+        unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+
+        pgmR3MapClearShadowPDEs(pVM, pCur, iPDE);
+    }
     return VINF_SUCCESS;
 }
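A detail that recurs throughout these PGMMap.cpp hunks, and that the new AssertFatal(iPDE < 512) checks guard, is the arithmetic mapping one 32-bit PDE (covering 4 MB) onto a pair of consecutive 2 MB PAE PDEs. A minimal standalone sketch of that conversion; illustrative only, not part of the changeset, and the function name is invented:

#include <assert.h>

/* One 32-bit page-directory entry spans 4 MB; PAE covers the same 4 GB
   with four page directories of 512 entries, each entry spanning 2 MB.
   A 32-bit PD index therefore selects one PAE PD and a pair of
   consecutive entries in it. */
static void paeIndexesFor32BitPde(unsigned iPde32, unsigned *piPd, unsigned *piPde)
{
    assert(iPde32 < 1024);        /* the 32-bit PD has 1024 entries */
    *piPd  = iPde32 / 256;        /* which of the 4 PAE PDs (1 GB each) */
    *piPde = iPde32 * 2 % 512;    /* first PDE of the 2 MB pair (always even) */
    assert(*piPde + 1 < 512);     /* the pair never crosses a PD boundary,
                                     which is what AssertFatal(iPDE < 512)
                                     re-checks after each iPDE++ above */
}

For example, 32-bit PDE 300 lands in PAE page directory 1 at entries 88 and 89, matching the iPD and iPDE values computed in pgmR3MapClearPDEs and pgmR3MapClearShadowPDEs.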
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r16232 → r16300

 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
         case PGMPOOLKIND_32BIT_PD:
+            Assert(iUserTable < X86_PG_ENTRIES);
+            break;
 # else
         case PGMPOOLKIND_ROOT_32BIT_PD:
-# endif
             Assert(iUserTable < X86_PG_ENTRIES);
             Assert(!(u.pau32[iUserTable] & PGM_PDFLAGS_MAPPING));
             break;
+# endif
 # if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
         case PGMPOOLKIND_ROOT_PAE_PD:
…
 #endif /* PGMPOOL_WITH_GCPHYS_TRACKING */
 
 
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
 /**
- * Clear references to shadowed pages in a PAE (legacy or 64 bits) page directory.
+ * Clear references to shadowed pages in a 32 bits page directory.
  *
  * @param   pPool   The pool.
  * @param   pPage   The page.
  * @param   pShwPD  The shadow page directory (mapping of the page).
  */
+DECLINLINE(void) pgmPoolTrackDerefPD(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PD pShwPD)
+{
+    for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
+    {
+        if (    pShwPD->a[i].n.u1Present
+            &&  !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
+           )
+        {
+            PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PG_MASK);
+            if (pSubPage)
+                pgmPoolTrackFreeUser(pPool, pSubPage, pPage->idx, i);
+            else
+                AssertFatalMsgFailed(("%x\n", pShwPD->a[i].u & X86_PDE_PG_MASK));
+            /** @todo 64-bit guests: have to ensure that we're not exhausting the dynamic mappings! */
+        }
+    }
+}
+#endif
+
+/**
+ * Clear references to shadowed pages in a PAE (legacy or 64 bits) page directory.
+ *
+ * @param   pPool   The pool.
+ * @param   pPage   The page.
+ * @param   pShwPD  The shadow page directory (mapping of the page).
+ */
 DECLINLINE(void) pgmPoolTrackDerefPDPae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PDPAE pShwPD)
 {
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPD->a); i++)
     {
-        if (pShwPD->a[i].n.u1Present)
+        if (    pShwPD->a[i].n.u1Present
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+            &&  !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
+#endif
+           )
         {
             PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPD->a[i].u & X86_PDE_PAE_PG_MASK);
…
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPDPT->a); i++)
     {
-        if (pShwPDPT->a[i].n.u1Present)
+        if (    pShwPDPT->a[i].n.u1Present
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+            &&  !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
+#endif
+           )
         {
             PPGMPOOLPAGE pSubPage = (PPGMPOOLPAGE)RTAvloHCPhysGet(&pPool->HCPhysTree, pShwPDPT->a[i].u & X86_PDPE_PG_MASK);
…
             break;
 
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+        case PGMPOOLKIND_32BIT_PD:
+            pgmPoolTrackDerefPD(pPool, pPage, (PX86PD)pvShw);
+            break;
+
+        case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
+        case PGMPOOLKIND_PAE_PDPT:
+#endif
         case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
         case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
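Every PGMAllPool.cpp hunk adds the same kind of guard: shadow PD and PDPT entries flagged with PGM_PDFLAGS_MAPPING (PGM_PLXFLAGS_MAPPING at PDPT level) point at the hypervisor's own mapping page tables rather than pool-managed pages, so the deref walk has to skip them; otherwise RTAvloHCPhysGet would find no pool page for the address and the walk would land in the AssertFatalMsgFailed branch. A minimal sketch of the guard's shape; illustrative only, and the flag bit chosen here is an invented stand-in, not the real PGM_PDFLAGS_MAPPING value:

#include <stdbool.h>
#include <stdint.h>

#define SK_PDE_P        UINT64_C(0x001)  /* x86 PDE present bit */
#define SK_PDE_MAPPING  UINT64_C(0x400)  /* stand-in for PGM_PDFLAGS_MAPPING;
                                            the real flag is kept in bits the
                                            CPU ignores in a PDE */

/* Only present entries that are not hypervisor mappings reference
   pool pages and therefore need their back-references dropped. */
static bool skPdeNeedsDeref(uint64_t uPde)
{
    return (uPde & SK_PDE_P)
        && !(uPde & SK_PDE_MAPPING);
}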