Changeset 22600 in vbox for trunk/src/VBox/VMM
- Timestamp:
- Aug 31, 2009 12:19:56 PM (15 years ago)
- Location:
- trunk/src/VBox/VMM
- Files:
- 5 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/PGMInternal.h
r22510 r22600 343 343 #endif 344 344 345 /** @def PGM_INVL_PG 345 /** @def PGM_INVL_PG_ALL_VCPU 346 346 * Invalidates a page on all VCPUs 347 347 * … … 350 350 */ 351 351 #ifdef IN_RC 352 # define PGM_INVL_ ALL_VCPU_PG(pVM, GCVirt) ASMInvalidatePage((void *)(GCVirt))352 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) ASMInvalidatePage((void *)(GCVirt)) 353 353 #elif defined(IN_RING0) 354 # define PGM_INVL_ ALL_VCPU_PG(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))354 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt)) 355 355 #else 356 # define PGM_INVL_ ALL_VCPU_PG(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt))356 # define PGM_INVL_PG_ALL_VCPU(pVM, GCVirt) HWACCMInvalidatePageOnAllVCpus(pVM, (RTGCPTR)(GCVirt)) 357 357 #endif 358 358 … … 1680 1680 #ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT 1681 1681 uint32_t idxDirty; 1682 RTGCPTR pvDirtyFault; 1682 1683 #else 1683 1684 uint32_t bPadding2; -
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r22473 r22600 838 838 /* 839 839 * Page was successfully synced, return to guest. 840 * First invalidate the page as it might be in the TLB. 840 841 */ 842 # if PGM_SHW_TYPE == PGM_TYPE_EPT 843 HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)pvFault); 844 # else 845 PGM_INVL_PG_ALL_VCPU(pVM, pvFault); 846 # endif 841 847 # ifdef VBOX_STRICT 842 848 RTGCPHYS GCPhys; … … 1483 1489 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1484 1490 PX86PTPAE pGstPT; 1485 1491 1486 1492 pGstPT = (PX86PTPAE)&pPool->aDirtyPages[pShwPage->idxDirty][0]; 1487 1493 pGstPT->a[iPTDst].u = PteSrc.u; -
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
r20808 r22600 926 926 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_DISABLED); 927 927 pgmUnlock(pVM); 928 #ifndef IN_RC929 HWACCMInvalidatePhysPage(pVM, GCPhysPage);930 #endif931 928 return VINF_SUCCESS; 932 929 } … … 1048 1045 1049 1046 pgmUnlock(pVM); 1050 #ifndef IN_RC1051 HWACCMInvalidatePhysPage(pVM, GCPhysPage);1052 #endif1053 1047 return VINF_SUCCESS; 1054 1048 } … … 1150 1144 LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage)); 1151 1145 pgmUnlock(pVM); 1152 #ifndef IN_RC1153 HWACCMInvalidatePhysPage(pVM, GCPhysPage);1154 #endif1155 1146 return VINF_SUCCESS; 1156 1147 } -
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r22537 r22600 1171 1171 pPage->pvLastAccessHandlerFault = pvFault; 1172 1172 pPage->cLastAccessHandlerCount = pVCpu->pgm.s.cPoolAccessHandler; 1173 if (pPage->cModifications > cMaxModifications)1173 if (pPage->cModifications >= cMaxModifications) 1174 1174 { 1175 1175 STAM_COUNTER_INC(&pPool->CTX_MID_Z(StatMonitor,FlushReinit)); … … 1185 1185 */ 1186 1186 bool fReused = false; 1187 bool fNotReusedNotForking = false; 1187 1188 if ( ( pPage->cModifications < cMaxModifications /** @todo #define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */ 1188 1189 || pgmPoolIsPageLocked(&pVM->pgm.s, pPage) … … 1276 1277 Log4(("pgmPoolAccessHandler: eax=%#x ecx=%#x edi=%#x esi=%#x rip=%RGv opcode=%d prefix=%#x\n", 1277 1278 pRegFrame->eax, pRegFrame->ecx, pRegFrame->edi, pRegFrame->esi, (RTGCPTR)pRegFrame->rip, pDis->pCurInstr->opcode, pDis->prefix)); 1279 fNotReusedNotForking = true; 1278 1280 } 1279 1281 … … 1282 1284 * leads to pgm pool trashing and an excessive amount of write faults due to page monitoring. 1283 1285 */ 1284 if ( !fReused1286 if ( pPage->cModifications >= cMaxModifications 1285 1287 && !fForcedFlush 1286 1288 && pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT 1287 && pPage->cModifications >= cMaxModifications) 1289 && ( fNotReusedNotForking 1290 || ( !pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault) 1291 && !pgmPoolMonitorIsForking(pPool, pDis, GCPhysFault & PAGE_OFFSET_MASK)) 1292 ) 1293 ) 1288 1294 { 1289 1295 Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage)); … … 1319 1325 } 1320 1326 1321 /* Temporarily allow write access to the page table again. */ 1322 rc = PGMHandlerPhysicalPageTempOff(pVM, pPage->GCPhys, pPage->GCPhys); 1323 if (rc == VINF_SUCCESS) 1324 { 1325 rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW); 1326 AssertMsg(rc == VINF_SUCCESS 1327 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. 
*/ 1328 || rc == VERR_PAGE_TABLE_NOT_PRESENT 1329 || rc == VERR_PAGE_NOT_PRESENT, 1330 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc)); 1331 1332 pgmPoolAddDirtyPage(pVM, pPool, pPage); 1333 1334 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a); 1335 pgmUnlock(pVM); 1336 return rc; 1327 /* The flushing above might fail for locked pages, so double check. */ 1328 if ( pPage->iMonitoredNext == NIL_PGMPOOL_IDX 1329 && pPage->iMonitoredPrev == NIL_PGMPOOL_IDX) 1330 { 1331 /* Temporarily allow write access to the page table again. */ 1332 rc = PGMHandlerPhysicalPageTempOff(pVM, pPage->GCPhys, pPage->GCPhys); 1333 if (rc == VINF_SUCCESS) 1334 { 1335 rc = PGMShwModifyPage(pVCpu, pvFault, 1, X86_PTE_RW, ~(uint64_t)X86_PTE_RW); 1336 AssertMsg(rc == VINF_SUCCESS 1337 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */ 1338 || rc == VERR_PAGE_TABLE_NOT_PRESENT 1339 || rc == VERR_PAGE_NOT_PRESENT, 1340 ("PGMShwModifyPage -> GCPtr=%RGv rc=%d\n", pvFault, rc)); 1341 1342 pgmPoolAddDirtyPage(pVM, pPool, pPage); 1343 pPage->pvDirtyFault = pvFault; 1344 1345 STAM_PROFILE_STOP(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), a); 1346 pgmUnlock(pVM); 1347 return rc; 1348 } 1337 1349 } 1338 1350 } … … 1359 1371 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT 1360 1372 /** 1373 * Check references to guest physical memory in a PAE / PAE page table. 1374 * 1375 * @param pPool The pool. 1376 * @param pPage The page. 1377 * @param pShwPT The shadow page table (mapping of the page). 1378 * @param pGstPT The guest page table. 
1379 */ 1380 DECLINLINE(void) pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT) 1381 { 1382 for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++) 1383 { 1384 if (pShwPT->a[i].n.u1Present) 1385 { 1386 RTHCPHYS HCPhys = -1; 1387 int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys); 1388 AssertMsg(rc == VINF_SUCCESS && (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) == HCPhys, ("rc=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pShwPT->a[i].u, HCPhys)); 1389 } 1390 } 1391 } 1392 1393 /** 1361 1394 * Clear references to guest physical memory in a PAE / PAE page table. 1362 1395 * … … 1366 1399 * @param pShwPT The shadow page table (mapping of the page). 1367 1400 * @param pGstPT The guest page table. 1368 * @param p GstPTThe old cached guest page table.1401 * @param pOldGstPT The old cached guest page table. 1369 1402 */ 1370 1403 DECLINLINE(unsigned) pgmPoolTrackFlushPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT, PCX86PTPAE pOldGstPT) … … 1446 1479 pPage->fDirty = false; 1447 1480 1481 #ifdef VBOX_STRICT 1482 uint64_t fFlags = 0; 1483 rc = PGMShwGetPage(VMMGetCpu(pVM), pPage->pvDirtyFault, &fFlags, NULL); 1484 AssertMsg( (rc == VINF_SUCCESS && !(fFlags & X86_PTE_RW)) 1485 /* In the SMP case the page table might be removed while we wait for the PGM lock in the trap handler. */ 1486 || rc == VERR_PAGE_TABLE_NOT_PRESENT 1487 || rc == VERR_PAGE_NOT_PRESENT, 1488 ("PGMShwGetPage -> GCPtr=%RGv rc=%d flags=%RX64\n", pPage->pvDirtyFault, rc, fFlags)); 1489 #endif 1490 1448 1491 /* This page is likely to be modified again, so reduce the nr of modifications just a bit here. 
*/ 1449 1492 Assert(pPage->cModifications); … … 1463 1506 } 1464 1507 1508 # ifndef IN_RING3 1465 1509 /** 1466 1510 * Add a new dirty page … … 1476 1520 Assert(PGMIsLocked(pVM)); 1477 1521 AssertCompile(RT_ELEMENTS(pPool->aIdxDirtyPages) == 8 || RT_ELEMENTS(pPool->aIdxDirtyPages) == 16); 1478 1479 if (pPage->fDirty) 1480 return; 1522 Assert(!pPage->fDirty); 1481 1523 1482 1524 idxFree = pPool->idxFreeDirtyPage; … … 1489 1531 AssertMsg(pPool->aIdxDirtyPages[idxFree] == NIL_PGMPOOL_IDX, ("idxFree=%d cDirtyPages=%d\n", idxFree, pPool->cDirtyPages)); 1490 1532 1533 Log(("Add dirty page %RGp (slot=%d)\n", pPage->GCPhys, idxFree)); 1534 1491 1535 /* Make a copy of the guest page table as we require valid GCPhys addresses when removing 1492 1536 * references to physical pages. (the HCPhys linear lookup is *extremely* expensive!) 1493 1537 */ 1538 void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage); 1494 1539 void *pvGst; 1495 1540 int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc); 1496 1541 memcpy(&pPool->aDirtyPages[idxFree][0], pvGst, PAGE_SIZE); 1542 pgmPoolTrackCheckPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst); 1497 1543 1498 1544 STAM_COUNTER_INC(&pPool->StatDirtyPage); 1499 Log(("Mark dirty page %RGp (slot=%d)\n", pPage->GCPhys, idxFree));1500 1545 pPage->fDirty = true; 1501 1546 pPage->idxDirty = idxFree; … … 1523 1568 return; 1524 1569 } 1525 1570 # endif /* !IN_RING3 */ 1526 1571 1527 1572 /** … … 3143 3188 { 3144 3189 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 3145 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow kill the pool otherwise. */3190 /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow kills the pool otherwise. */ 3146 3191 uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu); 3147 3192 # endif -
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r20374 r22600 367 367 HWACCMInvalidatePhysPage(pVM, (RTGCPHYS)GCPtr); 368 368 # else 369 PGM_INVL_ ALL_VCPU_PG(pVM, GCPtr);369 PGM_INVL_PG_ALL_VCPU(pVM, GCPtr); 370 370 # endif 371 371 }
Note: See TracChangeset for help on using the changeset viewer.