Timestamp: Sep 2, 2009 8:41:52 AM
Location:  trunk
Files:     5 edited
Legend: unchanged lines carry no prefix, added lines are prefixed with +, removed lines with -.
trunk/include/VBox/pgm.h (r21217 -> r22695)

@@ -349,4 +349,5 @@
 VMMDECL(bool) PGMHandlerPhysicalIsRegistered(PVM pVM, RTGCPHYS GCPhys);
 VMMDECL(bool) PGMHandlerVirtualIsRegistered(PVM pVM, RTGCPTR GCPtr);
+VMMDECL(void) PGMPoolFlushPage(PVM pVM, RTGCPHYS GCPhys);
 VMMDECL(bool) PGMPhysIsA20Enabled(PVMCPU pVCpu);
 VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys);
trunk/src/VBox/VMM/PGMInternal.h (r22600 -> r22695)

@@ -1795,4 +1795,6 @@
     /** Profiling pgmPoolFree(). */
     STAMPROFILE StatFree;
+    /** Counting explicit flushes by PGMPoolFlushPage(). */
+    STAMCOUNTER StatForceFlushPage;
     /** Profiling time spent zeroing pages. */
     STAMPROFILE StatZeroPage;
trunk/src/VBox/VMM/PGMPhys.cpp (r22138 -> r22695)

@@ -369,12 +369,17 @@
         AssertFatalRC(rc2);
         PPGMPAGE pPage = pTlbe->pPage;
-#if 1
         if (PGM_PAGE_IS_MMIO(pPage))
-#else
-        if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-#endif
         {
             PGMPhysReleasePageMappingLock(pVM, pLock);
             rc = VERR_PGM_PHYS_PAGE_RESERVED;
+        }
+        else
+        if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
+        {
+            /* We *must* flush any corresponding pgm pool page here, otherwise we'll
+             * not be informed about writes and keep bogus gst->shw mappings around.
+             */
+            PGMPoolFlushPage(pVM, *pGCPhys);
+            Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
         }
     }

@@ -428,11 +433,15 @@
     {
         PPGMPAGE pPage = pTlbe->pPage;
-#if 1
         if (PGM_PAGE_IS_MMIO(pPage))
             rc = VERR_PGM_PHYS_PAGE_RESERVED;
-#else
+        else
         if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
-            rc = VERR_PGM_PHYS_PAGE_RESERVED;
-#endif
+        {
+            /* We *must* flush any corresponding pgm pool page here, otherwise we'll
+             * not be informed about writes and keep bogus gst->shw mappings around.
+             */
+            PGMPoolFlushPage(pVM, GCPhys);
+            Assert(!PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage));
+        }
         else
         {
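The comment inside both hunks above carries the reasoning behind the change: before handing out a writable mapping, an MMIO page is refused outright, while a page that still has active handlers first gets its shadow page-table copies flushed so later writes through the new pointer are not missed. The standalone sketch below only restates that decision order; the page flags and helper names (is_mmio via fMmio, has_active_handlers via fActiveHandlers, flush_pool_page) are placeholders for illustration and are not the real PGM API.

#include <stdbool.h>
#include <stdio.h>

#define ERR_PAGE_RESERVED  (-1)   /* stand-in for VERR_PGM_PHYS_PAGE_RESERVED */

/* Hypothetical page descriptor with just the two flags the hunks test. */
typedef struct Page
{
    bool fMmio;            /* MMIO page: never hand out a writable pointer      */
    bool fActiveHandlers;  /* access handlers registered: shadow PTs may exist  */
} Page;

/* Placeholder for PGMPoolFlushPage(): drop any shadow copies of this page.
 * In the changeset the Assert() right after the call expects the handlers
 * to be gone once the monitored pool page has been flushed. */
static void flush_pool_page(Page *pPage)
{
    pPage->fActiveHandlers = false;
}

/* Mirrors the new decision order: MMIO -> refuse; handlers -> flush, then map. */
static int get_writable_ptr(Page *pPage)
{
    if (pPage->fMmio)
        return ERR_PAGE_RESERVED;
    if (pPage->fActiveHandlers)
        flush_pool_page(pPage);   /* otherwise stale gst->shw mappings would
                                     miss the writes done through the pointer */
    return 0;                     /* success: caller may now write the page */
}

int main(void)
{
    Page mmio    = { true,  false };
    Page handled = { false, true  };
    printf("mmio:    %d\n", get_writable_ptr(&mmio));     /* prints -1 */
    printf("handled: %d\n", get_writable_ptr(&handled));  /* prints  0 */
    return 0;
}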
trunk/src/VBox/VMM/PGMPool.cpp (r22510 -> r22695)

@@ -325,4 +325,5 @@
     STAM_REG(pVM, &pPool->StatFlushPage, STAMTYPE_PROFILE, "/PGM/Pool/FlushPage", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFlushPage.");
     STAM_REG(pVM, &pPool->StatFree, STAMTYPE_PROFILE, "/PGM/Pool/Free", STAMUNIT_TICKS_PER_CALL, "Profiling of pgmPoolFree.");
+    STAM_REG(pVM, &pPool->StatForceFlushPage, STAMTYPE_COUNTER, "/PGM/Pool/FlushForce", STAMUNIT_OCCURENCES, "Counting explicit flushes by PGMPoolFlushPage().");
     STAM_REG(pVM, &pPool->StatZeroPage, STAMTYPE_PROFILE, "/PGM/Pool/ZeroPage", STAMUNIT_TICKS_PER_CALL, "Profiling time spent zeroing pages. Overlaps with Alloc.");
 # ifdef PGMPOOL_WITH_USER_TRACKING
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r22605 -> r22695)

@@ -1380,4 +1380,5 @@
 DECLINLINE(void) pgmPoolTrackCheckPTPaePae(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PX86PTPAE pShwPT, PCX86PTPAE pGstPT)
 {
+    unsigned cErrors = 0;
     for (unsigned i = 0; i < RT_ELEMENTS(pShwPT->a); i++)
     {

@@ -1386,5 +1387,55 @@
             RTHCPHYS HCPhys = -1;
             int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pGstPT->a[i].u & X86_PTE_PAE_PG_MASK, &HCPhys);
-            AssertMsg(rc == VINF_SUCCESS && (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) == HCPhys, ("rc=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, pGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
+            if (    rc != VINF_SUCCESS
+                ||  (pShwPT->a[i].u & X86_PTE_PAE_PG_MASK) != HCPhys)
+            {
+                RTHCPHYS HCPhysPT = -1;
+                Log(("rc=%d idx=%d guest %RX64 shw=%RX64 vs %RHp\n", rc, i, pGstPT->a[i].u, pShwPT->a[i].u, HCPhys));
+                cErrors++;
+
+                int rc = PGMPhysGCPhys2HCPhys(pPool->CTX_SUFF(pVM), pPage->GCPhys, &HCPhysPT);
+                AssertRC(rc);
+
+                for (unsigned i = 0; i < pPool->cCurPages; i++)
+                {
+                    PPGMPOOLPAGE pTempPage = &pPool->aPages[i];
+
+                    if (pTempPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT)
+                    {
+                        PX86PTPAE pShwPT2 = (PX86PTPAE)PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pTempPage);
+
+                        for (unsigned j = 0; j < RT_ELEMENTS(pShwPT->a); j++)
+                        {
+                            if (    pShwPT2->a[j].n.u1Present
+                                &&  pShwPT2->a[j].n.u1Write
+                                &&  ((pShwPT2->a[j].u & X86_PTE_PAE_PG_MASK) == HCPhysPT))
+                            {
+                                Log(("GCPhys=%RGp idx=%d %RX64 vs %RX64\n", pTempPage->GCPhys, j, pShwPT->a[j].u, pShwPT2->a[j].u));
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+    Assert(!cErrors);
+}
+
+void pgmPoolTrackCheckAllPTPaePae(pVM)
+{
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+
+    for (unsigned i = 0; i < pPool->cCurPages; i++)
+    {
+        PPGMPOOLPAGE pPage = &pPool->aPages[i];
+
+        if (    pPage->enmKind == PGMPOOLKIND_PAE_PT_FOR_PAE_PT
+            &&  !pPage->fDirty)
+        {
+            void *pvShw = PGMPOOL_PAGE_2_LOCKED_PTR(pPool->CTX_SUFF(pVM), pPage);
+            void *pvGst;
+            int rc = PGM_GCPHYS_2_PTR(pPool->CTX_SUFF(pVM), pPage->GCPhys, &pvGst); AssertReleaseRC(rc);
+
+            pgmPoolTrackCheckPTPaePae(pPool, pPage, (PX86PTPAE)pvShw, (PCX86PTPAE)pvGst);
+        }
     }
 }

@@ -4669,4 +4720,80 @@
 }
 
+/**
+ * Flush the specified page if present
+ *
+ * @param   pVM     The VM handle.
+ * @param   GCPhys  Guest physical address of the page to flush
+ */
+VMMDECL(void) PGMPoolFlushPage(PVM pVM, RTGCPHYS GCPhys)
+{
+#ifdef PGMPOOL_WITH_CACHE
+    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
+
+    /*
+     * Look up the GCPhys in the hash.
+     */
+    GCPhys = GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1);
+    unsigned i = pPool->aiHash[PGMPOOL_HASH(GCPhys)];
+    if (i == NIL_PGMPOOL_IDX)
+        return;
+
+    do
+    {
+        PPGMPOOLPAGE pPage = &pPool->aPages[i];
+        if (pPage->GCPhys - GCPhys < PAGE_SIZE)
+        {
+            switch (pPage->enmKind)
+            {
+                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
+                case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
+                case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
+                case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
+                case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
+                case PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD:
+                case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
+                case PGMPOOLKIND_64BIT_PML4:
+                case PGMPOOLKIND_32BIT_PD:
+                case PGMPOOLKIND_PAE_PDPT:
+                {
+                    Log(("PGMPoolFlushPage: found pgm pool pages for %RGp\n", GCPhys));
+                    STAM_COUNTER_INC(&pPool->StatForceFlushPage);
+                    Assert(!pgmPoolIsPageLocked(&pVM->pgm.s, pPage));
+                    pgmPoolMonitorChainFlush(pPool, pPage);
+                    return;
+                }
+
+                /* ignore, no monitoring. */
+                case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
+                case PGMPOOLKIND_PAE_PT_FOR_PAE_2MB:
+                case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
+                case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
+                case PGMPOOLKIND_PAE_PT_FOR_PHYS:
+                case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
+                case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
+                case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
+                case PGMPOOLKIND_EPT_PD_FOR_PHYS:
+                case PGMPOOLKIND_EPT_PT_FOR_PHYS:
+                case PGMPOOLKIND_ROOT_NESTED:
+                case PGMPOOLKIND_PAE_PD_PHYS:
+                case PGMPOOLKIND_PAE_PDPT_PHYS:
+                case PGMPOOLKIND_32BIT_PD_PHYS:
+                case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
+                    break;
+
+                default:
+                    AssertFatalMsgFailed(("enmKind=%d idx=%d\n", pPage->enmKind, pPage->idx));
+            }
+        }
+
+        /* next */
+        i = pPage->iNext;
+    } while (i != NIL_PGMPOOL_IDX);
+#endif
+    return;
+}
 
 #ifdef IN_RING3
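The lookup in the new PGMPoolFlushPage() follows the pool's usual pattern visible in the hunk above: mask the guest-physical address down to a page boundary, pick a bucket in aiHash via PGMPOOL_HASH, and walk that bucket's chain through each page's iNext index until NIL. The standalone sketch below mirrors that pattern with made-up names (PoolPage, pool_hash, NIL_IDX); it does not use the real PGM structures or macros.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE   4096u
#define HASH_SIZE   16u
#define NIL_IDX     0xffffu

/* Hypothetical stand-in for a pool page: keyed by guest-physical address,
 * chained to the next entry of the same hash bucket via an index. */
typedef struct PoolPage
{
    uint64_t GCPhys;   /* guest-physical address this page shadows */
    uint16_t iNext;    /* next index in the same bucket, or NIL_IDX */
} PoolPage;

static PoolPage g_aPages[64];
static uint16_t g_aiHash[HASH_SIZE];   /* bucket heads, NIL_IDX when empty */

static unsigned pool_hash(uint64_t GCPhys)
{
    return (unsigned)(GCPhys >> 12) % HASH_SIZE;   /* page number modulo bucket count */
}

/* Find the pool page shadowing GCPhys, mirroring the flush function's walk. */
static PoolPage *pool_lookup(uint64_t GCPhys)
{
    GCPhys &= ~(uint64_t)(PAGE_SIZE - 1);          /* align to the page boundary */
    uint16_t i = g_aiHash[pool_hash(GCPhys)];
    while (i != NIL_IDX)
    {
        PoolPage *pPage = &g_aPages[i];
        if (pPage->GCPhys - GCPhys < PAGE_SIZE)    /* same guest page? */
            return pPage;
        i = pPage->iNext;                          /* next entry in the chain */
    }
    return NULL;
}

int main(void)
{
    for (unsigned i = 0; i < HASH_SIZE; i++)
        g_aiHash[i] = NIL_IDX;

    /* Insert one entry for guest page 0x7000 into its bucket. */
    g_aPages[0].GCPhys = 0x7000;
    g_aPages[0].iNext  = NIL_IDX;
    g_aiHash[pool_hash(0x7000)] = 0;

    printf("0x7123 -> %s\n", pool_lookup(0x7123) ? "found" : "not found");
    printf("0x8000 -> %s\n", pool_lookup(0x8000) ? "found" : "not found");
    return 0;
}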