Changeset 36891 in vbox
- Timestamp: Apr 29, 2011 1:22:57 PM
- svn:sync-xref-src-repo-rev: 71469
- Location: trunk
- Files: 24 edited
trunk/include/VBox/vmm/pgm.h
r36557 r36891 465 465 VMMR3DECL(int) PGMR3PhysGetRange(PVM pVM, uint32_t iRange, PRTGCPHYS pGCPhysStart, PRTGCPHYS pGCPhysLast, 466 466 const char **ppszDesc, bool *pfIsMmio); 467 VMMR3DECL(int) PGMR3QueryVMMMemoryStats(PVM pVM, uint64_t *puTotalAllocSize, uint64_t *puTotalFreeSize, uint64_t *puTotalBalloonSize, uint64_t *puTotalSharedSize); 468 VMMR3DECL(int) PGMR3QueryMemoryStats(PVM pVM, uint64_t *pulTotalMem, uint64_t *pulPrivateMem, uint64_t *puTotalSharedMem, uint64_t *puTotalZeroMem); 467 VMMR3DECL(int) PGMR3QueryMemoryStats(PVM pVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem, uint64_t *pcbSharedMem, uint64_t *pcbZeroMem); 468 VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PVM pVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem, uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem); 469 469 470 VMMR3DECL(int) PGMR3PhysMMIORegister(PVM pVM, RTGCPHYS GCPhys, RTGCPHYS cb, 470 471 R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnHandlerR3, RTR3PTR pvUserR3, -
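The header change splits the statistics API: the old PGMR3QueryVMMMemoryStats becomes PGMR3QueryGlobalMemoryStats, and the per-VM PGMR3QueryMemoryStats gets byte-count (pcb) parameter names. A hypothetical ring-3 caller, based only on the prototypes above (variable names and logging are illustrative, not from the tree):

```c
uint64_t cbTotal, cbPrivate, cbShared, cbZero;
int rc = PGMR3QueryMemoryStats(pVM, &cbTotal, &cbPrivate, &cbShared, &cbZero);
if (RT_SUCCESS(rc))
    LogRel(("VM: total=%RU64 private=%RU64 shared=%RU64 zero=%RU64\n",
            cbTotal, cbPrivate, cbShared, cbZero));

uint64_t cbAlloc, cbFree, cbBallooned, cbSharedAll;
rc = PGMR3QueryGlobalMemoryStats(pVM, &cbAlloc, &cbFree, &cbBallooned, &cbSharedAll);
if (RT_SUCCESS(rc))
    LogRel(("Global: alloc=%RU64 free=%RU64 ballooned=%RU64 shared=%RU64\n",
            cbAlloc, cbFree, cbBallooned, cbSharedAll));
```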
trunk/src/VBox/Main/src-client/GuestImpl.cpp
r36128 r36891 383 383 uint64_t uFreeTotal, uAllocTotal, uBalloonedTotal, uSharedTotal; 384 384 *aMemFreeTotal = 0; 385 int rc = PGMR3Query VMMMemoryStats(pVM.raw(), &uAllocTotal, &uFreeTotal, &uBalloonedTotal, &uSharedTotal);385 int rc = PGMR3QueryGlobalMemoryStats(pVM.raw(), &uAllocTotal, &uFreeTotal, &uBalloonedTotal, &uSharedTotal); 386 386 AssertRC(rc); 387 387 if (rc == VINF_SUCCESS) -
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r36196 r36891 734 734 * Check for conflicts and pending CR3 monitoring updates. 735 735 */ 736 if (pgmMapAreMappingsFloating( &pVM->pgm.s))736 if (pgmMapAreMappingsFloating(pVM)) 737 737 { 738 738 if ( pgmGetMapping(pVM, GCPtrPage) … … 1444 1444 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAGE_MASK; 1445 1445 PPGMPAGE pPage; 1446 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysCR3, &pPage);1446 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage); 1447 1447 if (RT_SUCCESS(rc)) 1448 1448 { … … 1486 1486 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_PAE_PAGE_MASK; 1487 1487 PPGMPAGE pPage; 1488 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysCR3, &pPage);1488 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage); 1489 1489 if (RT_SUCCESS(rc)) 1490 1490 { … … 1534 1534 1535 1535 PPGMPAGE pPage; 1536 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);1536 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 1537 1537 if (RT_SUCCESS(rc)) 1538 1538 { … … 1596 1596 RTGCPHYS GCPhysCR3 = pVCpu->pgm.s.GCPhysCR3 & X86_CR3_AMD64_PAGE_MASK; 1597 1597 PPGMPAGE pPage; 1598 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysCR3, &pPage);1598 int rc = pgmPhysGetPageEx(pVM, GCPhysCR3, &pPage); 1599 1599 if (RT_SUCCESS(rc)) 1600 1600 { … … 1814 1814 if (RT_LIKELY(rc == VINF_SUCCESS)) 1815 1815 { 1816 if (pgmMapAreMappingsFloating( &pVM->pgm.s))1816 if (pgmMapAreMappingsFloating(pVM)) 1817 1817 pVCpu->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3; 1818 1818 } … … 1823 1823 pVCpu->pgm.s.GCPhysCR3 = GCPhysOldCR3; 1824 1824 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MAP_CR3; 1825 if (pgmMapAreMappingsFloating( &pVM->pgm.s))1825 if (pgmMapAreMappingsFloating(pVM)) 1826 1826 pVCpu->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3; 1827 1827 } … … 2362 2362 */ 2363 2363 int rc; 2364 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);2364 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 2365 2365 if (RT_LIKELY(pPage)) 2366 2366 { -
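A pattern repeated throughout this changeset: internal helpers such as pgmMapAreMappingsFloating and pgmPhysGetPageEx now take the VM handle (PVM) instead of a pointer to the embedded PGM instance data (&pVM->pgm.s). A sketch of the assumed shape of the mapping predicates after the change (the flag fields are taken from uses elsewhere in this diff; the exact definitions in the tree may differ):

```c
DECLINLINE(bool) pgmMapAreMappingsEnabled(PVM pVM)
{
    /* Assumption: "enabled" is simply the inverse of the disabled flag. */
    return !pVM->pgm.s.fMappingsDisabled;
}

DECLINLINE(bool) pgmMapAreMappingsFloating(PVM pVM)
{
    /* Floating = enabled but not fixed in place. */
    return pgmMapAreMappingsEnabled(pVM)
        && !pVM->pgm.s.fMappingsFixed;
}
```

Taking PVM lets these helpers reach state outside pgm.s — notably the new per-context RAM-range TLB arrays introduced below — without every caller spelling out the structure layout.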
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r36009 r36891 550 550 PPGMPAGE pPage; 551 551 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) 552 rc = pgmPhysGetPageEx( &pVM->pgm.s, GstWalk.Core.GCPhys, &pPage);552 rc = pgmPhysGetPageEx(pVM, GstWalk.Core.GCPhys, &pPage); 553 553 if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 554 554 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, … … 556 556 rc = PGM_BTH_NAME(SyncPage)(pVCpu, GstWalk.Pde, pvFault, 1, uErr); 557 557 # else 558 rc = pgmPhysGetPageEx( &pVM->pgm.s, (RTGCPHYS)pvFault, &pPage);558 rc = pgmPhysGetPageEx(pVM, (RTGCPHYS)pvFault, &pPage); 559 559 if (RT_SUCCESS(rc) && PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage)) 560 560 return VBOXSTRICTRC_TODO(PGM_BTH_NAME(Trap0eHandlerDoAccessHandlers)(pVCpu, uErr, pRegFrame, pvFault, pPage, … … 675 675 * (BTW, it's impossible to have physical access handlers in a mapping.) 676 676 */ 677 if (pgmMapAreMappingsEnabled( &pVM->pgm.s))677 if (pgmMapAreMappingsEnabled(pVM)) 678 678 { 679 679 PPGMMAPPING pMapping = pVM->pgm.s.CTX_SUFF(pMappings); … … 687 687 * The first thing we check is if we've got an undetected conflict. 688 688 */ 689 if (pgmMapAreMappingsFloating( &pVM->pgm.s))689 if (pgmMapAreMappingsFloating(pVM)) 690 690 { 691 691 unsigned iPT = pMapping->cb >> GST_PD_SHIFT; … … 751 751 # endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) */ 752 752 PPGMPAGE pPage; 753 rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);753 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 754 754 if (RT_FAILURE(rc)) 755 755 { … … 1261 1261 * Conflict - Let SyncPT deal with it to avoid duplicate code. 1262 1262 */ 1263 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));1263 Assert(pgmMapAreMappingsEnabled(pVM)); 1264 1264 Assert(PGMGetGuestMode(pVCpu) <= PGMMODE_PAE); 1265 1265 rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage); … … 1373 1373 else 1374 1374 { 1375 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));1375 Assert(pgmMapAreMappingsEnabled(pVM)); 1376 1376 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,InvalidatePagePDMappings)); 1377 1377 } … … 1413 1413 pPool->cPresent--; 1414 1414 1415 PPGMPAGE pPhysPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysPage);1415 PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhysPage); 1416 1416 AssertRelease(pPhysPage); 1417 1417 pgmTrackDerefGCPhys(pPool, pShwPage, pPhysPage, iPte); … … 1432 1432 * Find the guest address. 1433 1433 */ 1434 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges );1434 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 1435 1435 pRam; 1436 1436 pRam = pRam->CTX_SUFF(pNext)) … … 1625 1625 */ 1626 1626 PPGMPAGE pPage; 1627 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysPage, &pPage);1627 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage); 1628 1628 if (RT_SUCCESS(rc)) 1629 1629 { … … 1983 1983 || iPTDst == ((GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK) /* always sync GCPtrPage */ 1984 1984 || !CSAMDoesPageNeedScanning(pVM, GCPtrCurPage) 1985 || ( (pPage = pgmPhysGetPage( &pVM->pgm.s, pPteSrc->u & GST_PTE_PG_MASK))1985 || ( (pPage = pgmPhysGetPage(pVM, pPteSrc->u & GST_PTE_PG_MASK)) 1986 1986 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 1987 1987 ) … … 2030 2030 /* Find ram range. 
*/ 2031 2031 PPGMPAGE pPage; 2032 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);2032 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 2033 2033 if (RT_SUCCESS(rc)) 2034 2034 { … … 2442 2442 if (SHW_PTE_IS_TRACK_DIRTY(*pPteDst)) 2443 2443 { 2444 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GST_GET_PTE_GCPHYS(*pPteSrc));2444 PPGMPAGE pPage = pgmPhysGetPage(pVM, GST_GET_PTE_GCPHYS(*pPteSrc)); 2445 2445 SHWPTE PteDst = *pPteDst; 2446 2446 … … 2613 2613 if (PdeDst.u & PGM_PDFLAGS_MAPPING) 2614 2614 { 2615 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));2615 Assert(pgmMapAreMappingsEnabled(pVM)); 2616 2616 # ifndef IN_RING3 2617 2617 Log(("SyncPT: Conflict at %RGv\n", GCPtrPage)); … … 2817 2817 if ( ((PdeSrc.u & pPTSrc->a[iPTSrc].u) & (X86_PTE_RW | X86_PTE_US)) 2818 2818 || !CSAMDoesPageNeedScanning(pVM, GCPtrCur) 2819 || ( (pPage = pgmPhysGetPage( &pVM->pgm.s, GST_GET_PTE_GCPHYS(PteSrc)))2819 || ( (pPage = pgmPhysGetPage(pVM, GST_GET_PTE_GCPHYS(PteSrc))) 2820 2820 && PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage)) 2821 2821 ) … … 2841 2841 * 2842 2842 * We'll walk the ram range list in parallel and optimize lookups. 2843 * We will only sync on shadow page table at a time.2843 * We will only sync one shadow page table at a time. 2844 2844 */ 2845 2845 STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,SyncPT4M)); … … 2888 2888 GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u, GCPtr, 2889 2889 GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : "")); 2890 PPGMRAMRANGE pRam = p VM->pgm.s.CTX_SUFF(pRamRanges);2890 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 2891 2891 unsigned iPTDst = 0; 2892 2892 while ( iPTDst < RT_ELEMENTS(pPTDst->a) 2893 2893 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) 2894 2894 { 2895 /* Advance ram range list. */2896 while (pRam && GCPhys > pRam->GCPhysLast)2897 pRam = pRam->CTX_SUFF(pNext);2898 2895 if (pRam && GCPhys >= pRam->GCPhys) 2899 2896 { … … 2970 2967 } while ( iPTDst < RT_ELEMENTS(pPTDst->a) 2971 2968 && GCPhys <= pRam->GCPhysLast); 2969 2970 /* Advance ram range list. */ 2971 while (pRam && GCPhys > pRam->GCPhysLast) 2972 pRam = pRam->CTX_SUFF(pNext); 2972 2973 } 2973 2974 else if (pRam) … … 3076 3077 /* Check if we allocated a big page before for this 2 MB range. */ 3077 3078 PPGMPAGE pPage; 3078 rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPtrPage & X86_PDE2M_PAE_PG_MASK, &pPage);3079 rc = pgmPhysGetPageEx(pVM, GCPtrPage & X86_PDE2M_PAE_PG_MASK, &pPage); 3079 3080 if (RT_SUCCESS(rc)) 3080 3081 { … … 3585 3586 * Nested / EPT - almost no work. 3586 3587 */ 3587 Assert(!pgmMapAreMappingsEnabled( &pVM->pgm.s));3588 Assert(!pgmMapAreMappingsEnabled(pVM)); 3588 3589 return VINF_SUCCESS; 3589 3590 … … 3593 3594 * out the shadow parts when the guest modifies its tables. 3594 3595 */ 3595 Assert(!pgmMapAreMappingsEnabled( &pVM->pgm.s));3596 Assert(!pgmMapAreMappingsEnabled(pVM)); 3596 3597 return VINF_SUCCESS; 3597 3598 … … 3603 3604 * are enabled and not fixed. 
3604 3605 */ 3605 if (pgmMapAreMappingsFloating( &pVM->pgm.s))3606 if (pgmMapAreMappingsFloating(pVM)) 3606 3607 { 3607 3608 int rc = pgmMapResolveConflicts(pVM); … … 3614 3615 } 3615 3616 # else 3616 Assert(!pgmMapAreMappingsEnabled( &pVM->pgm.s));3617 Assert(!pgmMapAreMappingsEnabled(pVM)); 3617 3618 # endif 3618 3619 return VINF_SUCCESS; … … 3681 3682 AssertRCReturn(rc, 1); 3682 3683 HCPhys = NIL_RTHCPHYS; 3683 rc = pgmRamGCPhys2HCPhys( &pVM->pgm.s, cr3 & GST_CR3_PAGE_MASK, &HCPhys);3684 rc = pgmRamGCPhys2HCPhys(pVM, cr3 & GST_CR3_PAGE_MASK, &HCPhys); 3684 3685 AssertMsgReturn(HCPhys == HCPhysShw, ("HCPhys=%RHp HCPhyswShw=%RHp (cr3)\n", HCPhys, HCPhysShw), false); 3685 3686 # if PGM_GST_TYPE == PGM_TYPE_32BIT && defined(IN_RING3) … … 3869 3870 if (PdeDst.u & PGM_PDFLAGS_MAPPING) 3870 3871 { 3871 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));3872 Assert(pgmMapAreMappingsEnabled(pVM)); 3872 3873 if ((PdeDst.u & X86_PDE_AVL_MASK) != PGM_PDFLAGS_MAPPING) 3873 3874 { … … 3948 3949 } 3949 3950 3950 PPGMPAGE pPhysPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysGst);3951 PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhysGst); 3951 3952 if (!pPhysPage) 3952 3953 { … … 4062 4063 # endif 4063 4064 4064 pPhysPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysGst);4065 pPhysPage = pgmPhysGetPage(pVM, GCPhysGst); 4065 4066 if (!pPhysPage) 4066 4067 { … … 4294 4295 } 4295 4296 # endif 4296 pPhysPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysGst);4297 pPhysPage = pgmPhysGetPage(pVM, GCPhysGst); 4297 4298 if (!pPhysPage) 4298 4299 { … … 4416 4417 RTHCPHYS HCPhysGuestCR3; 4417 4418 pgmLock(pVM); 4418 PPGMPAGE pPageCR3 = pgmPhysGetPage( &pVM->pgm.s, GCPhysCR3);4419 PPGMPAGE pPageCR3 = pgmPhysGetPage(pVM, GCPhysCR3); 4419 4420 AssertReturn(pPageCR3, VERR_INTERNAL_ERROR_2); 4420 4421 HCPhysGuestCR3 = PGM_PAGE_GET_HCPHYS(pPageCR3); … … 4464 4465 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK; 4465 4466 pgmLock(pVM); 4466 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);4467 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 4467 4468 AssertReturn(pPage, VERR_INTERNAL_ERROR_2); 4468 4469 HCPhys = PGM_PAGE_GET_HCPHYS(pPage); -
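The 4 MB SyncPT loop in this file is also reorganized: the RAM-range cursor is seeded with the new TLB-backed pgmPhysGetRangeAtOrAbove() and advanced at the bottom of the loop, instead of re-walking the list from the head on every iteration. Condensed, the resulting pattern looks roughly like this (PTE sync details elided; illustrative, not verbatim):

```c
PPGMRAMRANGE pRam   = pgmPhysGetRangeAtOrAbove(pVM, GCPhys);
unsigned     iPTDst = 0;
while (iPTDst < RT_ELEMENTS(pPTDst->a))
{
    if (pRam && GCPhys >= pRam->GCPhys)
    {
        do
        {
            /* ... sync one shadow PTE from &pRam->aPages[...] ... */
            GCPhys += PAGE_SIZE;
            iPTDst++;
        } while (   iPTDst < RT_ELEMENTS(pPTDst->a)
                 && GCPhys <= pRam->GCPhysLast);

        /* Only now advance the cursor, and only if we ran past the range. */
        while (pRam && GCPhys > pRam->GCPhysLast)
            pRam = pRam->CTX_SUFF(pNext);
    }
    else
    {
        /* ... GCPhys falls before the next range (or past the last one):
           treat the pages as unassigned and skip ahead ... */
    }
}
```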
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r32036 r36891 517 517 { 518 518 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS) 519 pgmHandlerVirtualClearPage( &pVM->pgm.s, pCur, iPage);519 pgmHandlerVirtualClearPage(pVM, pCur, iPage); 520 520 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL 521 521 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias, … … 540 540 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS) 541 541 { 542 pgmHandlerVirtualClearPage( &pVM->pgm.s, pCur, iPage);542 pgmHandlerVirtualClearPage(pVM, pCur, iPage); 543 543 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL 544 544 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias, … … 567 567 { 568 568 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS) 569 pgmHandlerVirtualClearPage( &pVM->pgm.s, pCur, iPage);569 pgmHandlerVirtualClearPage(pVM, pCur, iPage); 570 570 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL 571 571 AssertReleaseMsg(!pCur->aPhysToVirt[iPage].offNextAlias, … … 590 590 if (pCur->aPhysToVirt[iPage].Core.Key != NIL_RTGCPHYS) 591 591 { 592 pgmHandlerVirtualClearPage( &pVM->pgm.s, pCur, iPage);592 pgmHandlerVirtualClearPage(pVM, pCur, iPage); 593 593 pCur->aPhysToVirt[iPage].Core.Key = NIL_RTGCPHYS; 594 594 pState->fTodo |= PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL; -
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
r35346 r36891 120 120 * There is no apparent need to support ranges which cover more than one ram range. 121 121 */ 122 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges); 123 while (pRam && GCPhys > pRam->GCPhysLast) 124 pRam = pRam->CTX_SUFF(pNext); 125 if ( !pRam 126 || GCPhysLast < pRam->GCPhys 127 || GCPhys > pRam->GCPhysLast) 122 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys); 123 if ( !pRam 124 || GCPhysLast < pRam->GCPhys 125 || GCPhys > pRam->GCPhysLast) 128 126 { 129 127 #ifdef IN_RING3 … … 306 304 if (GCPhysStart & PAGE_OFFSET_MASK) 307 305 { 308 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysStart);306 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysStart); 309 307 if ( pPage 310 308 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE) … … 323 321 if (GCPhysLast & PAGE_OFFSET_MASK) 324 322 { 325 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysLast);323 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysLast); 326 324 if ( pPage 327 325 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_NONE) … … 387 385 { 388 386 PPGMPAGE pPage; 389 int rc = pgmPhysGetPageWithHintEx( &pVM->pgm.s, GCPhys, &pPage, ppRamHint);387 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, ppRamHint); 390 388 if ( RT_SUCCESS(rc) 391 389 && PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) < uState) … … 480 478 * Iterate the guest ram pages updating the state. 481 479 */ 482 RTUINT cPages = pCur->cPages;483 RTGCPHYS GCPhys = pCur->Core.Key;480 RTUINT cPages = pCur->cPages; 481 RTGCPHYS GCPhys = pCur->Core.Key; 484 482 PPGMRAMRANGE pRamHint = NULL; 485 PPGM pPGM = &pVM->pgm.s;486 483 for (;;) 487 484 { 488 485 PPGMPAGE pPage; 489 int rc = pgmPhysGetPageWithHintEx(p PGM, GCPhys, &pPage, &pRamHint);486 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint); 490 487 if (RT_SUCCESS(rc)) 491 488 { … … 562 559 * There is no apparent need to support ranges which cover more than one ram range. 
563 560 */ 564 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges); 565 while (pRam && GCPhys > pRam->GCPhysLast) 566 pRam = pRam->CTX_SUFF(pNext); 567 if ( pRam 568 && GCPhys <= pRam->GCPhysLast 569 && GCPhysLast >= pRam->GCPhys) 561 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys); 562 if ( pRam 563 && GCPhys <= pRam->GCPhysLast 564 && GCPhysLast >= pRam->GCPhys) 570 565 { 571 566 pCur->Core.Key = GCPhys; … … 865 860 { 866 861 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PhysHandlerReset)); /**@Todo move out of switch */ 867 PPGMRAMRANGE pRam = pgmPhysGetRange( &pVM->pgm.s, GCPhys);862 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys); 868 863 Assert(pRam); 869 864 Assert(pRam->GCPhys <= pCur->Core.Key); … … 979 974 */ 980 975 PPGMPAGE pPage; 981 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysPage, &pPage);976 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage); 982 977 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc); 983 978 if (PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) != PGM_PAGE_HNDL_PHYS_STATE_DISABLED) … … 1062 1057 */ 1063 1058 PPGMPAGE pPageRemap; 1064 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysPageRemap, &pPageRemap);1059 int rc = pgmPhysGetPageEx(pVM, GCPhysPageRemap, &pPageRemap); 1065 1060 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc); 1066 1061 AssertMsgReturnStmt(PGM_PAGE_GET_TYPE(pPageRemap) == PGMPAGETYPE_MMIO2, … … 1069 1064 1070 1065 PPGMPAGE pPage; 1071 rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysPage, &pPage);1066 rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage); 1072 1067 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc); 1073 1068 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO) … … 1182 1177 */ 1183 1178 PPGMPAGE pPage; 1184 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysPage, &pPage);1179 int rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage); 1185 1180 AssertReturnStmt(RT_SUCCESS_NP(rc), pgmUnlock(pVM), rc); 1186 1181 if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO) … … 1434 1429 */ 1435 1430 PPGMPAGE pPage; 1436 int rc = pgmPhysGetPageWithHintEx( &pVM->pgm.s, pPhys2Virt->Core.Key, &pPage, &pRamHint);1431 int rc = pgmPhysGetPageWithHintEx(pVM, pPhys2Virt->Core.Key, &pPage, &pRamHint); 1437 1432 if ( RT_SUCCESS(rc) 1438 1433 && PGM_PAGE_GET_HNDL_VIRT_STATE(pPage) < uState) … … 1643 1638 } 1644 1639 1645 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysGst);1640 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhysGst); 1646 1641 if (!pPage) 1647 1642 { … … 1688 1683 * Check the RAM flags against the handlers. 1689 1684 */ 1690 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges ); pRam; pRam = pRam->CTX_SUFF(pNext))1691 { 1692 const u nsignedcPages = pRam->cb >> PAGE_SHIFT;1693 for (u nsignediPage = 0; iPage < cPages; iPage++)1685 for (PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRangesX); pRam; pRam = pRam->CTX_SUFF(pNext)) 1686 { 1687 const uint32_t cPages = pRam->cb >> PAGE_SHIFT; 1688 for (uint32_t iPage = 0; iPage < cPages; iPage++) 1694 1689 { 1695 1690 PGMPAGE const *pPage = &pRam->aPages[iPage]; -
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
r35346 r36891 270 270 void pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE) 271 271 { 272 Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled( &pVM->pgm.s)));273 274 if ( !pgmMapAreMappingsEnabled( &pVM->pgm.s)272 Log4(("pgmMapSetShadowPDEs new pde %x (mappings enabled %d)\n", iNewPDE, pgmMapAreMappingsEnabled(pVM))); 273 274 if ( !pgmMapAreMappingsEnabled(pVM) 275 275 || pVM->cCpus > 1) 276 276 return; … … 354 354 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdPt].u & X86_PDPE_PG_MASK); 355 355 AssertFatal(pPoolPagePd); 356 if (!pgmPoolIsPageLocked( &pVM->pgm.s,pPoolPagePd))356 if (!pgmPoolIsPageLocked(pPoolPagePd)) 357 357 pgmPoolLockPage(pPool, pPoolPagePd); 358 358 #ifdef VBOX_STRICT … … 424 424 void pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE, bool fDeactivateCR3) 425 425 { 426 Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled( &pVM->pgm.s), fDeactivateCR3));426 Log(("pgmMapClearShadowPDEs: old pde %x (cPTs=%x) (mappings enabled %d) fDeactivateCR3=%RTbool\n", iOldPDE, pMap->cPTs, pgmMapAreMappingsEnabled(pVM), fDeactivateCR3)); 427 427 428 428 /* 429 429 * Skip this if disabled or if it doesn't apply. 430 430 */ 431 if ( !pgmMapAreMappingsEnabled( &pVM->pgm.s)431 if ( !pgmMapAreMappingsEnabled(pVM) 432 432 || pVM->cCpus > 1) 433 433 return; … … 526 526 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPage(pPool, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK); 527 527 AssertFatal(pPoolPagePd); 528 if (pgmPoolIsPageLocked( &pVM->pgm.s,pPoolPagePd))528 if (pgmPoolIsPageLocked(pPoolPagePd)) 529 529 pgmPoolUnlockPage(pPool, pPoolPagePd); 530 530 } … … 630 630 * Can skip this if mappings are disabled. 631 631 */ 632 if (!pgmMapAreMappingsEnabled( &pVM->pgm.s))632 if (!pgmMapAreMappingsEnabled(pVM)) 633 633 return; 634 634 … … 665 665 * Skip this if disabled or if it doesn't apply. 666 666 */ 667 if ( !pgmMapAreMappingsEnabled( &pVM->pgm.s)667 if ( !pgmMapAreMappingsEnabled(pVM) 668 668 || pVM->cCpus > 1) 669 669 return VINF_SUCCESS; … … 702 702 * Skip this if disabled or if it doesn't apply. 703 703 */ 704 if ( !pgmMapAreMappingsEnabled( &pVM->pgm.s)704 if ( !pgmMapAreMappingsEnabled(pVM) 705 705 || pVM->cCpus > 1) 706 706 return VINF_SUCCESS; … … 733 733 * Can skip this if mappings are safely fixed. 734 734 */ 735 if (!pgmMapAreMappingsFloating( &pVM->pgm.s))735 if (!pgmMapAreMappingsFloating(pVM)) 736 736 return false; 737 737 -
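pgmPoolIsPageLocked() similarly drops its PPGM argument here; whether a pool page is locked can be decided from the page alone. Assumed shape after the change (the cLocked counter is an inference from the pgmPoolLockPage/pgmPoolUnlockPage calls visible in this diff):

```c
DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage)
{
    return pPage->cLocked > 0; /* assumption: lock/unlock maintain a per-page counter */
}
```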
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r36639 r36891 141 141 142 142 #endif /* IN_RING3 */ 143 #ifdef PGM_USE_RAMRANGE_TLB 144 145 /** 146 * Invalidates the RAM range TLBs. 147 * 148 * @param pVM The VM handle. 149 */ 150 void pgmPhysInvalidRamRangeTlbs(PVM pVM) 151 { 152 pgmLock(pVM); 153 for (uint32_t i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++) 154 { 155 pVM->pgm.s.apRamRangesTlbR3[i] = NIL_RTR3PTR; 156 pVM->pgm.s.apRamRangesTlbR0[i] = NIL_RTR0PTR; 157 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR; 158 } 159 pgmUnlock(pVM); 160 } 161 162 163 /** 164 * Slow worker for pgmPhysGetRange. 165 * 166 * @copydoc pgmPhysGetRange 167 */ 168 PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys) 169 { 170 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses)); 171 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 172 while (GCPhys > pRam->GCPhysLast) 173 { 174 pRam = pRam->CTX_SUFF(pNext); 175 if (!pRam) 176 return NULL; 177 } 178 if (GCPhys < pRam->GCPhys) 179 return NULL; 180 181 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 182 return pRam; 183 } 184 185 186 /** 187 * Slow worker for pgmPhysGetRangeAtOrAbove. 188 * 189 * @copydoc pgmPhysGetRangeAtOrAbove 190 */ 191 PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys) 192 { 193 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses)); 194 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 195 while (GCPhys > pRam->GCPhysLast) 196 { 197 pRam = pRam->CTX_SUFF(pNext); 198 if (!pRam) 199 return NULL; 200 } 201 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(pRam->GCPhys)] = pRam; 202 return pRam; 203 } 204 205 206 /** 207 * Slow worker for pgmPhysGetPage. 208 * 209 * @copydoc pgmPhysGetPage 210 */ 211 PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys) 212 { 213 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses)); 214 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 215 pRam; 216 pRam = pRam->CTX_SUFF(pNext)) 217 { 218 RTGCPHYS off = GCPhys - pRam->GCPhys; 219 if (off < pRam->cb) 220 { 221 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 222 return &pRam->aPages[off >> PAGE_SHIFT]; 223 } 224 } 225 return NULL; 226 } 227 228 229 /** 230 * Slow worker for pgmPhysGetPageEx. 231 * 232 * @copydoc pgmPhysGetPageEx 233 */ 234 int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage) 235 { 236 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses)); 237 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 238 pRam; 239 pRam = pRam->CTX_SUFF(pNext)) 240 { 241 RTGCPHYS off = GCPhys - pRam->GCPhys; 242 if (off < pRam->cb) 243 { 244 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 245 *ppPage = &pRam->aPages[off >> PAGE_SHIFT]; 246 return VINF_SUCCESS; 247 } 248 } 249 *ppPage = NULL; 250 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS; 251 } 252 253 254 /** 255 * Slow worker for pgmPhysGetPageAndRangeEx. 
256 * 257 * @copydoc pgmPhysGetPageAndRangeEx 258 */ 259 int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam) 260 { 261 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbMisses)); 262 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 263 pRam; 264 pRam = pRam->CTX_SUFF(pNext)) 265 { 266 RTGCPHYS off = GCPhys - pRam->GCPhys; 267 if (off < pRam->cb) 268 { 269 pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)] = pRam; 270 *ppRam = pRam; 271 *ppPage = &pRam->aPages[off >> PAGE_SHIFT]; 272 return VINF_SUCCESS; 273 } 274 } 275 276 *ppRam = NULL; 277 *ppPage = NULL; 278 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS; 279 } 280 281 #endif /* PGM_USE_RAMRANGE_TLB */ 143 282 144 283 /** … … 166 305 VMMDECL(bool) PGMPhysIsGCPhysValid(PVM pVM, RTGCPHYS GCPhys) 167 306 { 168 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);307 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 169 308 return pPage != NULL; 170 309 } … … 182 321 VMMDECL(bool) PGMPhysIsGCPhysNormal(PVM pVM, RTGCPHYS GCPhys) 183 322 { 184 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);323 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 185 324 return pPage 186 325 && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM; … … 205 344 pgmLock(pVM); 206 345 PPGMPAGE pPage; 207 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);346 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 208 347 if (RT_SUCCESS(rc)) 209 348 *pHCPhys = PGM_PAGE_GET_HCPHYS(pPage) | (GCPhys & PAGE_OFFSET_MASK); … … 392 531 PPGMPAGE pBasePage; 393 532 394 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysBase, &pBasePage);533 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pBasePage); 395 534 AssertRCReturn(rc, rc); /* paranoia; can't happen. */ 396 535 if (PGM_PAGE_GET_PDE_TYPE(pBasePage) == PGM_PAGE_PDE_TYPE_DONTCARE) … … 531 670 532 671 PPGMPAGE pFirstPage; 533 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhysBase, &pFirstPage);672 int rc = pgmPhysGetPageEx(pVM, GCPhysBase, &pFirstPage); 534 673 if ( RT_SUCCESS(rc) 535 674 && PGM_PAGE_GET_TYPE(pFirstPage) == PGMPAGETYPE_RAM) … … 550 689 { 551 690 PPGMPAGE pSubPage; 552 rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pSubPage);691 rc = pgmPhysGetPageEx(pVM, GCPhys, &pSubPage); 553 692 if ( RT_FAILURE(rc) 554 693 || PGM_PAGE_GET_TYPE(pSubPage) != PGMPAGETYPE_RAM /* Anything other than ram implies monitoring. */ … … 629 768 { 630 769 PPGMPAGE pPage; 631 int rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);770 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 632 771 AssertRCBreak(rc); 633 772 … … 845 984 { 846 985 /* Lookup the MMIO2 range and use pvR3 to calc the address. */ 847 PPGMRAMRANGE pRam = pgmPhysGetRange( &pVM->pgm.s, GCPhys);986 PPGMRAMRANGE pRam = pgmPhysGetRange(pVM, GCPhys); 848 987 AssertMsgReturn(pRam || !pRam->pvR3, ("pRam=%p pPage=%R[pgmpage]\n", pRam, pPage), VERR_INTERNAL_ERROR_2); 849 988 *ppv = (void *)((uintptr_t)pRam->pvR3 + (uintptr_t)((GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK) - pRam->GCPhys)); … … 1007 1146 } 1008 1147 1009 1010 1148 #if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) 1149 1011 1150 /** 1012 1151 * Load a guest page into the ring-3 physical TLB. … … 1018 1157 * @param GCPhys The guest physical address in question. 
1019 1158 */ 1020 int pgmPhysPageLoadIntoTlb(P PGM pPGM, RTGCPHYS GCPhys)1021 { 1022 Assert(PGMIsLocked( PGM2VM(pPGM)));1159 int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys) 1160 { 1161 Assert(PGMIsLocked(pVM)); 1023 1162 1024 1163 /* … … 1026 1165 * 99.8% of requests are expected to be in the first range. 1027 1166 */ 1028 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges); 1029 RTGCPHYS off = GCPhys - pRam->GCPhys; 1030 if (RT_UNLIKELY(off >= pRam->cb)) 1031 { 1032 do 1033 { 1034 pRam = pRam->CTX_SUFF(pNext); 1035 if (!pRam) 1036 { 1037 STAM_COUNTER_INC(&pPGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses)); 1038 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS; 1039 } 1040 off = GCPhys - pRam->GCPhys; 1041 } while (off >= pRam->cb); 1042 } 1043 1044 return pgmPhysPageLoadIntoTlbWithPage(pPGM, &pRam->aPages[off >> PAGE_SHIFT], GCPhys); 1167 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 1168 if (!pPage) 1169 { 1170 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses)); 1171 return VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS; 1172 } 1173 1174 return pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys); 1045 1175 } 1046 1176 … … 1053 1183 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address. 1054 1184 * 1055 * @param p PGM The PGM instance pointer.1185 * @param pVM The VM handle. 1056 1186 * @param pPage Pointer to the PGMPAGE structure corresponding to 1057 1187 * GCPhys. 1058 1188 * @param GCPhys The guest physical address in question. 1059 1189 */ 1060 int pgmPhysPageLoadIntoTlbWithPage(P PGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys)1061 { 1062 Assert(PGMIsLocked( PGM2VM(pPGM)));1063 STAM_COUNTER_INC(&p PGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses));1190 int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys) 1191 { 1192 Assert(PGMIsLocked(pVM)); 1193 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbMisses)); 1064 1194 1065 1195 /* … … 1067 1197 * Make a special case for the zero page as it is kind of special. 
1068 1198 */ 1069 PPGMPAGEMAPTLBE pTlbe = &p PGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];1199 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)]; 1070 1200 if ( !PGM_PAGE_IS_ZERO(pPage) 1071 1201 && !PGM_PAGE_IS_BALLOONED(pPage)) … … 1073 1203 void *pv; 1074 1204 PPGMPAGEMAP pMap; 1075 int rc = pgmPhysPageMapCommon( PGM2VM(pPGM), pPage, GCPhys, &pMap, &pv);1205 int rc = pgmPhysPageMapCommon(pVM, pPage, GCPhys, &pMap, &pv); 1076 1206 if (RT_FAILURE(rc)) 1077 1207 return rc; … … 1082 1212 else 1083 1213 { 1084 Assert(PGM_PAGE_GET_HCPHYS(pPage) == p PGM->HCPhysZeroPg);1214 Assert(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg); 1085 1215 pTlbe->pMap = NULL; 1086 pTlbe->pv = p PGM->CTXALLSUFF(pvZeroPg);1216 pTlbe->pv = pVM->pgm.s.CTXALLSUFF(pvZeroPg); 1087 1217 } 1088 1218 #ifdef PGM_WITH_PHYS_TLB … … 1098 1228 return VINF_SUCCESS; 1099 1229 } 1230 1100 1231 #endif /* !IN_RC && !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */ 1101 1102 1232 1103 1233 /** … … 1149 1279 #else 1150 1280 PPGMPAGEMAPTLBE pTlbe; 1151 rc = pgmPhysPageQueryTlbeWithPage( &pVM->pgm.s, pPage, GCPhys, &pTlbe);1281 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe); 1152 1282 if (RT_FAILURE(rc)) 1153 1283 return rc; … … 1194 1324 #else 1195 1325 PPGMPAGEMAPTLBE pTlbe; 1196 int rc = pgmPhysPageQueryTlbeWithPage( &pVM->pgm.s, pPage, GCPhys, &pTlbe);1326 int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe); 1197 1327 if (RT_FAILURE(rc)) 1198 1328 return rc; … … 1242 1372 */ 1243 1373 PPGMPAGE pPage; 1244 rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);1374 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 1245 1375 if (RT_SUCCESS(rc)) 1246 1376 { … … 1278 1408 */ 1279 1409 PPGMPAGEMAPTLBE pTlbe; 1280 rc = pgmPhysPageQueryTlbe( &pVM->pgm.s, GCPhys, &pTlbe);1410 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe); 1281 1411 if (RT_SUCCESS(rc)) 1282 1412 { … … 1292 1422 { 1293 1423 AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc)); 1294 rc = pgmPhysPageQueryTlbeWithPage( &pVM->pgm.s, pPage, GCPhys, &pTlbe);1424 rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe); 1295 1425 } 1296 1426 } … … 1365 1495 */ 1366 1496 PPGMPAGE pPage; 1367 rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);1497 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 1368 1498 if (RT_SUCCESS(rc)) 1369 1499 { … … 1400 1530 */ 1401 1531 PPGMPAGEMAPTLBE pTlbe; 1402 rc = pgmPhysPageQueryTlbe( &pVM->pgm.s, GCPhys, &pTlbe);1532 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe); 1403 1533 if (RT_SUCCESS(rc)) 1404 1534 { … … 1624 1754 PPGMRAMRANGE pRam; 1625 1755 PPGMPAGE pPage; 1626 int rc = pgmPhysGetPageAndRangeEx( &pVM->pgm.s, GCPhys, &pPage, &pRam);1756 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam); 1627 1757 if (RT_SUCCESS(rc)) 1628 1758 rc = pgmPhysGCPhys2CCPtrInternal(pVM, pPage, GCPhys, (void **)pR3Ptr); … … 1883 2013 * Copy loop on ram ranges. 1884 2014 */ 1885 PPGMRAMRANGE pRam = p VM->pgm.s.CTX_SUFF(pRamRanges);2015 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 1886 2016 for (;;) 1887 2017 { 1888 /* Find range. */1889 while (pRam && GCPhys > pRam->GCPhysLast)1890 pRam = pRam->CTX_SUFF(pNext);1891 2018 /* Inside range or not? */ 1892 2019 if (pRam && GCPhys >= pRam->GCPhys) … … 1953 2080 * Unassigned address space. 1954 2081 */ 1955 if (!pRam) 1956 break; 1957 size_t cb = pRam->GCPhys - GCPhys; 2082 size_t cb = pRam ? 
pRam->GCPhys - GCPhys : ~(size_t)0; 1958 2083 if (cb >= cbRead) 1959 2084 { … … 1967 2092 GCPhys += cb; 1968 2093 } 2094 2095 /* Advance range if necessary. */ 2096 while (pRam && GCPhys > pRam->GCPhysLast) 2097 pRam = pRam->CTX_SUFF(pNext); 1969 2098 } /* Ram range walk */ 1970 2099 … … 2398 2527 * Copy loop on ram ranges. 2399 2528 */ 2400 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);2529 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 2401 2530 for (;;) 2402 2531 { 2403 /* Find range. */2404 while (pRam && GCPhys > pRam->GCPhysLast)2405 pRam = pRam->CTX_SUFF(pNext);2406 2532 /* Inside range or not? */ 2407 2533 if (pRam && GCPhys >= pRam->GCPhys) … … 2478 2604 GCPhys += cb; 2479 2605 } 2606 2607 /* Advance range if necessary. */ 2608 while (pRam && GCPhys > pRam->GCPhysLast) 2609 pRam = pRam->CTX_SUFF(pNext); 2480 2610 } /* Ram range walk */ 2481 2611 … … 3618 3748 PPGMPAGE pPage; 3619 3749 3620 pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);3750 pPage = pgmPhysGetPage(pVM, GCPhys); 3621 3751 if (pPage) 3622 3752 return (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage); … … 3624 3754 return PGMPAGETYPE_INVALID; 3625 3755 } 3756 -
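The block of *Slow workers added at the top of this file is the miss path of the new direct-mapped RAM-range TLB (guarded by PGM_USE_RAMRANGE_TLB): each worker resolves the range by walking the list and then caches it in apRamRangesTlb at slot PGM_RAMRANGE_TLB_IDX(GCPhys). The matching fast path presumably lives in an inline header and would look roughly like this (assumed, not verbatim; the real inline may differ in detail):

```c
DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVM pVM, RTGCPHYS GCPhys)
{
    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
    if (   pRam
        && GCPhys - pRam->GCPhys < pRam->cb)   /* hit: address inside the cached range */
    {
        STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits));
        return pRam;
    }
    return pgmPhysGetRangeSlow(pVM, GCPhys);   /* miss: walk the list and refill the slot */
}
```

A direct-mapped TLB like this trades one compare per lookup against occasional refills; pgmPhysInvalidRamRangeTlbs(), also added above, clears all three context arrays whenever the range list changes, and PGM.cpp below flushes the RC array again on relocation since raw-mode pointers move.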
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r36009 r36891 265 265 if ((uShw.pPDPae->a[iShw + i].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P)) 266 266 { 267 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));267 Assert(pgmMapAreMappingsEnabled(pVM)); 268 268 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 269 269 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw+i)); … … 292 292 if ((uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P)) 293 293 { 294 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));294 Assert(pgmMapAreMappingsEnabled(pVM)); 295 295 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 296 296 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2)); … … 371 371 if (uShw.pPD->a[iShw].u & PGM_PDFLAGS_MAPPING) 372 372 { 373 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));373 Assert(pgmMapAreMappingsEnabled(pVM)); 374 374 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 375 375 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); … … 403 403 if (uShw.pPD->a[iShw2].u & PGM_PDFLAGS_MAPPING) 404 404 { 405 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));405 Assert(pgmMapAreMappingsEnabled(pVM)); 406 406 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); 407 407 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); … … 449 449 if (uShw.pPDPae->a[iShw].u & PGM_PDFLAGS_MAPPING) 450 450 { 451 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));451 Assert(pgmMapAreMappingsEnabled(pVM)); 452 452 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 453 453 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); … … 486 486 && uShw.pPDPae->a[iShw2].u & PGM_PDFLAGS_MAPPING) 487 487 { 488 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));488 Assert(pgmMapAreMappingsEnabled(pVM)); 489 489 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 490 490 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); … … 527 527 if (uShw.pPDPT->a[iShw].u & PGM_PLXFLAGS_MAPPING) 528 528 { 529 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));529 Assert(pgmMapAreMappingsEnabled(pVM)); 530 530 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); 531 531 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); … … 558 558 if (uShw.pPDPT->a[iShw2].u & PGM_PLXFLAGS_MAPPING) 559 559 { 560 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));560 Assert(pgmMapAreMappingsEnabled(pVM)); 561 561 STAM_COUNTER_INC(&(pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZGuestCR3WriteConflict)); 562 562 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); … … 1125 1125 && pVCpu->pgm.s.cPoolAccessHandler == (pPage->cLastAccessHandlerCount + 1)) 1126 1126 { 1127 Log(("Possible page reuse cMods=%d -> %d (locked=%d type=%s)\n", pPage->cModifications, pPage->cModifications * 2, pgmPoolIsPageLocked( &pVM->pgm.s,pPage), pgmPoolPoolKindToStr(pPage->enmKind)));1127 Log(("Possible page reuse cMods=%d -> %d (locked=%d type=%s)\n", pPage->cModifications, pPage->cModifications * 2, pgmPoolIsPageLocked(pPage), pgmPoolPoolKindToStr(pPage->enmKind))); 1128 1128 Assert(pPage->cModifications < 32000); 1129 1129 pPage->cModifications = pPage->cModifications * 2; … … 1138 1138 1139 1139 if (pPage->cModifications >= cMaxModifications) 1140 Log(("Mod overflow %RGv cMods=%d (locked=%d type=%s)\n", pvFault, pPage->cModifications, pgmPoolIsPageLocked( &pVM->pgm.s,pPage), pgmPoolPoolKindToStr(pPage->enmKind)));1140 Log(("Mod overflow %RGv cMods=%d (locked=%d type=%s)\n", pvFault, 
pPage->cModifications, pgmPoolIsPageLocked(pPage), pgmPoolPoolKindToStr(pPage->enmKind))); 1141 1141 1142 1142 /* … … 1146 1146 bool fNotReusedNotForking = false; 1147 1147 if ( ( pPage->cModifications < cMaxModifications /** @todo #define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */ 1148 || pgmPoolIsPageLocked( &pVM->pgm.s,pPage)1148 || pgmPoolIsPageLocked(pPage) 1149 1149 ) 1150 1150 && !(fReused = pgmPoolMonitorIsReused(pVM, pVCpu, pRegFrame, pDis, pvFault)) … … 1255 1255 ) 1256 1256 { 1257 Assert(!pgmPoolIsPageLocked( &pVM->pgm.s,pPage));1257 Assert(!pgmPoolIsPageLocked(pPage)); 1258 1258 Assert(pPage->fDirty == false); 1259 1259 … … 2023 2023 * Call pgmPoolCacheUsed to move the page to the head of the age list. 2024 2024 */ 2025 if (!pgmPoolIsPageLocked( &pPool->CTX_SUFF(pVM)->pgm.s,pPage))2025 if (!pgmPoolIsPageLocked(pPage)) 2026 2026 break; 2027 2027 LogFlow(("pgmPoolCacheFreeOne: refuse CR3 mapping\n")); … … 3451 3451 if (GCPhysBase != GCPhysPage) 3452 3452 { 3453 pLargePage = pgmPhysGetPage( &pVM->pgm.s, GCPhysBase);3453 pLargePage = pgmPhysGetPage(pVM, GCPhysBase); 3454 3454 AssertFatal(pLargePage); 3455 3455 } … … 3707 3707 3708 3708 /* Safety precaution in case we change the paging for other modes too in the future. */ 3709 Assert(!pgmPoolIsPageLocked( &pPool->CTX_SUFF(pVM)->pgm.s,pPage));3709 Assert(!pgmPoolIsPageLocked(pPage)); 3710 3710 3711 3711 #ifdef VBOX_STRICT … … 4148 4148 * Clear references to guest physical memory. 4149 4149 * 4150 * This is the same as pgmPoolTracDerefGCPhys except that the guest physical address4151 * is assumed to be correct, so the linear search can be skipped and we can assert4152 * at an earlier point.4150 * This is the same as pgmPoolTracDerefGCPhysHint except that the guest 4151 * physical address is assumed to be correct, so the linear search can be 4152 * skipped and we can assert at an earlier point. 4153 4153 * 4154 4154 * @param pPool The pool. … … 4161 4161 { 4162 4162 /* 4163 * Walk range list. 4164 */ 4165 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges); 4166 while (pRam) 4167 { 4168 RTGCPHYS off = GCPhys - pRam->GCPhys; 4169 if (off < pRam->cb) 4170 { 4171 /* does it match? */ 4172 const unsigned iPage = off >> PAGE_SHIFT; 4173 Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage])); 4163 * Lookup the page and check if it checks out before derefing it. 
4164 */ 4165 PPGMPAGE pPhysPage = pgmPhysGetPage(pPool->CTX_SUFF(pVM), GCPhys); 4166 if (pPhysPage) 4167 { 4168 Assert(PGM_PAGE_GET_HCPHYS(pPhysPage)); 4174 4169 #ifdef LOG_ENABLED 4175 RTHCPHYS HCPhysPage = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]);4176 4170 RTHCPHYS HCPhysPage = PGM_PAGE_GET_HCPHYS(pPhysPage); 4171 Log2(("pgmPoolTracDerefGCPhys %RHp vs %RHp\n", HCPhysPage, HCPhys)); 4177 4172 #endif 4178 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys) 4179 { 4180 Assert(pPage->cPresent); 4181 Assert(pPool->cPresent); 4182 pPage->cPresent--; 4183 pPool->cPresent--; 4184 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage], iPte); 4185 return; 4186 } 4187 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp; found page index %x HCPhys=%RHp\n", HCPhys, GCPhys, iPage, PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]))); 4188 break; 4189 } 4190 pRam = pRam->CTX_SUFF(pNext); 4173 if (PGM_PAGE_GET_HCPHYS(pPhysPage) == HCPhys) 4174 { 4175 Assert(pPage->cPresent); 4176 Assert(pPool->cPresent); 4177 pPage->cPresent--; 4178 pPool->cPresent--; 4179 pgmTrackDerefGCPhys(pPool, pPage, pPhysPage, iPte); 4180 return; 4181 } 4182 4183 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp; found page has HCPhys=%RHp\n", 4184 HCPhys, GCPhys, PGM_PAGE_GET_HCPHYS(pPhysPage))); 4191 4185 } 4192 4186 AssertFatalMsgFailed(("HCPhys=%RHp GCPhys=%RGp\n", HCPhys, GCPhys)); … … 4205 4199 void pgmPoolTracDerefGCPhysHint(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTHCPHYS HCPhys, RTGCPHYS GCPhysHint, uint16_t iPte) 4206 4200 { 4207 RTHCPHYS HCPhysExpected = 0xDEADBEEFDEADBEEFULL;4208 4209 4201 Log4(("pgmPoolTracDerefGCPhysHint %RHp %RGp\n", HCPhys, GCPhysHint)); 4210 4202 4211 4203 /* 4212 * Walk range list. 4213 */ 4214 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges); 4215 while (pRam) 4216 { 4217 RTGCPHYS off = GCPhysHint - pRam->GCPhys; 4218 if (off < pRam->cb) 4219 { 4220 /* does it match? */ 4221 const unsigned iPage = off >> PAGE_SHIFT; 4222 Assert(PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage])); 4223 if (PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]) == HCPhys) 4224 { 4225 Assert(pPage->cPresent); 4226 Assert(pPool->cPresent); 4227 pPage->cPresent--; 4228 pPool->cPresent--; 4229 pgmTrackDerefGCPhys(pPool, pPage, &pRam->aPages[iPage], iPte); 4230 return; 4231 } 4232 HCPhysExpected = PGM_PAGE_GET_HCPHYS(&pRam->aPages[iPage]); 4233 break; 4234 } 4235 pRam = pRam->CTX_SUFF(pNext); 4236 } 4237 4238 /* 4239 * Damn, the hint didn't work. We'll have to do an expensive linear search. 4204 * Try the hint first. 4205 */ 4206 RTHCPHYS HCPhysHinted; 4207 PPGMPAGE pPhysPage = pgmPhysGetPage(pPool->CTX_SUFF(pVM), GCPhysHint); 4208 if (pPhysPage) 4209 { 4210 HCPhysHinted = PGM_PAGE_GET_HCPHYS(pPhysPage); 4211 Assert(HCPhysHinted); 4212 if (HCPhysHinted == HCPhys) 4213 { 4214 Assert(pPage->cPresent); 4215 Assert(pPool->cPresent); 4216 pPage->cPresent--; 4217 pPool->cPresent--; 4218 pgmTrackDerefGCPhys(pPool, pPage, pPhysPage, iPte); 4219 return; 4220 } 4221 } 4222 else 4223 HCPhysHinted = UINT64_C(0xdeadbeefdeadbeef); 4224 4225 /* 4226 * Damn, the hint didn't work. We'll have to do an expensive linear search. 
4240 4227 */ 4241 4228 STAM_COUNTER_INC(&pPool->StatTrackLinearRamSearches); 4242 pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges);4229 PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRangesX); 4243 4230 while (pRam) 4244 4231 { … … 4261 4248 } 4262 4249 4263 AssertFatalMsgFailed(("HCPhys=%RHp GCPhysHint=%RGp ( Expected HCPhys with hint = %RHp)\n", HCPhys, GCPhysHint, HCPhysExpected));4250 AssertFatalMsgFailed(("HCPhys=%RHp GCPhysHint=%RGp (Hinted page has HCPhys = %RHp)\n", HCPhys, GCPhysHint, HCPhysHinted)); 4264 4251 } 4265 4252 … … 4778 4765 * Quietly reject any attempts at flushing the currently active shadow CR3 mapping 4779 4766 */ 4780 if (pgmPoolIsPageLocked( &pVM->pgm.s,pPage))4767 if (pgmPoolIsPageLocked(pPage)) 4781 4768 { 4782 4769 AssertMsg( pPage->enmKind == PGMPOOLKIND_64BIT_PML4 … … 5184 5171 #endif 5185 5172 STAM_COUNTER_INC(&pPool->StatForceFlushPage); 5186 Assert(!pgmPoolIsPageLocked( &pVM->pgm.s,pPage));5173 Assert(!pgmPoolIsPageLocked(pPage)); 5187 5174 pgmPoolMonitorChainFlush(pPool, pPage); 5188 5175 return; … … 5328 5315 * Clear all the GCPhys links and rebuild the phys ext free list. 5329 5316 */ 5330 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges );5317 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 5331 5318 pRam; 5332 5319 pRam = pRam->CTX_SUFF(pNext)) -
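The rewritten pgmPoolTracDerefGCPhysHint() above boils down to a two-phase flow: try the hinted address with a single page lookup, and only fall back to the linear host-physical search when the hint misses. Condensed (illustrative; identifiers from the diff, reference-count bookkeeping elided):

```c
PPGMPAGE pPhysPage = pgmPhysGetPage(pPool->CTX_SUFF(pVM), GCPhysHint);
if (   pPhysPage
    && PGM_PAGE_GET_HCPHYS(pPhysPage) == HCPhys)
{
    pgmTrackDerefGCPhys(pPool, pPage, pPhysPage, iPte);  /* common case: the hint was right */
    return;
}
/* Hint missed: expensive linear walk over all RAM ranges comparing
   host-physical addresses (counted by StatTrackLinearRamSearches). */
```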
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r32036 r36891 301 301 AssertFailed(); /* can't happen */ 302 302 # else 303 Assert(pgmMapAreMappingsEnabled( &pVM->pgm.s));303 Assert(pgmMapAreMappingsEnabled(pVM)); 304 304 305 305 PPGMMAPPING pMap = pgmGetMapping(pVM, (RTGCPTR)GCPtr); … … 462 462 { 463 463 Assert(fGstPte & X86_PTE_RW); 464 PPGMPAGE pPage = pgmPhysGetPage( &pVCpu->CTX_SUFF(pVM)->pgm.s, GCPhys);464 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 465 465 Assert(pPage); 466 466 if (pPage) -
trunk/src/VBox/VMM/VMMR0/PGMR0.cpp
r36448 r36891 334 334 if (RT_SUCCESS(rc)) 335 335 { 336 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0)336 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR0; RT_SUCCESS(rc) && pRam; pRam = pRam->pNextR0) 337 337 { 338 338 PPGMPAGE pPage = &pRam->aPages[0]; … … 526 526 if ( ( pHandler->cAliasedPages 527 527 || pHandler->cTmpOffPages) 528 && ( (pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysFault)) == NULL528 && ( (pPage = pgmPhysGetPage(pVM, GCPhysFault)) == NULL 529 529 || PGM_PAGE_GET_HNDL_PHYS_STATE(pPage) == PGM_PAGE_HNDL_PHYS_STATE_DISABLED) 530 530 ) -
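The pRamRanges → pRamRangesX rename (here the R0 flavor) keeps the context-suffix scheme intact: CTX_SUFF() pastes the per-context suffix onto the field name, so every stale spelling of the old name fails to compile — presumably the point of adding the X while the TLB is wired in. Roughly how the macro family behaves (simplified; see VBox/cdefs.h for the real definitions):

```c
#if defined(IN_RC)
# define CTX_SUFF(a_Var)  a_Var##RC   /* raw-mode context */
#elif defined(IN_RING0)
# define CTX_SUFF(a_Var)  a_Var##R0   /* ring-0 context */
#else
# define CTX_SUFF(a_Var)  a_Var##R3   /* ring-3 context */
#endif

/* pVM->pgm.s.CTX_SUFF(pRamRangesX) thus resolves to pRamRangesXR3,
   pRamRangesXR0 or pRamRangesXRC depending on the build context. */
```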
trunk/src/VBox/VMM/VMMR0/PGMR0SharedPage.cpp
r35346 r36891 61 61 Assert((pRegions[idxRegion].GCRegionAddr & 0xfff) == 0); 62 62 63 RTGCPTR GCRegion 63 RTGCPTR GCRegion = pRegions[idxRegion].GCRegionAddr; 64 64 unsigned cbRegion = pRegions[idxRegion].cbRegion & ~0xfff; 65 unsigned idxPage = 0;65 unsigned idxPage = 0; 66 66 67 67 while (cbRegion) … … 75 75 && !(fFlags & X86_PTE_RW)) /* important as we make assumptions about this below! */ 76 76 { 77 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);77 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 78 78 Assert(!pPage || !PGM_PAGE_IS_BALLOONED(pPage)); 79 79 if ( pPage -
trunk/src/VBox/VMM/VMMR3/PGM.cpp
r36627 r36891 1695 1695 PGM_REG_COUNTER(&pStats->StatPageMapTlbFlushes, "/PGM/R3/Page/MapTlbFlushes", "TLB flushes (all contexts)."); 1696 1696 PGM_REG_COUNTER(&pStats->StatPageMapTlbFlushEntry, "/PGM/R3/Page/MapTlbFlushEntry", "TLB entry flushes (all contexts)."); 1697 1698 PGM_REG_COUNTER(&pStats->StatRZRamRangeTlbHits, "/PGM/RZ/RamRange/TlbHits", "TLB hits."); 1699 PGM_REG_COUNTER(&pStats->StatRZRamRangeTlbMisses, "/PGM/RZ/RamRange/TlbMisses", "TLB misses."); 1700 PGM_REG_COUNTER(&pStats->StatR3RamRangeTlbHits, "/PGM/R3/RamRange/TlbHits", "TLB hits."); 1701 PGM_REG_COUNTER(&pStats->StatR3RamRangeTlbMisses, "/PGM/R3/RamRange/TlbMisses", "TLB misses."); 1697 1702 1698 1703 PGM_REG_PROFILE(&pStats->StatRZSyncCR3HandlerVirtualUpdate, "/PGM/RZ/SyncCR3/Handlers/VirtualUpdate", "Profiling of the virtual handler updates."); … … 2224 2229 * Ram ranges. 2225 2230 */ 2226 if (pVM->pgm.s.pRamRanges R3)2231 if (pVM->pgm.s.pRamRangesXR3) 2227 2232 { 2228 2233 /* Update the pSelfRC pointers and relink them. */ 2229 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRanges R3; pCur; pCur = pCur->pNextR3)2234 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 2230 2235 if (!(pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING)) 2231 2236 pCur->pSelfRC = MMHyperCCToRC(pVM, pCur); 2232 2237 pgmR3PhysRelinkRamRanges(pVM); 2238 2239 #ifdef PGM_USE_RAMRANGE_TLB 2240 /* Flush the RC TLB. */ 2241 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++) 2242 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR; 2243 #endif 2233 2244 } 2234 2245 … … 2607 2618 sizeof(RTHCPTR) * 2, "pvHC "); 2608 2619 2609 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRanges R3; pCur; pCur = pCur->pNextR3)2620 for (PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 2610 2621 pHlp->pfnPrintf(pHlp, 2611 2622 "%RGp-%RGp %RHv %s\n", … … 2656 2667 "%04X - %RGp P=%d U=%d RW=%d G=%d - BIG\n", 2657 2668 iPD, 2658 pgmGstGet4MBPhysPage( &pVM->pgm.s, PdeSrc),2669 pgmGstGet4MBPhysPage(pVM, PdeSrc), 2659 2670 PdeSrc.b.u1Present, PdeSrc.b.u1User, PdeSrc.b.u1Write, PdeSrc.b.u1Global && fPGE); 2660 2671 else … … 3590 3601 if (!pVM) 3591 3602 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "error: The command requires a VM to be selected.\n"); 3592 if (!pVM->pgm.s.pRamRanges RC)3603 if (!pVM->pgm.s.pRamRangesXR3) 3593 3604 return pCmdHlp->pfnPrintf(pCmdHlp, NULL, "Sorry, no Ram is registered.\n"); 3594 3605 … … 3598 3609 int rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, "From - To (incl) pvHC\n"); 3599 3610 PPGMRAMRANGE pRam; 3600 for (pRam = pVM->pgm.s.pRamRanges R3; pRam; pRam = pRam->pNextR3)3611 for (pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 3601 3612 { 3602 3613 rc = pCmdHlp->pfnPrintf(pCmdHlp, NULL, … … 3814 3825 3815 3826 pgmLock(pVM); 3816 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;3817 3818 3827 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 3828 pRam && pRam->GCPhys < GCPhysEnd && RT_SUCCESS(rc); 3829 pRam = pRam->pNextR3) 3819 3830 { 3820 3831 /* fill the gap */ -
trunk/src/VBox/VMM/VMMR3/PGMDbg.cpp
r35346 r36891 160 160 return VERR_INVALID_POINTER; 161 161 162 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges );162 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 163 163 pRam; 164 164 pRam = pRam->CTX_SUFF(pNext)) … … 622 622 */ 623 623 pgmLock(pVM); 624 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges );624 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 625 625 pRam; 626 626 pRam = pRam->CTX_SUFF(pNext)) … … 793 793 if (RT_SUCCESS(rc)) 794 794 { 795 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);795 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 796 796 if ( pPage 797 797 && ( !PGM_PAGE_IS_ZERO(pPage) … … 1041 1041 { 1042 1042 pgmLock(pState->pVM); 1043 PCPGMPAGE pPage = pgmPhysGetPage( &pState->pVM->pgm.s, GCPhys);1043 PCPGMPAGE pPage = pgmPhysGetPage(pState->pVM, GCPhys); 1044 1044 if (pPage) 1045 1045 RTStrPrintf(szPage, sizeof(szPage), "%R[pgmpage]", pPage); … … 1719 1719 char szPage[80]; 1720 1720 pgmLock(pState->pVM); 1721 PCPGMPAGE pPage = pgmPhysGetPage( &pState->pVM->pgm.s, GCPhys);1721 PCPGMPAGE pPage = pgmPhysGetPage(pState->pVM, GCPhys); 1722 1722 if (pPage) 1723 1723 RTStrPrintf(szPage, sizeof(szPage), " %R[pgmpage]", pPage); -
trunk/src/VBox/VMM/VMMR3/PGMHandler.cpp
r35346 r36891 160 160 static DECLCALLBACK(int) pgmR3HandlerPhysicalOneClear(PAVLROGCPHYSNODECORE pNode, void *pvUser) 161 161 { 162 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode;162 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode; 163 163 PPGMRAMRANGE pRamHint = NULL; 164 RTGCPHYS GCPhys = pCur->Core.Key;165 RTUINT cPages = pCur->cPages;166 P PGM pPGM = &((PVM)pvUser)->pgm.s;164 RTGCPHYS GCPhys = pCur->Core.Key; 165 RTUINT cPages = pCur->cPages; 166 PVM pVM = (PVM)pvUser; 167 167 for (;;) 168 168 { 169 169 PPGMPAGE pPage; 170 int rc = pgmPhysGetPageWithHintEx(p PGM, GCPhys, &pPage, &pRamHint);170 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint); 171 171 if (RT_SUCCESS(rc)) 172 172 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, PGM_PAGE_HNDL_PHYS_STATE_NONE); … … 190 190 static DECLCALLBACK(int) pgmR3HandlerPhysicalOneSet(PAVLROGCPHYSNODECORE pNode, void *pvUser) 191 191 { 192 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode;193 unsigned uState = pgmHandlerPhysicalCalcState(pCur);192 PPGMPHYSHANDLER pCur = (PPGMPHYSHANDLER)pNode; 193 unsigned uState = pgmHandlerPhysicalCalcState(pCur); 194 194 PPGMRAMRANGE pRamHint = NULL; 195 RTGCPHYS GCPhys = pCur->Core.Key;196 RTUINT cPages = pCur->cPages;197 P PGM pPGM = &((PVM)pvUser)->pgm.s;195 RTGCPHYS GCPhys = pCur->Core.Key; 196 RTUINT cPages = pCur->cPages; 197 PVM pVM = (PVM)pvUser; 198 198 for (;;) 199 199 { 200 200 PPGMPAGE pPage; 201 int rc = pgmPhysGetPageWithHintEx(p PGM, GCPhys, &pPage, &pRamHint);201 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint); 202 202 if (RT_SUCCESS(rc)) 203 203 PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState); … … 471 471 * Reset the flags and remove phys2virt nodes. 472 472 */ 473 PPGM pPGM = &pVM->pgm.s; 474 for (unsigned iPage = 0; iPage < pCur->cPages; iPage++) 473 for (uint32_t iPage = 0; iPage < pCur->cPages; iPage++) 475 474 if (pCur->aPhysToVirt[iPage].offNextAlias & PGMPHYS2VIRTHANDLER_IN_TREE) 476 pgmHandlerVirtualClearPage(p PGM, pCur, iPage);475 pgmHandlerVirtualClearPage(pVM, pCur, iPage); 477 476 478 477 /* -
trunk/src/VBox/VMM/VMMR3/PGMMap.cpp
r35346 r36891 510 510 * Ignore the additions mapping fix call if disabled. 511 511 */ 512 if (!pgmMapAreMappingsEnabled( &pVM->pgm.s))512 if (!pgmMapAreMappingsEnabled(pVM)) 513 513 { 514 514 Assert(HWACCMIsEnabled(pVM)); … … 551 551 AssertMsgReturn(cb && !(cb & X86_PAGE_4M_OFFSET_MASK), ("cb (%#x) is 0 or not aligned on a 4MB address!\n", cb), 552 552 VERR_INVALID_PARAMETER); 553 AssertReturn(pgmMapAreMappingsEnabled( &pVM->pgm.s), VERR_INTERNAL_ERROR_3);553 AssertReturn(pgmMapAreMappingsEnabled(pVM), VERR_INTERNAL_ERROR_3); 554 554 AssertReturn(pVM->cCpus == 1, VERR_INTERNAL_ERROR_4); 555 555 … … 726 726 { 727 727 Log(("PGMR3MappingsUnfix: fMappingsFixed=%RTbool fMappingsDisabled=%RTbool\n", pVM->pgm.s.fMappingsFixed, pVM->pgm.s.fMappingsDisabled)); 728 if ( pgmMapAreMappingsEnabled( &pVM->pgm.s)728 if ( pgmMapAreMappingsEnabled(pVM) 729 729 && ( pVM->pgm.s.fMappingsFixed 730 730 || pVM->pgm.s.fMappingsFixedRestored) … … 1020 1020 pgmLock(pVM); /* to avoid assertions */ 1021 1021 1022 Assert(!pgmMapAreMappingsEnabled( &pVM->pgm.s) || PGMGetGuestMode(pVCpu) <= PGMMODE_PAE_NX);1022 Assert(!pgmMapAreMappingsEnabled(pVM) || PGMGetGuestMode(pVCpu) <= PGMMODE_PAE_NX); 1023 1023 1024 1024 pgmMapSetShadowPDEs(pVM, pMap, iNewPDE); -
trunk/src/VBox/VMM/VMMR3/PGMPhys.cpp
r36441 r36891 119 119 * Copy loop on ram ranges. 120 120 */ 121 PPGMRAMRANGE pRam = p VM->pgm.s.CTX_SUFF(pRamRanges);121 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 122 122 for (;;) 123 123 { 124 /* Find range. */125 while (pRam && GCPhys > pRam->GCPhysLast)126 pRam = pRam->CTX_SUFF(pNext);127 124 /* Inside range or not? */ 128 125 if (pRam && GCPhys >= pRam->GCPhys) … … 186 183 * Unassigned address space. 187 184 */ 188 if (!pRam) 189 break; 190 size_t cb = pRam->GCPhys - GCPhys; 185 size_t cb = pRam ? pRam->GCPhys - GCPhys : ~(size_t)0; 191 186 if (cb >= cbRead) 192 187 { … … 200 195 GCPhys += cb; 201 196 } 197 198 /* Advance range if necessary. */ 199 while (pRam && GCPhys > pRam->GCPhysLast) 200 pRam = pRam->CTX_SUFF(pNext); 202 201 } /* Ram range walk */ 203 202 … … 250 249 * Copy loop on ram ranges, stop when we hit something difficult. 251 250 */ 252 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges);251 PPGMRAMRANGE pRam = pgmPhysGetRangeAtOrAbove(pVM, GCPhys); 253 252 for (;;) 254 253 { 255 /* Find range. */256 while (pRam && GCPhys > pRam->GCPhysLast)257 pRam = pRam->CTX_SUFF(pNext);258 254 /* Inside range or not? */ 259 255 if (pRam && GCPhys >= pRam->GCPhys) … … 332 328 GCPhys += cb; 333 329 } 330 331 /* Advance range if necessary. */ 332 while (pRam && GCPhys > pRam->GCPhysLast) 333 pRam = pRam->CTX_SUFF(pNext); 334 334 } /* Ram range walk */ 335 335 … … 361 361 { 362 362 PPGMPAGEMAPTLBE pTlbe; 363 int rc2 = pgmPhysPageQueryTlbe( &pVM->pgm.s, *pGCPhys, &pTlbe);363 int rc2 = pgmPhysPageQueryTlbe(pVM, *pGCPhys, &pTlbe); 364 364 AssertFatalRC(rc2); 365 365 PPGMPAGE pPage = pTlbe->pPage; … … 431 431 */ 432 432 PPGMPAGEMAPTLBE pTlbe; 433 rc = pgmPhysPageQueryTlbe( &pVM->pgm.s, GCPhys, &pTlbe);433 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe); 434 434 if (RT_SUCCESS(rc)) 435 435 { … … 532 532 */ 533 533 PPGMPAGEMAPTLBE pTlbe; 534 rc = pgmPhysPageQueryTlbe( &pVM->pgm.s, GCPhys, &pTlbe);534 rc = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe); 535 535 if (RT_SUCCESS(rc)) 536 536 { … … 591 591 592 592 #ifdef VBOX_STRICT 593 for (pCur = pVM->pgm.s.pRamRanges R3; pCur; pCur = pCur->pNextR3)593 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 594 594 { 595 595 Assert((pCur->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pCur->pSelfR0 == MMHyperCCToR0(pVM, pCur)); … … 599 599 Assert((pCur->cb & PAGE_OFFSET_MASK) == 0); 600 600 Assert(pCur->cb == pCur->GCPhysLast - pCur->GCPhys + 1); 601 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRanges R3; pCur2; pCur2 = pCur2->pNextR3)601 for (PPGMRAMRANGE pCur2 = pVM->pgm.s.pRamRangesXR3; pCur2; pCur2 = pCur2->pNextR3) 602 602 Assert( pCur2 == pCur 603 603 || strcmp(pCur2->pszDesc, pCur->pszDesc)); /** @todo fix MMIO ranges!! */ … … 605 605 #endif 606 606 607 pCur = pVM->pgm.s.pRamRanges R3;607 pCur = pVM->pgm.s.pRamRangesXR3; 608 608 if (pCur) 609 609 { 610 pVM->pgm.s.pRamRanges R0 = pCur->pSelfR0;611 pVM->pgm.s.pRamRanges RC = pCur->pSelfRC;610 pVM->pgm.s.pRamRangesXR0 = pCur->pSelfR0; 611 pVM->pgm.s.pRamRangesXRC = pCur->pSelfRC; 612 612 613 613 for (; pCur->pNextR3; pCur = pCur->pNextR3) … … 622 622 else 623 623 { 624 Assert(pVM->pgm.s.pRamRanges R0 == NIL_RTR0PTR);625 Assert(pVM->pgm.s.pRamRanges RC == NIL_RTRCPTR);624 Assert(pVM->pgm.s.pRamRangesXR0 == NIL_RTR0PTR); 625 Assert(pVM->pgm.s.pRamRangesXRC == NIL_RTRCPTR); 626 626 } 627 627 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen); … … 644 644 pgmLock(pVM); 645 645 646 PPGMRAMRANGE pRam = pPrev ? pPrev->pNextR3 : pVM->pgm.s.pRamRanges R3;646 PPGMRAMRANGE pRam = pPrev ? 
pPrev->pNextR3 : pVM->pgm.s.pRamRangesXR3; 647 647 pNew->pNextR3 = pRam; 648 648 pNew->pNextR0 = pRam ? pRam->pSelfR0 : NIL_RTR0PTR; … … 657 657 else 658 658 { 659 pVM->pgm.s.pRamRanges R3 = pNew;660 pVM->pgm.s.pRamRanges R0 = pNew->pSelfR0;661 pVM->pgm.s.pRamRanges RC = pNew->pSelfRC;659 pVM->pgm.s.pRamRangesXR3 = pNew; 660 pVM->pgm.s.pRamRangesXR0 = pNew->pSelfR0; 661 pVM->pgm.s.pRamRangesXRC = pNew->pSelfRC; 662 662 } 663 663 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen); … … 675 675 static void pgmR3PhysUnlinkRamRange2(PVM pVM, PPGMRAMRANGE pRam, PPGMRAMRANGE pPrev) 676 676 { 677 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRanges R3 == pRam);677 Assert(pPrev ? pPrev->pNextR3 == pRam : pVM->pgm.s.pRamRangesXR3 == pRam); 678 678 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfR0 == MMHyperCCToR0(pVM, pRam)); 679 679 Assert((pRam->fFlags & PGM_RAM_RANGE_FLAGS_FLOATING) || pRam->pSelfRC == MMHyperCCToRC(pVM, pRam)); … … 690 690 else 691 691 { 692 Assert(pVM->pgm.s.pRamRanges R3 == pRam);693 pVM->pgm.s.pRamRanges R3 = pNext;694 pVM->pgm.s.pRamRanges R0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR;695 pVM->pgm.s.pRamRanges RC = pNext ? pNext->pSelfRC : NIL_RTRCPTR;692 Assert(pVM->pgm.s.pRamRangesXR3 == pRam); 693 pVM->pgm.s.pRamRangesXR3 = pNext; 694 pVM->pgm.s.pRamRangesXR0 = pNext ? pNext->pSelfR0 : NIL_RTR0PTR; 695 pVM->pgm.s.pRamRangesXRC = pNext ? pNext->pSelfRC : NIL_RTRCPTR; 696 696 } 697 697 ASMAtomicIncU32(&pVM->pgm.s.idRamRangesGen); … … 712 712 /* find prev. */ 713 713 PPGMRAMRANGE pPrev = NULL; 714 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRanges R3;714 PPGMRAMRANGE pCur = pVM->pgm.s.pRamRangesXR3; 715 715 while (pCur != pRam) 716 716 { … … 809 809 for (unsigned i = 0; i < cPages; i++) 810 810 { 811 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, paPhysPage[i]);811 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]); 812 812 if ( pPage == NULL 813 813 || pPage->uTypeY != PGMPAGETYPE_RAM) … … 850 850 for (unsigned i = 0; i < cPages; i++) 851 851 { 852 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, paPhysPage[i]);852 PPGMPAGE pPage = pgmPhysGetPage(pVM, paPhysPage[i]); 853 853 AssertBreak(pPage && pPage->uTypeY == PGMPAGETYPE_RAM); 854 854 … … 960 960 961 961 /** 962 * Rendezvous callback used by PGMR3WriteProtectRAM that write protects all physical RAM 962 * Rendezvous callback used by PGMR3WriteProtectRAM that write protects all 963 * physical RAM. 963 964 * 964 965 * This is only called on one of the EMTs while the other ones are waiting for … … 968 969 * @param pVM The VM handle. 969 970 * @param pVCpu The VMCPU for the EMT we're being called on. Unused. 970 * @param pvUser User parameter 971 * @param pvUser User parameter, unused. 971 972 */ 972 973 static DECLCALLBACK(VBOXSTRICTRC) pgmR3PhysWriteProtectRAMRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser) 973 974 { 974 975 int rc = VINF_SUCCESS; 976 NOREF(pvUser); 975 977 976 978 pgmLock(pVM); … … 981 983 /** @todo pointless to write protect the physical page pointed to by RSP. 
*/ 982 984 983 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges );985 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 984 986 pRam; 985 987 pRam = pRam->CTX_SUFF(pNext)) 986 988 { 987 u nsignedcPages = pRam->cb >> PAGE_SHIFT;988 for (u nsignediPage = 0; iPage < cPages; iPage++)989 uint32_t cPages = pRam->cb >> PAGE_SHIFT; 990 for (uint32_t iPage = 0; iPage < cPages; iPage++) 989 991 { 990 992 PPGMPAGE pPage = &pRam->aPages[iPage]; … … 1048 1050 1049 1051 /** 1050 * Enumerate all dirty FT pages 1052 * Enumerate all dirty FT pages. 1051 1053 * 1052 1054 * @returns VBox status code. 1053 1055 * @param pVM The VM handle. 1054 * @param pfnEnum Enumerate callback handler 1055 * @param pvUser Enumerate callback handler parameter 1056 * @param pfnEnum Enumerate callback handler. 1057 * @param pvUser Enumerate callback handler parameter. 1056 1058 */ 1057 1059 VMMR3DECL(int) PGMR3PhysEnumDirtyFTPages(PVM pVM, PFNPGMENUMDIRTYFTPAGES pfnEnum, void *pvUser) … … 1060 1062 1061 1063 pgmLock(pVM); 1062 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges );1064 for (PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 1063 1065 pRam; 1064 1066 pRam = pRam->CTX_SUFF(pNext)) 1065 1067 { 1066 u nsignedcPages = pRam->cb >> PAGE_SHIFT;1067 for (u nsignediPage = 0; iPage < cPages; iPage++)1068 { 1069 PPGMPAGE pPage = &pRam->aPages[iPage];1068 uint32_t cPages = pRam->cb >> PAGE_SHIFT; 1069 for (uint32_t iPage = 0; iPage < cPages; iPage++) 1070 { 1071 PPGMPAGE pPage = &pRam->aPages[iPage]; 1070 1072 PGMPAGETYPE enmPageType = (PGMPAGETYPE)PGM_PAGE_GET_TYPE(pPage); 1071 1073 … … 1074 1076 { 1075 1077 /* 1076 1077 1078 * A RAM page. 1079 */ 1078 1080 switch (PGM_PAGE_GET_STATE(pPage)) 1079 1081 { 1080 case PGM_PAGE_STATE_ALLOCATED: 1081 case PGM_PAGE_STATE_WRITE_MONITORED: 1082 if ( !PGM_PAGE_IS_WRITTEN_TO(pPage) /* not very recently updated? */ 1083 && PGM_PAGE_IS_FT_DIRTY(pPage)) 1084 { 1085 unsigned cbPageRange = PAGE_SIZE; 1086 unsigned iPageClean = iPage + 1; 1087 RTGCPHYS GCPhysPage = pRam->GCPhys + iPage * PAGE_SIZE; 1088 uint8_t *pu8Page = NULL; 1089 PGMPAGEMAPLOCK Lock; 1090 1091 /* Find the next clean page, so we can merge adjacent dirty pages. */ 1092 for (; iPageClean < cPages; iPageClean++) 1082 case PGM_PAGE_STATE_ALLOCATED: 1083 case PGM_PAGE_STATE_WRITE_MONITORED: 1084 if ( !PGM_PAGE_IS_WRITTEN_TO(pPage) /* not very recently updated? */ 1085 && PGM_PAGE_IS_FT_DIRTY(pPage)) 1093 1086 { 1094 PPGMPAGE pPageNext = &pRam->aPages[iPageClean]; 1095 if ( RT_UNLIKELY(PGM_PAGE_GET_TYPE(pPageNext) != PGMPAGETYPE_RAM) 1096 || PGM_PAGE_GET_STATE(pPageNext) != PGM_PAGE_STATE_ALLOCATED 1097 || PGM_PAGE_IS_WRITTEN_TO(pPageNext) 1098 || !PGM_PAGE_IS_FT_DIRTY(pPageNext) 1099 /* Crossing a chunk boundary? */ 1100 || (GCPhysPage & GMM_PAGEID_IDX_MASK) != ((GCPhysPage + cbPageRange) & GMM_PAGEID_IDX_MASK) 1101 ) 1102 break; 1103 1104 cbPageRange += PAGE_SIZE; 1087 unsigned cbPageRange = PAGE_SIZE; 1088 unsigned iPageClean = iPage + 1; 1089 RTGCPHYS GCPhysPage = pRam->GCPhys + iPage * PAGE_SIZE; 1090 uint8_t *pu8Page = NULL; 1091 PGMPAGEMAPLOCK Lock; 1092 1093 /* Find the next clean page, so we can merge adjacent dirty pages. */ 1094 for (; iPageClean < cPages; iPageClean++) 1095 { 1096 PPGMPAGE pPageNext = &pRam->aPages[iPageClean]; 1097 if ( RT_UNLIKELY(PGM_PAGE_GET_TYPE(pPageNext) != PGMPAGETYPE_RAM) 1098 || PGM_PAGE_GET_STATE(pPageNext) != PGM_PAGE_STATE_ALLOCATED 1099 || PGM_PAGE_IS_WRITTEN_TO(pPageNext) 1100 || !PGM_PAGE_IS_FT_DIRTY(pPageNext) 1101 /* Crossing a chunk boundary? 
*/ 1102 || (GCPhysPage & GMM_PAGEID_IDX_MASK) != ((GCPhysPage + cbPageRange) & GMM_PAGEID_IDX_MASK) 1103 ) 1104 break; 1105 1106 cbPageRange += PAGE_SIZE; 1107 } 1108 1109 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysPage, (const void **)&pu8Page, &Lock); 1110 if (RT_SUCCESS(rc)) 1111 { 1112 /** @todo this is risky; the range might be changed, but little choice as the sync 1113 * costs a lot of time. */ 1114 pgmUnlock(pVM); 1115 pfnEnum(pVM, GCPhysPage, pu8Page, cbPageRange, pvUser); 1116 pgmLock(pVM); 1117 PGMPhysReleasePageMappingLock(pVM, &Lock); 1118 } 1119 1120 for (iPage; iPage < iPageClean; iPage++) 1121 PGM_PAGE_CLEAR_FT_DIRTY(&pRam->aPages[iPage]); 1122 1123 iPage = iPageClean - 1; 1105 1124 } 1106 1107 rc = PGMPhysGCPhys2CCPtrReadOnly(pVM, GCPhysPage, (const void **)&pu8Page, &Lock); 1108 if (RT_SUCCESS(rc)) 1109 { 1110 /** @todo this is risky; the range might be changed, but little choice as the sync costs a lot of time */ 1111 pgmUnlock(pVM); 1112 pfnEnum(pVM, GCPhysPage, pu8Page, cbPageRange, pvUser); 1113 pgmLock(pVM); 1114 PGMPhysReleasePageMappingLock(pVM, &Lock); 1115 } 1116 1117 for (iPage; iPage < iPageClean; iPage++) 1118 PGM_PAGE_CLEAR_FT_DIRTY(&pRam->aPages[iPage]); 1119 1120 iPage = iPageClean - 1; 1121 } 1122 break; 1125 break; 1123 1126 } 1124 1127 } … … 1142 1145 pgmLock(pVM); 1143 1146 uint32_t cRamRanges = 0; 1144 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRanges ); pCur; pCur = pCur->CTX_SUFF(pNext))1147 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext)) 1145 1148 cRamRanges++; 1146 1149 pgmUnlock(pVM); … … 1168 1171 pgmLock(pVM); 1169 1172 uint32_t iCurRange = 0; 1170 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRanges ); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++)1173 for (PPGMRAMRANGE pCur = pVM->pgm.s.CTX_SUFF(pRamRangesX); pCur; pCur = pCur->CTX_SUFF(pNext), iCurRange++) 1171 1174 if (iCurRange == iRange) 1172 1175 { … … 1191 1194 * @returns VBox status code. 1192 1195 * @param pVM The VM handle. 1193 * @param puTotalAllocSize Pointer to total allocated memory inside VMMR0 (in bytes) 1194 * @param puTotalFreeSize Pointer to total free (allocated but not used yet) memory inside VMMR0 (in bytes) 1195 * @param puTotalBalloonSize Pointer to total ballooned memory inside VMMR0 (in bytes) 1196 * @param puTotalSharedSize Pointer to total shared memory inside VMMR0 (in bytes) 1197 */ 1198 VMMR3DECL(int) PGMR3QueryVMMMemoryStats(PVM pVM, uint64_t *puTotalAllocSize, uint64_t *puTotalFreeSize, uint64_t *puTotalBalloonSize, uint64_t *puTotalSharedSize) 1199 { 1200 int rc; 1201 1202 uint64_t cAllocPages = 0, cFreePages = 0, cBalloonPages = 0, cSharedPages = 0; 1203 rc = GMMR3QueryHypervisorMemoryStats(pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages); 1196 * @param pcbAllocMem Where to return the amount of memory allocated 1197 * by VMs. 1198 * @param pcbFreeMem Where to return the amount of memory that is 1199 * allocated from the host but not currently used 1200 * by any VMs. 1201 * @param pcbBallonedMem Where to return the sum of memory that is 1202 * currently ballooned by the VMs. 1203 * @param pcbSharedMem Where to return the amount of memory that is 1204 * currently shared. 
1205 */ 1206 VMMR3DECL(int) PGMR3QueryGlobalMemoryStats(PVM pVM, uint64_t *pcbAllocMem, uint64_t *pcbFreeMem, 1207 uint64_t *pcbBallonedMem, uint64_t *pcbSharedMem) 1208 { 1209 uint64_t cAllocPages = 0; 1210 uint64_t cFreePages = 0; 1211 uint64_t cBalloonPages = 0; 1212 uint64_t cSharedPages = 0; 1213 int rc = GMMR3QueryHypervisorMemoryStats(pVM, &cAllocPages, &cFreePages, &cBalloonPages, &cSharedPages); 1204 1214 AssertRCReturn(rc, rc); 1205 1215 1206 if (puTotalAllocSize) 1207 *puTotalAllocSize = cAllocPages * _4K; 1208 1209 if (puTotalFreeSize) 1210 *puTotalFreeSize = cFreePages * _4K; 1211 1212 if (puTotalBalloonSize) 1213 *puTotalBalloonSize = cBalloonPages * _4K; 1214 1215 if (puTotalSharedSize) 1216 *puTotalSharedSize = cSharedPages * _4K; 1217 1218 Log(("PGMR3QueryVMMMemoryStats: all=%x free=%x ballooned=%x shared=%x\n", cAllocPages, cFreePages, cBalloonPages, cSharedPages)); 1216 if (pcbAllocMem) 1217 *pcbAllocMem = cAllocPages * _4K; 1218 1219 if (pcbFreeMem) 1220 *pcbFreeMem = cFreePages * _4K; 1221 1222 if (pcbBallonedMem) 1223 *pcbBallonedMem = cBalloonPages * _4K; 1224 1225 if (pcbSharedMem) 1226 *pcbSharedMem = cSharedPages * _4K; 1227 1228 Log(("PGMR3QueryVMMMemoryStats: all=%llx free=%llx ballooned=%llx shared=%llx\n", 1229 cAllocPages, cFreePages, cBalloonPages, cSharedPages)); 1219 1230 return VINF_SUCCESS; 1220 1231 } 1221 1232 1222 /** 1223 * Query memory stats for the VM 1233 1234 /** 1235 * Query memory stats for the VM. 1224 1236 * 1225 1237 * @returns VBox status code. 1226 1238 * @param pVM The VM handle. 1227 * @param puTotalAllocSize Pointer to total allocated memory inside the VM (in bytes) 1228 * @param puTotalFreeSize Pointer to total free (allocated but not used yet) memory inside the VM (in bytes) 1229 * @param puTotalBalloonSize Pointer to total ballooned memory inside the VM (in bytes) 1230 * @param puTotalSharedSize Pointer to total shared memory inside the VM (in bytes) 1231 */ 1232 VMMR3DECL(int) PGMR3QueryMemoryStats(PVM pVM, uint64_t *pulTotalMem, uint64_t *pulPrivateMem, uint64_t *puTotalSharedMem, uint64_t *puTotalZeroMem) 1233 { 1234 if (pulTotalMem) 1235 *pulTotalMem = (uint64_t)pVM->pgm.s.cAllPages * _4K; 1236 1237 if (pulPrivateMem) 1238 *pulPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * _4K; 1239 1240 if (puTotalSharedMem) 1241 *puTotalSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * _4K; 1242 1243 if (puTotalZeroMem) 1244 *puTotalZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * _4K; 1239 * @param pcbTotalMem Where to return total amount memory the VM may 1240 * possibly use. 1241 * @param pcbPrivateMem Where to return the amount of private memory 1242 * currently allocated. 1243 * @param pcbSharedMem Where to return the amount of actually shared 1244 * memory currently used by the VM. 1245 * @param pcbZeroMem Where to return the amount of memory backed by 1246 * zero pages. 1247 * 1248 * @remarks The total mem is normally larger than the sum of the three 1249 * components. There are two reasons for this, first the amount of 1250 * shared memory is what we're sure is shared instead of what could 1251 * possibly be shared with someone. Secondly, because the total may 1252 * include some pure MMIO pages that doesn't go into any of the three 1253 * sub-counts. 1254 * 1255 * @todo Why do we return reused shared pages instead of anything that could 1256 * potentially be shared? Doesn't this mean the first VM gets a much 1257 * lower number of shared pages? 
1258 */ 1259 VMMR3DECL(int) PGMR3QueryMemoryStats(PVM pVM, uint64_t *pcbTotalMem, uint64_t *pcbPrivateMem, 1260 uint64_t *pcbSharedMem, uint64_t *pcbZeroMem) 1261 { 1262 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE); 1263 1264 if (pcbTotalMem) 1265 *pcbTotalMem = (uint64_t)pVM->pgm.s.cAllPages * PAGE_SIZE; 1266 1267 if (pcbPrivateMem) 1268 *pcbPrivateMem = (uint64_t)pVM->pgm.s.cPrivatePages * PAGE_SIZE; 1269 1270 if (pcbSharedMem) 1271 *pcbSharedMem = (uint64_t)pVM->pgm.s.cReusedSharedPages * PAGE_SIZE; 1272 1273 if (pcbZeroMem) 1274 *pcbZeroMem = (uint64_t)pVM->pgm.s.cZeroPages * PAGE_SIZE; 1245 1275 1246 1276 Log(("PGMR3QueryMemoryStats: all=%x private=%x reused=%x zero=%x\n", pVM->pgm.s.cAllPages, pVM->pgm.s.cPrivatePages, pVM->pgm.s.cReusedSharedPages, pVM->pgm.s.cZeroPages)); 1247 1277 return VINF_SUCCESS; 1248 1278 } 1279 1249 1280 1250 1281 /** … … 1308 1339 case PGMRELOCATECALL_SUGGEST: 1309 1340 return true; 1341 1310 1342 case PGMRELOCATECALL_RELOCATE: 1311 1343 { 1312 /* Update myself and then relink all the ranges. */ 1344 /* 1345 * Update myself, then relink all the ranges and flush the RC TLB. 1346 */ 1313 1347 pgmLock(pVM); 1348 1314 1349 pRam->pSelfRC = (RTRCPTR)(GCPtrNew + PAGE_SIZE); 1350 1315 1351 pgmR3PhysRelinkRamRanges(pVM); 1352 #ifdef PGM_USE_RAMRANGE_TLB 1353 for (unsigned i = 0; i < PGM_RAMRANGE_TLB_ENTRIES; i++) 1354 pVM->pgm.s.apRamRangesTlbRC[i] = NIL_RTRCPTR; 1355 #endif 1356 1316 1357 pgmUnlock(pVM); 1317 1358 return true; … … 1440 1481 */ 1441 1482 PPGMRAMRANGE pPrev = NULL; 1442 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;1483 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 1443 1484 while (pRam && GCPhysLast >= pRam->GCPhys) 1444 1485 { … … 1563 1604 uint64_t NanoTS = RTTimeNanoTS(); 1564 1605 pgmLock(pVM); 1565 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3; pRam; pRam = pRam->pNextR3)1606 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 1566 1607 { 1567 1608 PPGMPAGE pPage = &pRam->aPages[0]; … … 1647 1688 * Walk the ram ranges. 1648 1689 */ 1649 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3; pRam; pRam = pRam->pNextR3)1690 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 1650 1691 { 1651 1692 uint32_t iPage = pRam->cb >> PAGE_SHIFT; … … 1800 1841 * Walk the ram ranges. 1801 1842 */ 1802 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3; pRam; pRam = pRam->pNextR3)1843 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 1803 1844 { 1804 1845 uint32_t iPage = pRam->cb >> PAGE_SHIFT; … … 1886 1927 bool fRamExists = false; 1887 1928 PPGMRAMRANGE pRamPrev = NULL; 1888 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;1929 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 1889 1930 while (pRam && GCPhysLast >= pRam->GCPhys) 1890 1931 { … … 2028 2069 VM_ASSERT_EMT(pVM); 2029 2070 2071 /** @todo this needs to own the PGM lock! */ 2030 2072 /* 2031 2073 * First deregister the handler, then check if we should remove the ram range. 
… … 2036 2078 RTGCPHYS GCPhysLast = GCPhys + (cb - 1); 2037 2079 PPGMRAMRANGE pRamPrev = NULL; 2038 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;2080 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 2039 2081 while (pRam && GCPhysLast >= pRam->GCPhys) 2040 2082 { … … 2122 2164 2123 2165 PGMPhysInvalidatePageMapTLB(pVM); 2166 #ifdef PGM_USE_RAMRANGE_TLB 2167 pgmPhysInvalidRamRangeTlbs(pVM); 2168 #endif 2124 2169 return rc; 2125 2170 } … … 2428 2473 bool fRamExists = false; 2429 2474 PPGMRAMRANGE pRamPrev = NULL; 2430 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;2475 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 2431 2476 while (pRam && GCPhysLast >= pRam->GCPhys) 2432 2477 { … … 2589 2634 { 2590 2635 /* Restore the RAM pages we've replaced. */ 2591 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;2636 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 2592 2637 while (pRam->GCPhys > pCur->RamRange.GCPhysLast) 2593 2638 pRam = pRam->pNextR3; … … 2630 2675 2631 2676 PGMPhysInvalidatePageMapTLB(pVM); 2677 #ifdef PGM_USE_RAMRANGE_TLB 2678 pgmPhysInvalidRamRangeTlbs(pVM); 2679 #endif 2632 2680 pgmUnlock(pVM); 2633 2681 … … 2830 2878 bool fRamExists = false; 2831 2879 PPGMRAMRANGE pRamPrev = NULL; 2832 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;2880 PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 2833 2881 while (pRam && GCPhysLast >= pRam->GCPhys) 2834 2882 { … … 3202 3250 if (!PGMROMPROT_IS_ROM(pRomPage->enmProt)) 3203 3251 { 3204 pShadowPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);3252 pShadowPage = pgmPhysGetPage(pVM, GCPhys); 3205 3253 AssertLogRelReturn(pShadowPage, VERR_INTERNAL_ERROR); 3206 3254 } … … 3413 3461 3414 3462 /* flush references to the page. */ 3415 PPGMPAGE pRamPage = pgmPhysGetPage( &pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));3463 PPGMPAGE pRamPage = pgmPhysGetPage(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT)); 3416 3464 int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage, 3417 3465 true /*fFlushPTEs*/, &fFlushTLB); … … 3904 3952 3905 3953 PPGMPAGE pPage; 3906 rc = pgmPhysGetPageEx( &pVM->pgm.s, GCPhys, &pPage);3954 rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 3907 3955 AssertRC(rc); 3908 3956 … … 4086 4134 if (idPage != NIL_GMM_PAGEID) 4087 4135 { 4088 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3;4136 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; 4089 4137 pRam; 4090 4138 pRam = pRam->pNextR3) … … 4251 4299 PPGMRAMRANGE pRam; 4252 4300 PPGMPAGE pPage; 4253 int rc = pgmPhysGetPageAndRangeEx( &pVM->pgm.s, GCPhys, &pPage, &pRam);4301 int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam); 4254 4302 if (RT_SUCCESS(rc)) 4255 4303 { … … 4305 4353 /* Get a ring-3 mapping of the address. */ 4306 4354 PPGMPAGER3MAPTLBE pTlbe; 4307 rc2 = pgmPhysPageQueryTlbe( &pVM->pgm.s, GCPhys, &pTlbe);4355 rc2 = pgmPhysPageQueryTlbe(pVM, GCPhys, &pTlbe); 4308 4356 AssertLogRelRCReturn(rc2, rc2); 4309 4357 *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK)); -
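The restructured copy loops at the top of PGMPhys.cpp replace the per-iteration range search with one pgmPhysGetRangeAtOrAbove() lookup before the loop and an "advance range if necessary" step at the bottom of each pass; the old "if (!pRam) break;" exit is folded into the unassigned-space size computation. Below is a minimal stand-alone sketch of that walk over a sorted, non-overlapping list; Range and countAssigned are illustrative stand-ins, not VBox types.

    #include <stdint.h>

    typedef struct Range { uint64_t base, cb; struct Range *next; } Range;

    /* Count how many of the cbLeft bytes starting at addr fall inside some
     * range; mirrors the new loop shape: look up once, advance at the end. */
    static uint64_t countAssigned(Range *pRam, uint64_t addr, uint64_t cbLeft)
    {
        uint64_t cbAssigned = 0;
        while (pRam && addr > pRam->base + pRam->cb - 1)   /* stand-in for pgmPhysGetRangeAtOrAbove */
            pRam = pRam->next;
        for (;;)
        {
            uint64_t cb;
            if (pRam && addr >= pRam->base)                /* inside a range */
            {
                cb = pRam->base + pRam->cb - addr;
                if (cb > cbLeft)
                    cb = cbLeft;
                cbAssigned += cb;
            }
            else                                           /* unassigned address space */
                cb = pRam ? pRam->base - addr : UINT64_MAX;
            if (cb >= cbLeft)
                return cbAssigned;
            cbLeft -= cb;
            addr   += cb;
            while (pRam && addr > pRam->base + pRam->cb - 1)   /* advance range if necessary */
                pRam = pRam->next;
        }
    }

Moving the advance to the loop tail means the body never re-searches from the list head, which is what lets the TLB-backed pgmPhysGetRangeAtOrAbove() in PGMInline.h (further down) pay off.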
trunk/src/VBox/VMM/VMMR3/PGMPool.cpp
r35696 r36891 594 594 } 595 595 else if ( ( pPage->cModifications < 96 /* it's cheaper here. */ 596 || pgmPoolIsPageLocked( &pVM->pgm.s,pPage)596 || pgmPoolIsPageLocked(pPage) 597 597 ) 598 598 && cbBuf <= 4) … … 643 643 unsigned cModifiedPages = 0; NOREF(cModifiedPages); 644 644 unsigned cLeft = pPool->cUsedPages; 645 u nsignediPage = pPool->cCurPages;645 uint32_t iPage = pPool->cCurPages; 646 646 while (--iPage >= PGMPOOL_IDX_FIRST) 647 647 { … … 799 799 * Clear all the GCPhys links and rebuild the phys ext free list. 800 800 */ 801 for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRanges );801 for (PPGMRAMRANGE pRam = pPool->CTX_SUFF(pVM)->pgm.s.CTX_SUFF(pRamRangesX); 802 802 pRam; 803 803 pRam = pRam->CTX_SUFF(pNext)) -
trunk/src/VBox/VMM/VMMR3/PGMSavedState.cpp
r36042 r36891 223 223 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT); 224 224 PPGMPAGE pPage; 225 int rc = pgmPhysGetPageWithHintEx( &pVM->pgm.s, GCPhys, &pPage, &pRamHint);225 int rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint); 226 226 AssertLogRelMsgRC(rc, ("%Rrc GCPhys=%RGp\n", rc, GCPhys)); 227 227 if (RT_SUCCESS(rc)) … … 419 419 PPGMPAGE pPage; 420 420 if (PGMROMPROT_IS_ROM(enmProt)) 421 pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);421 pPage = pgmPhysGetPage(pVM, GCPhys); 422 422 else 423 423 pPage = &pRom->aPages[iPage].Virgin; … … 513 513 PGMROMPROT enmProt = pRomPage->enmProt; 514 514 RTGCPHYS GCPhys = pRom->GCPhys + ((RTGCPHYS)iPage << PAGE_SHIFT); 515 PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage( &pVM->pgm.s, GCPhys);515 PPGMPAGE pPage = PGMROMPROT_IS_ROM(enmProt) ? &pRomPage->Shadow : pgmPhysGetPage(pVM, GCPhys); 516 516 bool fZero = PGM_PAGE_IS_ZERO(pPage) || PGM_PAGE_IS_BALLOONED(pPage); Assert(!PGM_PAGE_IS_BALLOONED(pPage)); /* Shouldn't be ballooned. */ 517 517 int rc = VINF_SUCCESS; … … 1048 1048 do 1049 1049 { 1050 for (pCur = pVM->pgm.s.pRamRanges R3; pCur; pCur = pCur->pNextR3)1050 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1051 1051 { 1052 1052 if ( !pCur->paLSPages … … 1304 1304 { 1305 1305 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen; 1306 for (pCur = pVM->pgm.s.pRamRanges R3; pCur; pCur = pCur->pNextR3)1306 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1307 1307 { 1308 1308 if ( pCur->GCPhysLast > GCPhysCur … … 1531 1531 { 1532 1532 uint32_t const idRamRangesGen = pVM->pgm.s.idRamRangesGen; 1533 for (pCur = pVM->pgm.s.pRamRanges R3; pCur; pCur = pCur->pNextR3)1533 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1534 1534 { 1535 1535 if ( pCur->GCPhysLast > GCPhysCur … … 1746 1746 do 1747 1747 { 1748 for (pCur = pVM->pgm.s.pRamRanges R3; pCur; pCur = pCur->pNextR3)1748 for (pCur = pVM->pgm.s.pRamRangesXR3; pCur; pCur = pCur->pNextR3) 1749 1749 { 1750 1750 if (pCur->paLSPages) … … 2334 2334 */ 2335 2335 uint32_t i = 0; 2336 for (PPGMRAMRANGE pRam = pPGM->pRamRanges R3; ; pRam = pRam->pNextR3, i++)2336 for (PPGMRAMRANGE pRam = pPGM->pRamRangesXR3; ; pRam = pRam->pNextR3, i++) 2337 2337 { 2338 2338 /* Check the sequence number / separator. */ … … 2641 2641 2642 2642 PPGMPAGE pPage; 2643 rc = pgmPhysGetPageWithHintEx( &pVM->pgm.s, GCPhys, &pPage, &pRamHint);2643 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pPage, &pRamHint); 2644 2644 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc); 2645 2645 … … 2858 2858 if (!pRealPage) 2859 2859 { 2860 rc = pgmPhysGetPageWithHintEx( &pVM->pgm.s, GCPhys, &pRealPage, &pRamHint);2860 rc = pgmPhysGetPageWithHintEx(pVM, GCPhys, &pRealPage, &pRamHint); 2861 2861 AssertLogRelMsgRCReturn(rc, ("rc=%Rrc %RGp\n", rc, GCPhys), rc); 2862 2862 } … … 3176 3176 pVM->pgm.s.fMappingsFixedRestored = false; 3177 3177 if ( pVM->pgm.s.fMappingsFixed 3178 && pgmMapAreMappingsEnabled( &pVM->pgm.s))3178 && pgmMapAreMappingsEnabled(pVM)) 3179 3179 { 3180 3180 RTGCPTR GCPtrFixed = pVM->pgm.s.GCPtrMappingFixed; … … 3213 3213 * when restoring other components like PATM. 3214 3214 */ 3215 if (pgmMapAreMappingsFloating( &pVM->pgm.s))3215 if (pgmMapAreMappingsFloating(pVM)) 3216 3216 { 3217 3217 PVMCPU pVCpu = &pVM->aCpus[0]; -
trunk/src/VBox/VMM/VMMR3/PGMSharedPage.cpp
r35696 r36891 262 262 case VINF_SUCCESS: 263 263 { 264 PPGMPAGE pPage = pgmPhysGetPage( &pVM->pgm.s, GCPhys);264 PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys); 265 265 if (pPage) 266 266 { … … 316 316 pgmLock(pVM); 317 317 318 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRanges R3; pRam; pRam = pRam->pNextR3)318 for (PPGMRAMRANGE pRam = pVM->pgm.s.pRamRangesXR3; pRam; pRam = pRam->pNextR3) 319 319 { 320 320 PPGMPAGE pPage = &pRam->aPages[0]; -
trunk/src/VBox/VMM/include/PGMGstDefs.h
r35333 r36891 140 140 # define GST_GET_PTE_GCPHYS(Pte) ((Pte).u & GST_PDE_PG_MASK) 141 141 # define GST_GET_PDE_GCPHYS(Pde) ((Pde).u & GST_PDE_PG_MASK) 142 # define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) pgmGstGet4MBPhysPage( &(pVM)->pgm.s, Pde)142 # define GST_GET_BIG_PDE_GCPHYS(pVM, Pde) pgmGstGet4MBPhysPage((pVM), Pde) 143 143 # define GST_GET_PDE_SHW_FLAGS(pVCpu, Pde) ((Pde).u & (X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A)) 144 144 # define GST_GET_BIG_PDE_SHW_FLAGS(pVCpu, Pde) \ -
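GST_GET_BIG_PDE_GCPHYS() now passes the VM handle straight through to pgmGstGet4MBPhysPage(), whose body appears in the PGMInline.h hunks below: for a 4MB page directory entry, PSE-36 stores physical address bits 32-39 in PDE bits 13-20 (u8PageNoHigh), and the result is clamped with GCPhys4MBPSEMask. A stand-alone sketch of that assembly, with local names and constants standing in for the X86_PDE4M_* definitions:

    #include <stdint.h>

    /* Illustrative PSE-36 large-page address assembly; not the VBox code. */
    static uint64_t example4MBPhysPage(uint32_t uPde, uint64_t fPseMask)
    {
        uint64_t GCPhys = uPde & UINT32_C(0xffc00000);      /* PDE bits 22-31 -> phys 22-31 */
        GCPhys |= (uint64_t)((uPde >> 13) & 0xff) << 32;    /* PDE bits 13-20 -> phys 32-39 */
        return GCPhys & fPseMask;                           /* clamp to CPU-supported bits */
    }

For example, uPde = 0x0040A063 has 0x05 in bits 13-20, so the sketch yields 0x500400000 when the mask allows 36-bit addresses.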
trunk/src/VBox/VMM/include/PGMInline.h
r36009 r36891 46 46 */ 47 47 48 /** @todo Split out all the inline stuff into a separate file. Then we can49 * include it later when VM and VMCPU are defined and so avoid all that50 * &pVM->pgm.s and &pVCpu->pgm.s stuff. It also chops ~1600 lines off51 * this file and will make it somewhat easier to navigate... */52 53 48 /** 54 49 * Gets the PGMRAMRANGE structure for a guest page. … … 57 52 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition. 58 53 * 59 * @param p PGM PGM handle.54 * @param pVM The VM handle. 60 55 * @param GCPhys The GC physical address. 61 56 */ 62 DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PPGM pPGM, RTGCPHYS GCPhys) 63 { 57 DECLINLINE(PPGMRAMRANGE) pgmPhysGetRange(PVM pVM, RTGCPHYS GCPhys) 58 { 59 #ifdef PGM_USE_RAMRANGE_TLB 60 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 61 if (!pRam || GCPhys - pRam->GCPhys >= pRam->cb) 62 pRam = pgmPhysGetRangeSlow(pVM, GCPhys); 63 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits)); 64 return pRam; 65 66 #else 64 67 /* 65 68 * Optimize for the first range. 66 69 */ 67 PPGMRAMRANGE pRam = p PGM->CTX_SUFF(pRamRanges);70 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 68 71 RTGCPHYS off = GCPhys - pRam->GCPhys; 69 72 if (RT_UNLIKELY(off >= pRam->cb)) … … 78 81 } 79 82 return pRam; 80 } 83 #endif 84 } 85 86 87 /** 88 * Gets the PGMRAMRANGE structure for a guest page, if unassigned get the ram 89 * range above it. 90 * 91 * @returns Pointer to the RAM range on success. 92 * @returns NULL if the address is located after the last range. 93 * 94 * @param pVM The VM handle. 95 * @param GCPhys The GC physical address. 96 */ 97 DECLINLINE(PPGMRAMRANGE) pgmPhysGetRangeAtOrAbove(PVM pVM, RTGCPHYS GCPhys) 98 { 99 #ifdef PGM_USE_RAMRANGE_TLB 100 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 101 if ( !pRam 102 || (GCPhys - pRam->GCPhys) >= pRam->cb) 103 return pgmPhysGetRangeAtOrAboveSlow(pVM, GCPhys); 104 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits)); 105 return pRam; 106 107 #else 108 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 109 while (pRam && GCPhys > pRam->GCPhysLast) 110 pRam = pRam->CTX_SUFF(pNext); 111 return pRam; 112 #endif 113 } 114 81 115 82 116 … … 87 121 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition. 88 122 * 89 * @param p PGM PGM handle.123 * @param pVM The VM handle. 90 124 * @param GCPhys The GC physical address. 91 125 */ 92 DECLINLINE(PPGMPAGE) pgmPhysGetPage(PPGM pPGM, RTGCPHYS GCPhys) 93 { 126 DECLINLINE(PPGMPAGE) pgmPhysGetPage(PVM pVM, RTGCPHYS GCPhys) 127 { 128 #ifdef PGM_USE_RAMRANGE_TLB 129 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 130 RTGCPHYS off; 131 if ( !pRam 132 || (off = GCPhys - pRam->GCPhys) >= pRam->cb) 133 return pgmPhysGetPageSlow(pVM, GCPhys); 134 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits)); 135 return &pRam->aPages[off >> PAGE_SHIFT]; 136 137 #else 94 138 /* 95 139 * Optimize for the first range. 96 140 */ 97 PPGMRAMRANGE pRam = p PGM->CTX_SUFF(pRamRanges);141 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 98 142 RTGCPHYS off = GCPhys - pRam->GCPhys; 99 143 if (RT_UNLIKELY(off >= pRam->cb)) … … 108 152 } 109 153 return &pRam->aPages[off >> PAGE_SHIFT]; 154 #endif 110 155 } 111 156 … … 120 165 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid. 
121 166 * 122 * @param p PGM PGM handle.167 * @param pVM The VM handle. 123 168 * @param GCPhys The GC physical address. 124 169 * @param ppPage Where to store the page pointer on success. 125 170 */ 126 DECLINLINE(int) pgmPhysGetPageEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage) 127 { 171 DECLINLINE(int) pgmPhysGetPageEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage) 172 { 173 #ifdef PGM_USE_RAMRANGE_TLB 174 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 175 RTGCPHYS off; 176 if ( !pRam 177 || (off = GCPhys - pRam->GCPhys) >= pRam->cb) 178 return pgmPhysGetPageExSlow(pVM, GCPhys, ppPage); 179 *ppPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> PAGE_SHIFT]; 180 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits)); 181 return VINF_SUCCESS; 182 183 #else 128 184 /* 129 185 * Optimize for the first range. 130 186 */ 131 PPGMRAMRANGE pRam = p PGM->CTX_SUFF(pRamRanges);187 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 132 188 RTGCPHYS off = GCPhys - pRam->GCPhys; 133 189 if (RT_UNLIKELY(off >= pRam->cb)) … … 146 202 *ppPage = &pRam->aPages[off >> PAGE_SHIFT]; 147 203 return VINF_SUCCESS; 204 #endif 148 205 } 149 206 … … 160 217 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if the address isn't valid. 161 218 * 162 * @param p PGM PGM handle.219 * @param pVM The VM handle. 163 220 * @param GCPhys The GC physical address. 164 221 * @param ppPage Where to store the page pointer on success. … … 166 223 * The caller initializes this to NULL before the call. 167 224 */ 168 DECLINLINE(int) pgmPhysGetPageWithHintEx(P PGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint)225 DECLINLINE(int) pgmPhysGetPageWithHintEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRamHint) 169 226 { 170 227 RTGCPHYS off; … … 173 230 || RT_UNLIKELY((off = GCPhys - pRam->GCPhys) >= pRam->cb)) 174 231 { 175 pRam = pPGM->CTX_SUFF(pRamRanges); 232 #ifdef PGM_USE_RAMRANGE_TLB 233 pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 234 if ( !pRam 235 || (off = GCPhys - pRam->GCPhys) >= pRam->cb) 236 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRamHint); 237 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits)); 238 239 #else 240 pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 176 241 off = GCPhys - pRam->GCPhys; 177 242 if (RT_UNLIKELY(off >= pRam->cb)) … … 188 253 } while (off >= pRam->cb); 189 254 } 255 #endif 190 256 *ppRamHint = pRam; 191 257 } … … 201 267 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition. 202 268 * 203 * @param p PGM PGM handle.269 * @param pVM The VM handle. 204 270 * @param GCPhys The GC physical address. 205 * @param ppRam Where to store the pointer to the PGMRAMRANGE. 206 */ 207 DECLINLINE(PPGMPAGE) pgmPhysGetPageAndRange(PPGM pPGM, RTGCPHYS GCPhys, PPGMRAMRANGE *ppRam) 208 { 271 * @param ppPage Where to store the pointer to the PGMPAGE structure. 272 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure. 
273 */ 274 DECLINLINE(int) pgmPhysGetPageAndRangeEx(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam) 275 { 276 #ifdef PGM_USE_RAMRANGE_TLB 277 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 278 RTGCPHYS off; 279 if ( !pRam 280 || (off = GCPhys - pRam->GCPhys) >= pRam->cb) 281 return pgmPhysGetPageAndRangeExSlow(pVM, GCPhys, ppPage, ppRam); 282 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,RamRangeTlbHits)); 283 284 #else 209 285 /* 210 286 * Optimize for the first range. 211 287 */ 212 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges); 213 RTGCPHYS off = GCPhys - pRam->GCPhys; 214 if (RT_UNLIKELY(off >= pRam->cb)) 215 { 216 do 217 { 218 pRam = pRam->CTX_SUFF(pNext); 219 if (RT_UNLIKELY(!pRam)) 220 return NULL; 221 off = GCPhys - pRam->GCPhys; 222 } while (off >= pRam->cb); 223 } 224 *ppRam = pRam; 225 return &pRam->aPages[off >> PAGE_SHIFT]; 226 } 227 228 229 /** 230 * Gets the PGMPAGE structure for a guest page together with the PGMRAMRANGE. 231 * 232 * @returns Pointer to the page on success. 233 * @returns NULL on a VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS condition. 234 * 235 * @param pPGM PGM handle. 236 * @param GCPhys The GC physical address. 237 * @param ppPage Where to store the pointer to the PGMPAGE structure. 238 * @param ppRam Where to store the pointer to the PGMRAMRANGE structure. 239 */ 240 DECLINLINE(int) pgmPhysGetPageAndRangeEx(PPGM pPGM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam) 241 { 242 /* 243 * Optimize for the first range. 244 */ 245 PPGMRAMRANGE pRam = pPGM->CTX_SUFF(pRamRanges); 288 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 246 289 RTGCPHYS off = GCPhys - pRam->GCPhys; 247 290 if (RT_UNLIKELY(off >= pRam->cb)) … … 259 302 } while (off >= pRam->cb); 260 303 } 304 #endif 261 305 *ppRam = pRam; 262 306 *ppPage = &pRam->aPages[off >> PAGE_SHIFT]; … … 269 313 * 270 314 * @returns VBox status. 271 * @param p PGM PGM handle.315 * @param pVM The VM handle. 272 316 * @param GCPhys The GC physical address. 273 317 * @param pHCPhys Where to store the corresponding HC physical address. … … 276 320 * Avoid when writing new code! 277 321 */ 278 DECLINLINE(int) pgmRamGCPhys2HCPhys(P PGM pPGM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys)322 DECLINLINE(int) pgmRamGCPhys2HCPhys(PVM pVM, RTGCPHYS GCPhys, PRTHCPHYS pHCPhys) 279 323 { 280 324 PPGMPAGE pPage; 281 int rc = pgmPhysGetPageEx(p PGM, GCPhys, &pPage);325 int rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage); 282 326 if (RT_FAILURE(rc)) 283 327 return rc; … … 344 388 * Get the ram range. 345 389 */ 346 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges); 390 #ifdef PGM_USE_RAMRANGE_TLB 391 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 392 RTGCPHYS off; 393 if ( !pRam 394 || (off = GCPhys - pRam->GCPhys) >= pRam->cb 395 /** @todo || page state stuff */ 396 ) 397 #else 398 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 347 399 RTGCPHYS off = GCPhys - pRam->GCPhys; 348 400 if (RT_UNLIKELY(off >= pRam->cb 349 401 /** @todo || page state stuff */)) 402 #endif 350 403 { 351 404 /* This case is not counted into StatRZDynMapGCPageInl. 
*/ … … 418 471 */ 419 472 PVM pVM = pVCpu->CTX_SUFF(pVM); 420 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRanges); 473 #ifdef PGM_USE_RAMRANGE_TLB 474 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)]; 475 RTGCPHYS off; 476 if ( !pRam 477 || (off = GCPhys - pRam->GCPhys) >= pRam->cb 478 /** @todo || page state stuff */ 479 ) 480 #else 481 PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(pRamRangesX); 421 482 RTGCPHYS off = GCPhys - pRam->GCPhys; 422 483 if (RT_UNLIKELY(off >= pRam->cb 423 484 /** @todo || page state stuff */)) 485 #endif 424 486 { 425 487 /* This case is not counted into StatRZDynMapGCPageInl. */ … … 513 575 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address. 514 576 * 515 * @param p PGM The PGM instancehandle.577 * @param pVM The VM handle. 516 578 * @param GCPhys The address of the guest page. 517 579 * @param ppTlbe Where to store the pointer to the TLB entry. 518 580 */ 519 DECLINLINE(int) pgmPhysPageQueryTlbe(P PGM pPGM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)581 DECLINLINE(int) pgmPhysPageQueryTlbe(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe) 520 582 { 521 583 int rc; 522 PPGMPAGEMAPTLBE pTlbe = &p PGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];584 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)]; 523 585 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK)) 524 586 { 525 STAM_COUNTER_INC(&p PGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));587 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits)); 526 588 rc = VINF_SUCCESS; 527 589 } 528 590 else 529 rc = pgmPhysPageLoadIntoTlb(p PGM, GCPhys);591 rc = pgmPhysPageLoadIntoTlb(pVM, GCPhys); 530 592 *ppTlbe = pTlbe; 531 593 return rc; … … 541 603 * @retval VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid physical address. 542 604 * 543 * @param p PGM The PGM instancehandle.605 * @param pVM The VM handle. 544 606 * @param pPage Pointer to the PGMPAGE structure corresponding to 545 607 * GCPhys. … … 547 609 * @param ppTlbe Where to store the pointer to the TLB entry. 548 610 */ 549 DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(P PGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe)611 DECLINLINE(int) pgmPhysPageQueryTlbeWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys, PPPGMPAGEMAPTLBE ppTlbe) 550 612 { 551 613 int rc; 552 PPGMPAGEMAPTLBE pTlbe = &p PGM->CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)];614 PPGMPAGEMAPTLBE pTlbe = &pVM->pgm.s.CTXSUFF(PhysTlb).aEntries[PGM_PAGEMAPTLB_IDX(GCPhys)]; 553 615 if (pTlbe->GCPhys == (GCPhys & X86_PTE_PAE_PG_MASK)) 554 616 { 555 STAM_COUNTER_INC(&p PGM->CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));617 STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits)); 556 618 rc = VINF_SUCCESS; 557 619 } 558 620 else 559 rc = pgmPhysPageLoadIntoTlbWithPage(p PGM, pPage, GCPhys);621 rc = pgmPhysPageLoadIntoTlbWithPage(pVM, pPage, GCPhys); 560 622 *ppTlbe = pTlbe; 561 623 return rc; … … 567 629 /** 568 630 * Enables write monitoring for an allocated page. 569 * 570 * The caller is responsible for updating the shadow page tables. 571 * 631 * 632 * The caller is responsible for updating the shadow page tables. 633 * 572 634 * @param pVM The VM handle. 573 * @param pPage The page to write monitor. 635 * @param pPage The page to write monitor. 574 636 * @param GCPhysPage The address of the page. 
575 637 */ … … 585 647 if (PGM_PAGE_GET_PDE_TYPE(pPage) == PGM_PAGE_PDE_TYPE_PDE) 586 648 { 587 PPGMPAGE pFirstPage = pgmPhysGetPage( &pVM->pgm.s, GCPhysPage & X86_PDE2M_PAE_PG_MASK);649 PPGMPAGE pFirstPage = pgmPhysGetPage(pVM, GCPhysPage & X86_PDE2M_PAE_PG_MASK); 588 650 AssertFatal(pFirstPage); 589 651 if (PGM_PAGE_GET_PDE_TYPE(pFirstPage) == PGM_PAGE_PDE_TYPE_PDE) … … 638 700 * 639 701 * @returns guest physical address 640 * @param p PGM Pointer to the PGM instance data.702 * @param pVM The VM handle. 641 703 * @param Pde Guest Pde 642 704 */ 643 DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(P PGM pPGM, X86PDE Pde)705 DECLINLINE(RTGCPHYS) pgmGstGet4MBPhysPage(PVM pVM, X86PDE Pde) 644 706 { 645 707 RTGCPHYS GCPhys = Pde.u & X86_PDE4M_PG_MASK; 646 708 GCPhys |= (RTGCPHYS)Pde.b.u8PageNoHigh << 32; 647 709 648 return GCPhys & p PGM->GCPhys4MBPSEMask;710 return GCPhys & pVM->pgm.s.GCPhys4MBPSEMask; 649 711 } 650 712 … … 1307 1369 1308 1370 /** 1309 * Clears one physical page of a virtual handler 1310 * 1311 * @param p PGM Pointer to the PGM instance.1312 * @param pCur Virtual handler structure1313 * @param iPage Physical page index1371 * Clears one physical page of a virtual handler. 1372 * 1373 * @param pVM The VM handle. 1374 * @param pCur Virtual handler structure. 1375 * @param iPage Physical page index. 1314 1376 * 1315 1377 * @remark Only used when PGM_SYNC_UPDATE_PAGE_BIT_VIRTUAL is being set, so no 1316 1378 * need to care about other handlers in the same page. 1317 1379 */ 1318 DECLINLINE(void) pgmHandlerVirtualClearPage(P PGM pPGM, PPGMVIRTHANDLER pCur, unsigned iPage)1380 DECLINLINE(void) pgmHandlerVirtualClearPage(PVM pVM, PPGMVIRTHANDLER pCur, unsigned iPage) 1319 1381 { 1320 1382 const PPGMPHYS2VIRTHANDLER pPhys2Virt = &pCur->aPhysToVirt[iPage]; … … 1331 1393 { 1332 1394 /* We're the head of the alias chain. */ 1333 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&p PGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove);1395 PPGMPHYS2VIRTHANDLER pRemove = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysRemove(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); NOREF(pRemove); 1334 1396 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL 1335 1397 AssertReleaseMsg(pRemove != NULL, … … 1352 1414 #endif 1353 1415 pNext->offNextAlias |= PGMPHYS2VIRTHANDLER_IS_HEAD; 1354 bool fRc = RTAvlroGCPhysInsert(&p PGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core);1416 bool fRc = RTAvlroGCPhysInsert(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, &pNext->Core); 1355 1417 AssertRelease(fRc); 1356 1418 } … … 1359 1421 { 1360 1422 /* Locate the previous node in the alias chain. */ 1361 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&p PGM->CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key);1423 PPGMPHYS2VIRTHANDLER pPrev = (PPGMPHYS2VIRTHANDLER)RTAvlroGCPhysGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers, pPhys2Virt->Core.Key); 1362 1424 #ifdef VBOX_STRICT_PGM_HANDLER_VIRTUAL 1363 1425 AssertReleaseMsg(pPrev != pPhys2Virt, … … 1405 1467 * Clear the ram flags for this page. 
1406 1468 */ 1407 PPGMPAGE pPage = pgmPhysGetPage(p PGM, pPhys2Virt->Core.Key);1469 PPGMPAGE pPage = pgmPhysGetPage(pVM, pPhys2Virt->Core.Key); 1408 1470 AssertReturnVoid(pPage); 1409 1471 PGM_PAGE_SET_HNDL_VIRT_STATE(pPage, PGM_PAGE_HNDL_VIRT_STATE_NONE); … … 1521 1583 * @param pPage PGM pool page 1522 1584 */ 1523 DECLINLINE(bool) pgmPoolIsPageLocked(PPGM pPGM, PPGMPOOLPAGE pPage)1585 DECLINLINE(bool) pgmPoolIsPageLocked(PPGMPOOLPAGE pPage) 1524 1586 { 1525 1587 if (pPage->cLocked) … … 1540 1602 * @param pVM VM handle. 1541 1603 */ 1542 DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(P PGM pPGM)1604 DECL_FORCE_INLINE(bool) pgmMapAreMappingsEnabled(PVM pVM) 1543 1605 { 1544 1606 #ifdef PGM_WITHOUT_MAPPINGS 1545 1607 /* There are no mappings in VT-x and AMD-V mode. */ 1546 Assert(p PGM->fMappingsDisabled);1608 Assert(pVM->pgm.s.fMappingsDisabled); 1547 1609 return false; 1548 1610 #else 1549 return !p PGM->fMappingsDisabled;1611 return !pVM->pgm.s.fMappingsDisabled; 1550 1612 #endif 1551 1613 } … … 1558 1620 * @param pVM The VM handle. 1559 1621 */ 1560 DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(P PGM pPGM)1622 DECL_FORCE_INLINE(bool) pgmMapAreMappingsFloating(PVM pVM) 1561 1623 { 1562 1624 #ifdef PGM_WITHOUT_MAPPINGS 1563 1625 /* There are no mappings in VT-x and AMD-V mode. */ 1564 Assert(p PGM->fMappingsDisabled);1626 Assert(pVM->pgm.s.fMappingsDisabled); 1565 1627 return false; 1566 1628 #else 1567 return !p PGM->fMappingsDisabled1568 && !p PGM->fMappingsFixed;1629 return !pVM->pgm.s.fMappingsDisabled 1630 && !pVM->pgm.s.fMappingsFixed; 1569 1631 #endif 1570 1632 } -
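All the new PGM_USE_RAMRANGE_TLB fast paths above share one shape: probe a single direct-mapped slot, and fall back to the list walk (the pgmPhys...Slow helpers) on a miss or a stale entry, counting hits with STAM. A generic sketch of such a cache, assuming the slow path refills the slot; Range, lookup and lookupSlow are stand-ins for PGMRAMRANGE and the slow helpers:

    #include <stddef.h>
    #include <stdint.h>

    #define TLB_ENTRIES  8                                    /* power of two */
    #define TLB_IDX(a)   (((a) >> 20) & (TLB_ENTRIES - 1))

    typedef struct Range { uint64_t base, cb; struct Range *next; } Range;

    static Range *g_apTlb[TLB_ENTRIES];                       /* zero-initialized */
    static Range *g_pHead;                                    /* sorted, non-overlapping */

    static Range *lookupSlow(uint64_t addr)
    {
        for (Range *p = g_pHead; p; p = p->next)
            if (addr - p->base < p->cb)                       /* unsigned wrap rejects addr < base */
            {
                g_apTlb[TLB_IDX(addr)] = p;                   /* refill the slot */
                return p;
            }
        return NULL;
    }

    static Range *lookup(uint64_t addr)
    {
        Range *p = g_apTlb[TLB_IDX(addr)];
        if (!p || addr - p->base >= p->cb)                    /* empty or stale slot */
            return lookupSlow(addr);
        return p;                                             /* hit */
    }

Because slots can go stale whenever the range list changes, every mutation site has to clear them; that is what the new pgmPhysInvalidRamRangeTlbs() calls and the apRamRangesTlbRC reset in the relocation callback (PGMPhys.cpp above) are for.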
trunk/src/VBox/VMM/include/PGMInternal.h
r36629 r36891 1326 1326 #define PGM_RAM_RANGE_IS_AD_HOC(pRam) \ 1327 1327 (!!( (pRam)->fFlags & (PGM_RAM_RANGE_FLAGS_AD_HOC_ROM | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO | PGM_RAM_RANGE_FLAGS_AD_HOC_MMIO2) ) ) 1328 1329 /* enable the tlbs. */ 1330 //#define PGM_USE_RAMRANGE_TLB 1331 /** The number of entries in the RAM range TLBs (there is one for each 1332 * context). Must be a power of two. */ 1333 #define PGM_RAMRANGE_TLB_ENTRIES 8 1334 1335 /** 1336 * Calculates the RAM range TLB index for the physical address. 1337 * 1338 * @returns RAM range TLB index. 1339 * @param GCPhys The guest physical address. 1340 */ 1341 #define PGM_RAMRANGE_TLB_IDX(a_GCPhys) ( ((a_GCPhys) >> 20) & (PGM_RAMRANGE_TLB_ENTRIES - 1) ) 1342 1328 1343 1329 1344 … … 2792 2807 STAMCOUNTER StatR3PageMapTlbHits; /**< R3: Ring-3/0 page mapper TLB hits. */ 2793 2808 STAMCOUNTER StatR3PageMapTlbMisses; /**< R3: Ring-3/0 page mapper TLB misses. */ 2809 STAMCOUNTER StatRZRamRangeTlbHits; /**< RC/R0: RAM range TLB hits. */ 2810 STAMCOUNTER StatRZRamRangeTlbMisses; /**< RC/R0: RAM range TLB misses. */ 2811 STAMCOUNTER StatR3RamRangeTlbHits; /**< R3: RAM range TLB hits. */ 2812 STAMCOUNTER StatR3RamRangeTlbMisses; /**< R3: RAM range TLB misses. */ 2794 2813 STAMPROFILE StatRZSyncCR3HandlerVirtualReset; /**< RC/R0: Profiling of the virtual handler resets. */ 2795 2814 STAMPROFILE StatRZSyncCR3HandlerVirtualUpdate; /**< RC/R0: Profiling of the virtual handler updates. */ … … 2946 2965 /** Pointer to the list of RAM ranges (Phys GC -> Phys HC conversion) - for R3. 2947 2966 * This is sorted by physical address and contains no overlapping ranges. */ 2948 R3PTRTYPE(PPGMRAMRANGE) pRamRangesR3; 2967 R3PTRTYPE(PPGMRAMRANGE) pRamRangesXR3; 2968 #ifdef PGM_USE_RAMRANGE_TLB 2969 /** Ram range TLB for R3. */ 2970 R3PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR3[PGM_RAMRANGE_TLB_ENTRIES]; 2971 #endif 2949 2972 /** PGM offset based trees - R3 Ptr. */ 2950 2973 R3PTRTYPE(PPGMTREES) pTreesR3; … … 2967 2990 /*RTR3PTR R3PtrAlignment0;*/ 2968 2991 2969 2970 /** R0 pointer corresponding to PGM::pRamRangesR3. */ 2971 R0PTRTYPE(PPGMRAMRANGE) pRamRangesR0; 2992 /** R0 pointer corresponding to PGM::pRamRangesXR3. */ 2993 R0PTRTYPE(PPGMRAMRANGE) pRamRangesXR0; 2994 #ifdef PGM_USE_RAMRANGE_TLB 2995 /** Ram range TLB for R0. */ 2996 R0PTRTYPE(PPGMRAMRANGE) apRamRangesTlbR0[PGM_RAMRANGE_TLB_ENTRIES]; 2997 #endif 2972 2998 /** PGM offset based trees - R0 Ptr. */ 2973 2999 R0PTRTYPE(PPGMTREES) pTreesR0; … … 2984 3010 2985 3011 2986 /** RC pointer corresponding to PGM::pRamRangesR3. */ 2987 RCPTRTYPE(PPGMRAMRANGE) pRamRangesRC; 3012 /** RC pointer corresponding to PGM::pRamRangesXR3. */ 3013 RCPTRTYPE(PPGMRAMRANGE) pRamRangesXRC; 3014 #ifdef PGM_USE_RAMRANGE_TLB 3015 /** Ram range TLB for RC. */ 3016 RCPTRTYPE(PPGMRAMRANGE) apRamRangesTlbRC[PGM_RAMRANGE_TLB_ENTRIES]; 3017 #endif 2988 3018 /** PGM offset based trees - RC Ptr. 
*/ 2989 3019 RCPTRTYPE(PPGMTREES) pTreesRC; … … 3771 3801 int pgmPhysAllocLargePage(PVM pVM, RTGCPHYS GCPhys); 3772 3802 int pgmPhysRecheckLargePage(PVM pVM, RTGCPHYS GCPhys, PPGMPAGE pLargePage); 3773 int pgmPhysPageLoadIntoTlb(P PGM pPGM, RTGCPHYS GCPhys);3774 int pgmPhysPageLoadIntoTlbWithPage(P PGM pPGM, PPGMPAGE pPage, RTGCPHYS GCPhys);3803 int pgmPhysPageLoadIntoTlb(PVM pVM, RTGCPHYS GCPhys); 3804 int pgmPhysPageLoadIntoTlbWithPage(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys); 3775 3805 void pgmPhysPageMakeWriteMonitoredWritable(PVM pVM, PPGMPAGE pPage); 3776 3806 int pgmPhysPageMakeWritable(PVM pVM, PPGMPAGE pPage, RTGCPHYS GCPhys); … … 3784 3814 VMMDECL(int) pgmPhysRomWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser); 3785 3815 int pgmPhysFreePage(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t *pcPendingPages, PPGMPAGE pPage, RTGCPHYS GCPhys); 3816 void pgmPhysInvalidRamRangeTlbs(PVM pVM); 3817 PPGMRAMRANGE pgmPhysGetRangeSlow(PVM pVM, RTGCPHYS GCPhys); 3818 PPGMRAMRANGE pgmPhysGetRangeAtOrAboveSlow(PVM pVM, RTGCPHYS GCPhys); 3819 PPGMPAGE pgmPhysGetPageSlow(PVM pVM, RTGCPHYS GCPhys); 3820 int pgmPhysGetPageExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage); 3821 int pgmPhysGetPageAndRangeExSlow(PVM pVM, RTGCPHYS GCPhys, PPPGMPAGE ppPage, PPGMRAMRANGE *ppRam); 3786 3822 3787 3823 #ifdef IN_RING3 -
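With PGM_RAMRANGE_TLB_ENTRIES = 8, PGM_RAMRANGE_TLB_IDX() hashes on address bits 20-22: each slot covers 1 MiB strides, and addresses 8 MiB apart alias to the same slot. A quick stand-alone check of the arithmetic, with the two macros copied from the header above:

    #include <assert.h>

    #define PGM_RAMRANGE_TLB_ENTRIES 8
    #define PGM_RAMRANGE_TLB_IDX(a_GCPhys) ( ((a_GCPhys) >> 20) & (PGM_RAMRANGE_TLB_ENTRIES - 1) )

    int main(void)
    {
        assert(PGM_RAMRANGE_TLB_IDX(0x00100000ULL) == 1);   /* 1 MiB   -> slot 1 */
        assert(PGM_RAMRANGE_TLB_IDX(0x00900000ULL) == 1);   /* 9 MiB   -> slot 1 (aliases) */
        assert(PGM_RAMRANGE_TLB_IDX(0xE0000000ULL) == 0);   /* 3.5 GiB -> slot 0 */
        return 0;
    }

Note that the TLB ships disabled in this changeset (the "//#define PGM_USE_RAMRANGE_TLB" line), so the slow paths remain the only paths until it is turned on.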
trunk/src/VBox/VMM/testcase/tstVMStructRC.cpp
r36768 r36891 564 564 GEN_CHECK_OFF(PGM, enmHostMode); 565 565 GEN_CHECK_OFF(PGM, GCPhys4MBPSEMask); 566 GEN_CHECK_OFF(PGM, pRamRanges R3);567 GEN_CHECK_OFF(PGM, pRamRanges R0);568 GEN_CHECK_OFF(PGM, pRamRanges RC);566 GEN_CHECK_OFF(PGM, pRamRangesXR3); 567 GEN_CHECK_OFF(PGM, pRamRangesXR0); 568 GEN_CHECK_OFF(PGM, pRamRangesXRC); 569 569 GEN_CHECK_OFF(PGM, pRomRangesR3); 570 570 GEN_CHECK_OFF(PGM, pRomRangesR0);
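The tstVMStructRC updates are why the pRamRangesX rename must touch the testcase at all: GEN_CHECK_OFF() records member offsets by name so they can be compared between the ring-3 and raw-mode context compilers. A much-simplified illustration of offset checking with offsetof; PGMLike and CHECK_OFF are hypothetical, while the real macro generates cross-compiler comparison code:

    #include <stddef.h>
    #include <stdio.h>

    struct PGMLike { void *pRamRangesXR3; void *pRamRangesXR0; };

    #define CHECK_OFF(a_Type, a_Member) \
        printf("%-16s at offset %u\n", #a_Member, (unsigned)offsetof(struct a_Type, a_Member))

    int main(void)
    {
        CHECK_OFF(PGMLike, pRamRangesXR3);
        CHECK_OFF(PGMLike, pRamRangesXR0);
        return 0;
    }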