Changeset 13062 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Oct 8, 2008 8:06:56 AM (16 years ago)
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 5 edited
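All five edits follow the same pattern: accesses that went through the legacy CTXSUFF() macro or the old ...HC member names (pTreesHC, pVMHC, pvPageHC) are switched to the newer CTX_SUFF() macro and the per-context R3/R0 member names (pTreesR3, pVMR3, pVMR0, pvPageR3). As an illustration only, here is a minimal, self-contained sketch of how such a context-suffix macro can be modelled; MY_CTX_SUFF, DEMOSTATE and the demo strings are hypothetical stand-ins, not the actual VirtualBox definitions, which live in the VBox headers:

    #include <stdio.h>

    /* Hypothetical stand-in for a context-suffix macro: append R0, RC or R3 to a
     * member name depending on which context this translation unit is built for. */
    #if defined(IN_RING0)
    # define MY_CTX_SUFF(a_Name) a_Name##R0
    #elif defined(IN_RC)
    # define MY_CTX_SUFF(a_Name) a_Name##RC
    #else /* default: ring-3 */
    # define MY_CTX_SUFF(a_Name) a_Name##R3
    #endif

    /* Demo structure with one pointer per context, mirroring members like pTreesR3/pTreesR0. */
    typedef struct DEMOSTATE
    {
        const char *pTreesR3;
        const char *pTreesR0;
        const char *pTreesRC;
    } DEMOSTATE;

    int main(void)
    {
        DEMOSTATE s = { "trees mapped for ring-3", "trees mapped for ring-0", "trees mapped for raw mode" };
        /* The same source line picks whichever member is valid in the current build context. */
        printf("%s\n", s.MY_CTX_SUFF(pTrees));
        return 0;
    }

Built with no extra defines this prints the ring-3 string; building the same file with -DIN_RING0 or -DIN_RC selects the other members. That is the idea behind lines such as pVM->pgm.s.CTX_SUFF(pTrees) in the diffs below.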
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
r13060 → r13062. The handler-tree lookups on the page-fault path move from the old CTXSUFF() accessor to CTX_SUFF():
- Line 279 (hypervisor virtual handler range check for the fault address): RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->HyperVirtHandlers, pvFault) becomes RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->HyperVirtHandlers, pvFault).
- Line 368 (physical handler lookup for GCPhysFault): RTAvlroGCPhysRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->PhysHandlers, GCPhysFault) becomes RTAvlroGCPhysRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers, GCPhysFault).
- Lines 451 and 558 (virtual handler lookups for pvFault): RTAvlroGCPtrRangeGet(&CTXSUFF(pVM->pgm.s.pTrees)->VirtHandlers, pvFault) becomes RTAvlroGCPtrRangeGet(&pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers, pvFault).
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
r13060 → r13062. Same CTXSUFF() to CTX_SUFF() conversion:
- Line 515: pVM->pgm.s.pHCPaePML4 = (PX86PML4)PGMPOOL_PAGE_2_PTR(pPool->CTXSUFF(pVM), pVM->pgm.s.pHCShwAmd64CR3) now reads pPool->CTX_SUFF(pVM).
- Lines 942 and 953: the virtual handler tree walks, RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, PGM_GST_NAME(VirtHandlerUpdateOne), &State) and RTAvlroGCPtrDoWithAll(&pVM->pgm.s.CTXSUFF(pTrees)->VirtHandlers, true, pgmHandlerVirtualResetOne, pVM), now use &pVM->pgm.s.CTX_SUFF(pTrees)->VirtHandlers.
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
r13046 → r13062. All accesses to the physical, virtual and phys-to-virt handler trees are converted to CTX_SUFF():
- pVM->pgm.s.CTXSUFF(pTrees)->PhysHandlers becomes pVM->pgm.s.CTX_SUFF(pTrees)->PhysHandlers in the RTAvlroGCPhysInsert/Remove/Get/RangeGet calls at lines 160, 293, 495, 525, 611, 660, 675, 717, 720, 732, 795, 872, 926 and 971.
- pPGM->CTXSUFF(pTrees)->PhysHandlers becomes pPGM->CTX_SUFF(pTrees)->PhysHandlers in the range and best-fit lookups at lines 396, 1368, 1371 and 1384.
- The PhysToVirtHandlers tree accesses change from &CTXSUFF(pVM->pgm.s.pTrees)-> (lines 1003 and 1434) and pVM->pgm.s.CTXSUFF(pTrees)-> (lines 1041, 1126 and 1130) to the uniform pVM->pgm.s.CTX_SUFF(pTrees)->PhysToVirtHandlers form.
- The CTXSUFF(&pVM->pgm.s.pTrees)-> form is replaced by &pVM->pgm.s.CTX_SUFF(pTrees)-> for PhysToVirtHandlers in pgmHandlerVirtualDumpPhysPages (line 1171) and for VirtHandlers in the handler verification walks (lines 1461 and 1478).
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
r13060 → r13062. The ring-3-only handler lookups (inside the #ifdef IN_RING3 blocks marked "@todo deal with this in GC and R0!") switch from the old HC member name to the R3 one: RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesHC->PhysHandlers, GCPhys) becomes RTAvlroGCPhysRangeGet(&pVM->pgm.s.pTreesR3->PhysHandlers, GCPhys) at lines 1205, 1480 and 1593.
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r13060 → r13062. The pool code gets the same treatment:
- pPool->CTXSUFF(pVM) becomes pPool->CTX_SUFF(pVM) throughout pgmPoolMonitorChainChanging and the cache, monitoring, tracking and flush code (lines 308, 392-393, 406-407, 413, 419, 433-434, 446-447, 453, 459, 472-473, 486, 506-507, 515, 538-539, 552-553, 569-570, 578, 596-597, 604, 619, 625, 637, 650, 656, 668, 1141, 1287, 1563, 1635, 1941, 1993, 2154-2155, 2677, 3009, 3070, 3107, 3130, 3402, 3410, 3420, 3430, 3530, 3562, 3650 and 3708).
- pPool->CTXSUFF(paUsers) becomes pPool->CTX_SUFF(paUsers) (lines 2112, 2201, 2268, 2776 and 3635), and pPool->CTXSUFF(paPhysExts) becomes pPool->CTX_SUFF(paPhysExts) (lines 2003, 2532, 2813, 2831, 2854, 2879, 2987 and 3660).
- The context-specific VM pointers are renamed: pPool->pVMHC becomes pPool->pVMR0 in the ring-0 mapping paths (PGMDynMapGCPageOff at line 252, pgmRamGCPhys2HCPtr at line 258) and pPool->pVMR3 in the ring-3-only paths (MMPage2Phys(pPool->pVMR3, pPage->pvPageR3) at line 3596, where pPage->pvPageHC also becomes pPage->pvPageR3, and PGMR3PoolGrow(pPool->pVMR3) at line 3866).
- PGMGetHyperCR3(CTXSUFF(pPool->pVM)) becomes PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) at lines 2680 and 3764-3766, and the non-ring-3 grow path CTXALLMID(VMM, CallHost)(pPool->CTXSUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0) becomes CTXALLMID(VMM, CallHost)(pPool->CTX_SUFF(pVM), VMMCALLHOST_PGM_POOL_GROW, 0) at line 3868.