Changeset 80268 in vbox for trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
- Timestamp: Aug 14, 2019 11:25:13 AM (5 years ago)
- Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
r80182 r80268 20 20 * Header Files * 21 21 *********************************************************************************************************************************/ 22 #define VBOX_BUGREF_9217_PART_I 22 23 #define LOG_GROUP LOG_GROUP_PGM_POOL 23 24 #include <VBox/vmm/pgm.h> … … 26 27 #include <VBox/vmm/cpum.h> 27 28 #include "PGMInternal.h" 28 #include <VBox/vmm/vm .h>29 #include <VBox/vmm/vmcc.h> 29 30 #include "PGMInline.h" 30 31 #include <VBox/disopcode.h> … … 142 143 * @param cb Size of data to read 143 144 */ 144 DECLINLINE(int) pgmPoolPhysSimpleReadGCPhys(PVM pVM, void *pvDst, void const *pvSrc, RTGCPHYS GCPhysSrc, size_t cb)145 DECLINLINE(int) pgmPoolPhysSimpleReadGCPhys(PVMCC pVM, void *pvDst, void const *pvSrc, RTGCPHYS GCPhysSrc, size_t cb) 145 146 { 146 147 #if defined(IN_RING3) … … 175 176 AssertMsg(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX, ("%u (idx=%u)\n", pPage->iMonitoredPrev, pPage->idx)); 176 177 const unsigned off = GCPhysFault & PAGE_OFFSET_MASK; 177 PVM 178 PVMCC pVM = pPool->CTX_SUFF(pVM); 178 179 NOREF(pVCpu); 179 180 … … 641 642 * @remark The REP prefix check is left to the caller because of STOSD/W. 642 643 */ 643 DECLINLINE(bool) pgmRZPoolMonitorIsReused(PVM pVM, PVMCPUpVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault,644 DECLINLINE(bool) pgmRZPoolMonitorIsReused(PVMCC pVM, PVMCPUCC pVCpu, PCPUMCTXCORE pRegFrame, PDISCPUSTATE pDis, RTGCPTR pvFault, 644 645 PPGMPOOLPAGE pPage) 645 646 { … … 808 809 * @param pvFault The fault address. 809 810 */ 810 DECLINLINE(int) pgmRZPoolAccessPfHandlerSTOSD(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,811 DECLINLINE(int) pgmRZPoolAccessPfHandlerSTOSD(PVMCC pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 811 812 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault) 812 813 { … … 839 840 * write situation, meaning that it's safe to write here. 
840 841 */ 841 PVMCPU 842 PVMCPUCC pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM)); 842 843 RTGCUINTPTR pu32 = (RTGCUINTPTR)pvFault; 843 844 while (pRegFrame->rcx) … … 877 878 * @param pfReused Reused state (in/out) 878 879 */ 879 DECLINLINE(int) pgmRZPoolAccessPfHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis,880 DECLINLINE(int) pgmRZPoolAccessPfHandlerSimple(PVMCC pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pDis, 880 881 PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault, bool *pfReused) 881 882 { … … 972 973 * @remarks The @a pvUser argument points to the PGMPOOLPAGE. 973 974 */ 974 DECLEXPORT(VBOXSTRICTRC) pgmRZPoolAccessPfHandler(PVM pVM, PVMCPUpVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame,975 DECLEXPORT(VBOXSTRICTRC) pgmRZPoolAccessPfHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, 975 976 RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser) 976 977 { … … 1276 1277 */ 1277 1278 PGM_ALL_CB2_DECL(VBOXSTRICTRC) 1278 pgmPoolAccessHandler(PVM pVM, PVMCPUpVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf,1279 pgmPoolAccessHandler(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, 1279 1280 PGMACCESSTYPE enmAccessType, PGMACCESSORIGIN enmOrigin, void *pvUser) 1280 1281 { … … 1392 1393 unsigned LastPTE = ~0U; /* initialized to shut up gcc */ 1393 1394 RTHCPHYS LastHCPhys = NIL_RTHCPHYS; /* initialized to shut up gcc */ 1394 PVM 1395 PVMCC pVM = pPool->CTX_SUFF(pVM); 1395 1396 1396 1397 # ifdef VBOX_STRICT … … 1458 1459 unsigned LastPTE = ~0U; /* initialized to shut up gcc */ 1459 1460 RTHCPHYS LastHCPhys = NIL_RTHCPHYS; /* initialized to shut up gcc */ 1460 PVM 1461 PVMCC pVM = pPool->CTX_SUFF(pVM); 1461 1462 1462 1463 # ifdef VBOX_STRICT … … 1655 1656 * @param fAllowRemoval Allow a reused page table to be removed 1656 1657 */ 1657 static void pgmPoolFlushDirtyPage(PVM pVM, PPGMPOOL pPool, unsigned idxSlot, bool 
fAllowRemoval = false)1658 static void pgmPoolFlushDirtyPage(PVMCC pVM, PPGMPOOL pPool, unsigned idxSlot, bool fAllowRemoval = false) 1658 1659 { 1659 1660 AssertCompile(RT_ELEMENTS(pPool->aidxDirtyPages) == RT_ELEMENTS(pPool->aDirtyPages)); … … 1751 1752 * @param pPage The page. 1752 1753 */ 1753 void pgmPoolAddDirtyPage(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage)1754 void pgmPoolAddDirtyPage(PVMCC pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage) 1754 1755 { 1755 1756 PGM_LOCK_ASSERT_OWNER(pVM); … … 1857 1858 * @param pVM The cross context VM structure. 1858 1859 */ 1859 void pgmPoolResetDirtyPages(PVM pVM)1860 void pgmPoolResetDirtyPages(PVMCC pVM) 1860 1861 { 1861 1862 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); … … 1920 1921 * @param GCPhysPT Physical address of the page table 1921 1922 */ 1922 void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT)1923 void pgmPoolInvalidateDirtyPage(PVMCC pVM, RTGCPHYS GCPhysPT) 1923 1924 { 1924 1925 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); … … 2029 2030 static int pgmPoolCacheFreeOne(PPGMPOOL pPool, uint16_t iUser) 2030 2031 { 2031 const PVM pVM = pPool->CTX_SUFF(pVM);2032 const PVMCC pVM = pPool->CTX_SUFF(pVM); 2032 2033 Assert(pPool->iAgeHead != pPool->iAgeTail); /* We shouldn't be here if there < 2 cached entries! */ 2033 2034 STAM_COUNTER_INC(&pPool->StatCacheFreeUpOne); … … 2509 2510 { 2510 2511 Assert(pPage->iMonitoredNext == NIL_PGMPOOL_IDX); Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX); 2511 PVM pVM = pPool->CTX_SUFF(pVM);2512 PVMCC pVM = pPool->CTX_SUFF(pVM); 2512 2513 const RTGCPHYS GCPhysPage = pPage->GCPhys & ~(RTGCPHYS)PAGE_OFFSET_MASK; 2513 2514 rc = PGMHandlerPhysicalRegister(pVM, GCPhysPage, GCPhysPage + PAGE_OFFSET_MASK, pPool->hAccessHandlerType, … … 2581 2582 * Remove the page from the monitored list or uninstall it if last. 
2582 2583 */ 2583 const PVM pVM = pPool->CTX_SUFF(pVM);2584 const PVMCC pVM = pPool->CTX_SUFF(pVM); 2584 2585 int rc; 2585 2586 if ( pPage->iMonitoredNext != NIL_PGMPOOL_IDX … … 2698 2699 * @param pVM The cross context VM structure. 2699 2700 */ 2700 static void pgmPoolMonitorModifiedClearAll(PVM pVM)2701 static void pgmPoolMonitorModifiedClearAll(PVMCC pVM) 2701 2702 { 2702 2703 pgmLock(pVM); … … 2737 2738 * the PGMPOOL_WITH_MONITORING \#ifdef. 2738 2739 */ 2739 int pgmPoolSyncCR3(PVMCPU pVCpu)2740 { 2741 PVM pVM = pVCpu->CTX_SUFF(pVM);2740 int pgmPoolSyncCR3(PVMCPUCC pVCpu) 2741 { 2742 PVMCC pVM = pVCpu->CTX_SUFF(pVM); 2742 2743 LogFlow(("pgmPoolSyncCR3 fSyncFlags=%x\n", pVCpu->pgm.s.fSyncFlags)); 2743 2744 … … 3491 3492 * The caller MUST initialized this to @a false. 3492 3493 */ 3493 int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs)3494 { 3495 PVMCPU pVCpu = VMMGetCpu(pVM);3494 int pgmPoolTrackUpdateGCPhys(PVMCC pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs) 3495 { 3496 PVMCPUCC pVCpu = VMMGetCpu(pVM); 3496 3497 pgmLock(pVM); 3497 3498 int rc = VINF_SUCCESS; … … 4122 4123 void pgmPoolTrackPhysExtDerefGCPhys(PPGMPOOL pPool, PPGMPOOLPAGE pPage, PPGMPAGE pPhysPage, uint16_t iPte) 4123 4124 { 4124 PVM 4125 PVMCC pVM = pPool->CTX_SUFF(pVM); 4125 4126 const unsigned cRefs = PGM_PAGE_GET_TD_CREFS(pPhysPage); 4126 4127 AssertFatalMsg(cRefs == PGMPOOL_TD_CREFS_PHYSEXT, ("cRefs=%d pPhysPage=%R[pgmpage] pPage=%p:{.idx=%d}\n", cRefs, pPhysPage, pPage, pPage->idx)); … … 4216 4217 * Lookup the page and check if it checks out before derefing it. 
4217 4218 */ 4218 PVM 4219 PVMCC pVM = pPool->CTX_SUFF(pVM); 4219 4220 PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhys); 4220 4221 if (pPhysPage) … … 4259 4260 */ 4260 4261 RTHCPHYS HCPhysHinted; 4261 PVM 4262 PVMCC pVM = pPool->CTX_SUFF(pVM); 4262 4263 PPGMPAGE pPhysPage = pgmPhysGetPage(pVM, GCPhysHint); 4263 4264 if (pPhysPage) … … 4683 4684 * Map the shadow page and take action according to the page kind. 4684 4685 */ 4685 PVM 4686 PVMCC pVM = pPool->CTX_SUFF(pVM); 4686 4687 void *pvShw = PGMPOOL_PAGE_2_PTR(pVM, pPage); 4687 4688 switch (pPage->enmKind) … … 4809 4810 int pgmPoolFlushPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fFlush) 4810 4811 { 4811 PVM 4812 PVMCC pVM = pPool->CTX_SUFF(pVM); 4812 4813 bool fFlushRequired = false; 4813 4814 … … 4934 4935 void pgmPoolFreeByPage(PPGMPOOL pPool, PPGMPOOLPAGE pPage, uint16_t iUser, uint32_t iUserTable) 4935 4936 { 4936 PVM pVM = pPool->CTX_SUFF(pVM);4937 PVMCC pVM = pPool->CTX_SUFF(pVM); 4937 4938 4938 4939 STAM_PROFILE_START(&pPool->StatFree, a); … … 4963 4964 static int pgmPoolMakeMoreFreePages(PPGMPOOL pPool, PGMPOOLKIND enmKind, uint16_t iUser) 4964 4965 { 4965 PVM pVM = pPool->CTX_SUFF(pVM);4966 PVMCC pVM = pPool->CTX_SUFF(pVM); 4966 4967 LogFlow(("pgmPoolMakeMoreFreePages: enmKind=%d iUser=%d\n", enmKind, iUser)); 4967 4968 NOREF(enmKind); … … 5321 5322 * including the root page. 5322 5323 */ 5323 for (VMCPUID i = 0; i < pVM->cCpus; i++) 5324 pgmR3ExitShadowModeBeforePoolFlush(&pVM->aCpus[i]); 5324 VMCC_FOR_EACH_VMCPU(pVM) 5325 pgmR3ExitShadowModeBeforePoolFlush(pVCpu); 5326 VMCC_FOR_EACH_VMCPU_END(pVM); 5327 5325 5328 5326 5329 /* … … 5436 5439 * Reinsert active pages into the hash and ensure monitoring chains are correct. 5437 5440 */ 5438 for (VMCPUID i = 0; i < pVM->cCpus; i++)5441 VMCC_FOR_EACH_VMCPU(pVM) 5439 5442 { 5440 5443 /* 5441 5444 * Re-enter the shadowing mode and assert Sync CR3 FF. 
5442 5445 */ 5443 PVMCPU pVCpu = &pVM->aCpus[i];5444 5446 pgmR3ReEnterShadowModeAfterPoolFlush(pVM, pVCpu); 5445 5447 VMCPU_FF_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3); 5446 5448 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH); 5447 5449 } 5450 VMCC_FOR_EACH_VMCPU_END(pVM); 5448 5451 5449 5452 STAM_PROFILE_STOP(&pPool->StatR3Reset, a);
Note: See TracChangeset for help on using the changeset viewer.