Changeset 18927 in vbox for trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
Timestamp: Apr 16, 2009 11:41:38 AM (16 years ago)
File: 1 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
--- trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r18666)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r18927)

…
  * for mapping conflicts and set the SyncCR3 FF if found.
  *
+ * @param   pVCpu       VMCPU handle
  * @param   pPool       The pool.
  * @param   pPage       The head page.
…
  *                      This need not be specified if the caller knows we won't do cross entry accesses.
  */
-void pgmPoolMonitorChainChanging(PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu)
+void pgmPoolMonitorChainChanging(PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, RTGCPHYS GCPhysFault, CTXTYPE(RTGCPTR, RTHCPTR, RTGCPTR) pvAddress, PDISCPUSTATE pCpu)
 {
     Assert(pPage->iMonitoredPrev == NIL_PGMPOOL_IDX);
…
                 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-                STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+                STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
                 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
                 break;
…
                 {
                     Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
-                    STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+                    STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
                     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                     LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
…
                 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-                STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+                STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
                 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw=%#x!\n", iShw));
                 break;
…
                 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
-                STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+                STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
                 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
                 break;
…
             {
                 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
-                STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+                STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                 LogFlow(("pgmPoolMonitorChainChanging: Detected pdpt conflict at iShw=%#x!\n", iShw));
…
             {
                 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
-                STAM_COUNTER_INC(&(pVM->pgm.s.StatRZGuestCR3WriteConflict));
+                STAM_COUNTER_INC(&(pVCpu->pgm.s.StatRZGuestCR3WriteConflict));
                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
                 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShw2=%#x!\n", iShw2));
…
  * @returns VBox status code suitable for scheduling.
  * @param   pVM         The VM handle.
+ * @param   pVCpu       The VMCPU handle.
  * @param   pPool       The pool.
  * @param   pPage       The pool page (head).
…
  * @param   pvFault     The fault address.
  */
-static int pgmPoolAccessHandlerFlush(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
+static int pgmPoolAccessHandlerFlush(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
                                      PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
 {
…
      */
     uint32_t cbWritten;
-    int rc2 = EMInterpretInstructionCPU(pVM, pCpu, pRegFrame, pvFault, &cbWritten);
+    int rc2 = EMInterpretInstructionCPU(pVM, pVCpu, pCpu, pRegFrame, pvFault, &cbWritten);
     if (RT_SUCCESS(rc2))
         pRegFrame->rip += pCpu->opsize;
…
      * write situation, meaning that it's safe to write here.
      */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PVMCPU pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM));
-#endif
     RTGCUINTPTR pu32 = (RTGCUINTPTR)pvFault;
     while (pRegFrame->ecx)
     {
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
         uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
-        pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
+        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
         PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #else
-        pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
+        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, (RTGCPTR)pu32, NULL);
 #endif
 #ifdef IN_RC
…
  * @returns VBox status code suitable for scheduling.
  * @param   pVM         The VM handle.
+ * @param   pVCpu       The VMCPU handle.
  * @param   pPool       The pool.
  * @param   pPage       The pool page (head).
…
  * @param   pvFault     The fault address.
  */
-DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
+DECLINLINE(int) pgmPoolAccessHandlerSimple(PVM pVM, PVMCPU pVCpu, PPGMPOOL pPool, PPGMPOOLPAGE pPage, PDISCPUSTATE pCpu,
                                            PCPUMCTXCORE pRegFrame, RTGCPHYS GCPhysFault, RTGCPTR pvFault)
 {
…
      */
 #ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PVMCPU pVCpu = VMMGetCpu(pPool->CTX_SUFF(pVM));
     uint32_t iPrevSubset = PGMDynMapPushAutoSubset(pVCpu);
-    pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, pvFault, pCpu);
+    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, pCpu);
     PGMDynMapPopAutoSubset(pVCpu, iPrevSubset);
 #else
-    pgmPoolMonitorChainChanging(pPool, pPage, GCPhysFault, pvFault, pCpu);
+    pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, pvFault, pCpu);
 #endif
…
      */
     uint32_t cb;
-    int rc = EMInterpretInstructionCPU(pVM, pCpu, pRegFrame, pvFault, &cb);
+    int rc = EMInterpretInstructionCPU(pVM, pVCpu, pCpu, pRegFrame, pvFault, &cb);
     if (RT_SUCCESS(rc))
         pRegFrame->rip += pCpu->opsize;
…
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
     PPGMPOOLPAGE pPage = (PPGMPOOLPAGE)pvUser;
+    PVMCPU pVCpu = VMMGetCpu(pVM);
+
     LogFlow(("pgmPoolAccessHandler: pvFault=%RGv pPage=%p:{.idx=%d} GCPhysFault=%RGp\n", pvFault, pPage, pPage->idx, GCPhysFault));
…
      */
     DISCPUSTATE Cpu;
-    int rc = EMInterpretDisasOne(pVM, pRegFrame, &Cpu, NULL);
+    int rc = EMInterpretDisasOne(pVM, pVCpu, pRegFrame, &Cpu, NULL);
     AssertRCReturn(rc, rc);
…
     if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
     {
-        rc = pgmPoolAccessHandlerSimple(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
+        rc = pgmPoolAccessHandlerSimple(pVM, pVCpu, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
         STAM_PROFILE_STOP_EX(&pVM->pgm.s.CTX_SUFF(pPool)->CTX_SUFF_Z(StatMonitor), &pPool->CTX_MID_Z(StatMonitor,Handled), a);
         return rc;
…
      */
     if (    Cpu.pCurInstr->opcode == OP_STOSWD
-        &&  CPUMGetGuestCPL(pVM, pRegFrame) == 0
+        &&  CPUMGetGuestCPL(pVCpu, pRegFrame) == 0
         &&  pRegFrame->ecx <= 0x20
         &&  pRegFrame->ecx * 4 <= PAGE_SIZE - ((uintptr_t)pvFault & PAGE_OFFSET_MASK)
…
      * the reuse detection must be fixed.
      */
-    rc = pgmPoolAccessHandlerFlush(pVM, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
+    rc = pgmPoolAccessHandlerFlush(pVM, pVCpu, pPool, pPage, &Cpu, pRegFrame, GCPhysFault, pvFault);
     if (rc == VINF_EM_RAW_EMULATE_INSTR && fReused)
         rc = VINF_SUCCESS;
…
          * the heap size should suffice. */
         AssertFatalRC(rc);
-        Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
+        Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
     }
     pPage->fMonitored = true;
…
         rc = PGMHandlerPhysicalDeregister(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1));
         AssertFatalRC(rc);
-        AssertMsg(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3),
-                  ("%#x %#x\n", pVM->pgm.s.fSyncFlags, pVM->fForcedActions));
+        AssertMsg(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL) || VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3),
+                  ("%#x %#x\n", pVM->pgm.s.fGlobalSyncFlags, pVM->fForcedActions));
     }
     pPage->fMonitored = false;
…
      * sometimes refered to as a 'lightweight flush'.
      */
-    if (!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
+    if (!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL))
         pgmPoolMonitorModifiedClearAll(pVM);
     else
     {
 # ifdef IN_RING3 /* Don't flush in ring-0 or raw mode, it's taking too long. */
-        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
+        /** @todo SMP support! */
+        Assert(pVM->cCPUs == 1);
+        pVM->pgm.s.fGlobalSyncFlags &= ~PGM_SYNC_CLEAR_PGM_POOL;
         pgmPoolClearAll(pVM);
 # else /* !IN_RING3 */
…
     if (rc == VINF_PGM_GCPHYS_ALIASED)
     {
-        pVM->pgm.s.fSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
+        pVM->pgm.s.fGlobalSyncFlags |= PGM_SYNC_CLEAR_PGM_POOL;
         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
         rc = VINF_PGM_SYNC_CR3;
…
     }

+    /* @todo Need to synchronize this across all VCPUs! */
+    Assert(pVM->cCPUs == 1);
+    PVMCPU pVCpu = &pVM->aCpus[0]; /* to get it compiled... */
+
     /* Unmap the old CR3 value before flushing everything. */
-    int rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
+    int rc = PGM_BTH_PFN(UnmapCR3, pVCpu)(pVM, pVCpu);
     AssertRC(rc);

     /* Exit the current shadow paging mode as well; nested paging and EPT use a root CR3 which will get flushed here. */
-    rc = PGM_SHW_PFN(Exit, pVM)(pVM);
+    rc = PGM_SHW_PFN(Exit, pVCpu)(pVM, pVCpu);
     AssertRC(rc);
…
      * EPT use a root CR3 which will get flushed here.
      */
-    pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
-    rc = PGMR3ChangeMode(pVM, PGMGetGuestMode(pVM));
+    pVCpu->pgm.s.enmShadowMode = PGMMODE_INVALID;
+    rc = PGMR3ChangeMode(pVM, pVCpu, PGMGetGuestMode(pVCpu));
     AssertRC(rc);
…
               || pPage->enmKind == PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD
               || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
-              ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
+              ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(VMMGetCpu(pPool->CTX_SUFF(pVM))), pPage->Core.Key, pPage->enmKind));
     Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
     return VINF_SUCCESS;
…
     /** @todo CSAM/PGMPrefetchPage messes up here during CSAMR3CheckGates
      *        (TRPMR3SyncIDT) because of FF priority. Try fix that?
-     *        Assert(!(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */
+     *        Assert(!(pVM->pgm.s.fGlobalSyncFlags & PGM_SYNC_CLEAR_PGM_POOL)); */

 #ifdef PGMPOOL_WITH_CACHE
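The edits above are mechanical: every routine on the pool access-handler path gains a PVMCPU parameter, and per-CPU state (the StatRZGuestCR3WriteConflict counter, enmShadowMode) is reached through pVCpu instead of pVM. Below is a minimal, self-contained sketch of that calling pattern; it is plain C with invented stand-in types and helpers (VMCPUSKETCH, vmmGetCpuSketch), not the real VirtualBox structures or APIs, and it assumes the single-VCPU case that the added Assert(pVM->cCPUs == 1) statements still enforce.

#include <stdio.h>

/* Invented stand-ins for the real VM / VMCPU structures (not VirtualBox code). */
typedef struct VMCPUSKETCH
{
    unsigned idCpu;
    unsigned cCR3WriteConflicts;    /* per-VCPU statistic, cf. StatRZGuestCR3WriteConflict */
} VMCPUSKETCH;

typedef struct VMSKETCH
{
    unsigned    cCpus;
    VMCPUSKETCH aCpus[1];           /* single VCPU, matching the Assert(pVM->cCPUs == 1) above */
} VMSKETCH;

/* Stand-in for VMMGetCpu(pVM): resolve the VCPU the caller is running on. */
static VMCPUSKETCH *vmmGetCpuSketch(VMSKETCH *pVM)
{
    return &pVM->aCpus[0];
}

/* Post-change shape of pgmPoolMonitorChainChanging(): the callee receives the
 * VCPU explicitly and bumps the per-CPU counter on it, not on the VM. */
static void poolMonitorChainChangingSketch(VMCPUSKETCH *pVCpu)
{
    pVCpu->cCR3WriteConflicts++;
}

/* Post-change shape of pgmPoolAccessHandler(): resolve pVCpu once at the top
 * of the handler and thread it through every callee that needs per-CPU state. */
static int poolAccessHandlerSketch(VMSKETCH *pVM)
{
    VMCPUSKETCH *pVCpu = vmmGetCpuSketch(pVM);
    poolMonitorChainChangingSketch(pVCpu);
    return 0;
}

int main(void)
{
    VMSKETCH Vm = { 1, { { 0, 0 } } };
    poolAccessHandlerSketch(&Vm);
    printf("CR3 write conflicts on VCPU 0: %u\n", Vm.aCpus[0].cCR3WriteConflicts);
    return 0;
}

Where a callee cannot take the pointer yet, the changeset instead resolves it at the call site with VMMGetCpu(), as in the PGMGetHyperCR3 assertion above, and the added single-CPU asserts mark the spots where real SMP support is still pending.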