Changeset 27026 in vbox for trunk/src/VBox
- Timestamp: Mar 4, 2010 1:49:08 PM (15 years ago)
- Location: trunk/src/VBox/VMM
- Files: 6 edited
trunk/src/VBox/VMM/PGMInternal.h (r26947 → r27026)

  #endif
  //#endif
+
+ /**
+  * Large page support enabled only on 64 bits hosts; applies to nested paging only.
+  */
+ #if (HC_ARCH_BITS == 64) && !defined(IN_RC)
+ # define PGM_WITH_LARGE_PAGES
+ #endif

  /**
  …
  int pgmPoolSyncCR3(PVMCPU pVCpu);
  bool pgmPoolIsDirtyPage(PVM pVM, RTGCPHYS GCPhys);
- int pgmPoolTrackUpdateGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
+ int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs);
  void pgmPoolInvalidateDirtyPage(PVM pVM, RTGCPHYS GCPhysPT);
- DECLINLINE(int) pgmPoolTrackFlushGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
+ DECLINLINE(int) pgmPoolTrackFlushGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool *pfFlushTLBs)
  {
-     return pgmPoolTrackUpdateGCPhys(pVM, pPhysPage, true /* flush PTEs */, pfFlushTLBs);
+     return pgmPoolTrackUpdateGCPhys(pVM, GCPhysPage, pPhysPage, true /* flush PTEs */, pfFlushTLBs);
  }
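The widened signatures above are what the rest of the changeset revolves around: the pool tracking code now needs the guest-physical address itself, because the 2 MB large-page base can only be derived from the address, not from the PPGMPAGE structure. Below is a minimal standalone sketch of that derivation in plain C; the type alias and constant are illustrative stand-ins, not the VirtualBox macros.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t RTGCPHYS;                 /* guest-physical address, as in VBox */

#define MY_2M  ((RTGCPHYS)0x200000)        /* 2 MB large-page size */

/* Round a guest-physical address down to the 2 MB large page it lives in. */
static RTGCPHYS largePageBase(RTGCPHYS GCPhysPage)
{
    return GCPhysPage & ~(MY_2M - 1);
}

int main(void)
{
    RTGCPHYS GCPhysPage = UINT64_C(0x40123000);    /* some 4 KB page */
    printf("%#llx lies in the 2 MB page at %#llx\n",
           (unsigned long long)GCPhysPage,
           (unsigned long long)largePageBase(GCPhysPage));   /* 0x40000000 */
    return 0;
}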
trunk/src/VBox/VMM/PGMPhys.cpp (r26949 → r27026)

      /* flush references to the page. */
      PPGMPAGE pRamPage = pgmPhysGetPage(&pVM->pgm.s, pRom->GCPhys + (iPage << PAGE_SHIFT));
-     int rc2 = pgmPoolTrackFlushGCPhys(pVM, pRamPage, &fFlushTLB);
+     int rc2 = pgmPoolTrackFlushGCPhys(pVM, pRom->GCPhys + (iPage << PAGE_SHIFT), pRamPage, &fFlushTLB);
      if (rc2 != VINF_SUCCESS && (rc == VINF_SUCCESS || RT_FAILURE(rc2)))
          rc = rc2;
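The extra argument at this call site is simply the page's guest-physical address rebuilt from the ROM range base plus the page index. A tiny sketch of that arithmetic (hypothetical base address, assuming the usual 4 KB page size behind PAGE_SHIFT):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t RTGCPHYS;

#define MY_PAGE_SHIFT 12                    /* 4 KB pages */

int main(void)
{
    RTGCPHYS GCPhysRangeBase = UINT64_C(0xfffe0000);  /* hypothetical ROM range base */
    unsigned iPage = 5;                               /* sixth page of the range */

    /* Mirrors the pRom->GCPhys + (iPage << PAGE_SHIFT) expression in the diff. */
    RTGCPHYS GCPhysPage = GCPhysRangeBase + ((RTGCPHYS)iPage << MY_PAGE_SHIFT);

    printf("page %u of the range is at %#llx\n",
           iPage, (unsigned long long)GCPhysPage);    /* 0xfffe5000 */
    return 0;
}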
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r26849 → r27026)

      Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/

- # if (HC_ARCH_BITS == 64) && (PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE)
+ # if defined(PGM_WITH_LARGE_PAGES) && (PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE)
  # if (PGM_SHW_TYPE != PGM_TYPE_EPT) /* PGM_TYPE_EPT implies nested paging */
      if (HWACCMIsNestedPagingActive(pVM))
  …
  # endif
      ASMAtomicWriteSize(pPdeDst, PdeDst.u);
+
+     /* Add a reference to the first page only. */
+     PGM_BTH_NAME(SyncPageWorkerTrackAddref)(pVCpu, pShwPde, PGM_PAGE_GET_TRACKING(pPage), pPage, iPDDst);

      STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_MID_Z(Stat,SyncPT), a);
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r26150 → r27026)

      PGM_PAGE_SET_HNDL_PHYS_STATE(pPage, uState);

-     int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pPage, false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
+     int rc2 = pgmPoolTrackUpdateGCPhys(pVM, pRam->GCPhys + (i << PAGE_SHIFT), pPage, false /* allow updates of PTEs (instead of flushing) */, &fFlushTLBs);
      if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
          rc = rc2;
  …
       */
      bool fFlushTLBs = false;
-     int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
+     int rc = pgmPoolTrackFlushGCPhys(pVM, GCPhysPage, pPage, &fFlushTLBs);
      AssertLogRelRCReturnVoid(rc);
  # ifdef IN_RC
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r26911 → r27026)

      Assert(!PGM_PAGE_IS_MMIO(pPage));

- # if HC_ARCH_BITS == 64
+ # ifdef PGM_WITH_LARGE_PAGES
      if (   PGMIsUsingLargePages(pVM)
          && PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM)
  …
       */
      bool fFlushTLBs = false;
-     int rc = pgmPoolTrackFlushGCPhys(pVM, pPage, &fFlushTLBs);
+     int rc = pgmPoolTrackFlushGCPhys(pVM, GCPhys, pPage, &fFlushTLBs);
      AssertMsgReturn(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3, ("%Rrc\n", rc), RT_FAILURE(rc) ? rc : VERR_IPE_UNEXPECTED_STATUS);
  …
  }

+ #ifdef PGM_WITH_LARGE_PAGES
  /**
   * Replace a 2 MB range of zero pages with new pages that we can write to.
  …
      else
      {
- # ifdef IN_RING3
+ #  ifdef IN_RING3
          rc = PGMR3PhysAllocateLargeHandyPage(pVM, GCPhysBase);
- # else
+ #  else
          rc = VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_PGM_ALLOCATE_LARGE_HANDY_PAGE, GCPhysBase);
- # endif
+ #  endif
          if (RT_SUCCESS(rc))
          {
  …
      return VERR_PGM_INVALID_LARGE_PAGE_RANGE;
  }
+ #endif /* PGM_WITH_LARGE_PAGES */

  /**
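The PGM_WITH_LARGE_PAGES-gated routine above only hands out a 2 MB page when the whole range qualifies, otherwise the caller has to fall back to ordinary 4 KB pages (cf. VERR_PGM_INVALID_LARGE_PAGE_RANGE). The sketch below only illustrates that idea; the helper, its checks, and the constants are invented for the example and are not the VirtualBox logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t RTGCPHYS;

#define MY_2M                   ((RTGCPHYS)0x200000)
#define MY_PAGE_SIZE            ((RTGCPHYS)0x1000)
#define MY_PAGES_PER_LARGE_PAGE (MY_2M / MY_PAGE_SIZE)   /* 512 */

/* Hypothetical stand-in for the per-page checks the real code performs
 * (page type is RAM, page is still a shared zero page, and so on). */
static bool myPageIsZeroRam(RTGCPHYS GCPhys)
{
    return GCPhys < UINT64_C(0x80000000);   /* pretend all low RAM qualifies */
}

/* A 2 MB mapping only makes sense when the base is 2 MB aligned and all
 * 512 covered 4 KB pages qualify; otherwise the range is rejected. */
static bool myLargePageRangeOk(RTGCPHYS GCPhysBase)
{
    if (GCPhysBase & (MY_2M - 1))
        return false;                       /* not 2 MB aligned */
    for (unsigned i = 0; i < MY_PAGES_PER_LARGE_PAGE; i++)
        if (!myPageIsZeroRam(GCPhysBase + i * MY_PAGE_SIZE))
            return false;                   /* mixed range, use 4 KB pages */
    return true;
}

int main(void)
{
    printf("%d %d\n", myLargePageRangeOk(UINT64_C(0x40000000)),   /* 1 */
                      myLargePageRangeOk(UINT64_C(0x40001000)));  /* 0 */
    return 0;
}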
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r26860 → r27026)

          }

+ #ifdef PGM_WITH_LARGE_PAGES
+         /* Large page case only. */
+         case PGMPOOLKIND_EPT_PD_FOR_PHYS:
+         {
+             Assert(HWACCMIsNestedPagingActive(pVM));
+             Assert(cRefs == 1);
+
+             const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
+             PEPTPD pPD = (PEPTPD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
+             for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPD->a); i++)
+                 if ((pPD->a[i].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
+                 {
+                     Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64 cRefs=%#x\n", i, pPD->a[i], cRefs));
+                     STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
+                     pPD->a[i].u = 0;
+                     cRefs--;
+                     if (!cRefs)
+                         return bRet;
+                 }
+ # ifdef LOG_ENABLED
+             Log(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
+             for (unsigned i = 0; i < RT_ELEMENTS(pPD->a); i++)
+                 if ((pPD->a[i].u & (EPT_PDE2M_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
+                 {
+                     Log(("i=%d cRefs=%d\n", i, cRefs--));
+                 }
+ # endif
+             AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
+             break;
+         }
+
+         /* AMD-V nested paging - @todo merge with EPT as we only check the parts that are identical. */
+         case PGMPOOLKIND_PAE_PD_PHYS:
+         {
+             Assert(HWACCMIsNestedPagingActive(pVM));
+             Assert(cRefs == 1);
+
+             const uint64_t u64 = PGM_PAGE_GET_HCPHYS(pPhysPage) | X86_PDE4M_P | X86_PDE4M_PS;
+             PX86PD pPD = (PX86PD)PGMPOOL_PAGE_2_PTR(pVM, pPage);
+             for (unsigned i = pPage->iFirstPresent; i < RT_ELEMENTS(pPD->a); i++)
+                 if ((pPD->a[i].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
+                 {
+                     Log4(("pgmPoolTrackFlushGCPhysPTs: i=%d pde=%RX64 cRefs=%#x\n", i, pPD->a[i], cRefs));
+                     STAM_COUNTER_INC(&pPool->StatTrackFlushEntry);
+                     pPD->a[i].u = 0;
+                     cRefs--;
+                     if (!cRefs)
+                         return bRet;
+                 }
+ # ifdef LOG_ENABLED
+             Log(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
+             for (unsigned i = 0; i < RT_ELEMENTS(pPD->a); i++)
+                 if ((pPD->a[i].u & (X86_PDE2M_PAE_PG_MASK | X86_PDE4M_P | X86_PDE4M_PS)) == u64)
+                 {
+                     Log(("i=%d cRefs=%d\n", i, cRefs--));
+                 }
+ # endif
+             AssertFatalMsgFailed(("cRefs=%d iFirstPresent=%d cPresent=%d\n", cRefs, pPage->iFirstPresent, pPage->cPresent));
+             break;
+         }
+ #endif /* PGM_WITH_LARGE_PAGES */
+
          default:
              AssertFatalMsgFailed(("enmKind=%d iShw=%d\n", pPage->enmKind, iShw));
  …
   *
   * @param   pVM         The VM handle.
+  * @param   GCPhysPage  GC physical address of the page in question
   * @param   pPhysPage   The guest page in question.
   * @param   fFlushPTEs  Flush PTEs or allow them to be updated (e.g. in case of an RW bit change)
   *                      The caller MUST initialized this to @a false.
   */
- int pgmPoolTrackUpdateGCPhys(PVM pVM, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs)
+ int pgmPoolTrackUpdateGCPhys(PVM pVM, RTGCPHYS GCPhysPage, PPGMPAGE pPhysPage, bool fFlushPTEs, bool *pfFlushTLBs)
  {
      PVMCPU pVCpu = VMMGetCpu(pVM);
      pgmLock(pVM);
      int rc = VINF_SUCCESS;
+
+ #ifdef PGM_WITH_LARGE_PAGES
+     /* Is this page part of a large page? */
+     if (PGM_PAGE_GET_PDE_TYPE(pPhysPage) == PGM_PAGE_PDE_TYPE_PDE)
+     {
+         PPGMPAGE pPhysBase;
+         RTGCPHYS GCPhysBase = GCPhysPage & X86_PDE2M_PAE_PG_MASK;
+
+         GCPhysPage &= X86_PDE_PAE_PG_MASK;
+
+         /* Fetch the large page base. */
+         if (GCPhysBase != GCPhysPage)
+         {
+             pPhysBase = pgmPhysGetPage(&pVM->pgm.s, GCPhysBase);
+             AssertFatal(pPhysBase);
+         }
+         else
+             pPhysBase = pPhysPage;
+
+         Log(("pgmPoolTrackUpdateGCPhys: update large page PDE for %RGp (%RGp)\n", GCPhysBase, GCPhysPage));
+
+         /* Mark the large page as disabled as we need to break it up to change a single page in the 2 MB range. */
+         PGM_PAGE_SET_PDE_TYPE(pPhysBase, PGM_PAGE_PDE_TYPE_PDE_DISABLED);
+
+         /* Update the base as that *only* that one has a reference and there's only one PDE to clear. */
+         rc = pgmPoolTrackUpdateGCPhys(pVM, GCPhysBase, pPhysBase, fFlushPTEs, pfFlushTLBs);
+
+         *pfFlushTLBs = true;
+         pgmUnlock(pVM);
+         return rc;
+     }
+ #else
+     NOREF(GCPhysPage);
+ #endif /* PGM_WITH_LARGE_PAGES */
+
      const uint16_t u16 = PGM_PAGE_GET_TRACKING(pPhysPage);
      if (u16)
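The two added switch cases do essentially the same thing for EPT and AMD-V nested paging: build a match value from the page's host-physical address plus the present and page-size bits, scan the shadow page directory for the one PDE that equals it, and zero that entry. A compact standalone illustration of that scan follows; the bit constants and helper are simplified stand-ins for the example, not the real X86_/EPT_ macros.

#include <stdint.h>
#include <stdio.h>

/* Illustrative PDE bits and physical-address mask for a 2 MB mapping. */
#define MY_PDE_P          UINT64_C(0x0000000000000001)   /* present */
#define MY_PDE_PS         UINT64_C(0x0000000000000080)   /* 2 MB page size */
#define MY_PDE2M_PG_MASK  UINT64_C(0x000fffffffe00000)   /* phys address bits */

#define MY_PD_ENTRIES 512

/* Find the single PDE that maps the large page at HCPhys and clear it,
 * tearing down the 2 MB mapping. Returns the entry index or -1. */
static int myClearLargePagePde(uint64_t *paPd, uint64_t HCPhys)
{
    const uint64_t u64 = (HCPhys & MY_PDE2M_PG_MASK) | MY_PDE_P | MY_PDE_PS;
    for (unsigned i = 0; i < MY_PD_ENTRIES; i++)
        if ((paPd[i] & (MY_PDE2M_PG_MASK | MY_PDE_P | MY_PDE_PS)) == u64)
        {
            paPd[i] = 0;            /* drop the mapping */
            return (int)i;
        }
    return -1;                      /* no such large-page mapping */
}

int main(void)
{
    uint64_t aPd[MY_PD_ENTRIES] = {0};
    aPd[3] = UINT64_C(0x80000000) | MY_PDE_P | MY_PDE_PS;   /* one 2 MB mapping */
    printf("cleared entry %d\n", myClearLargePagePde(aPd, UINT64_C(0x80000000)));
    return 0;
}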