Changeset 107171 in vbox for trunk/src/VBox/VMM/VMMAll
- Timestamp: Nov 28, 2024, 10:38:10 AM
- svn:sync-xref-src-repo-rev: 166169
- Location: trunk/src/VBox/VMM/VMMAll
- Files: 5 edited
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
```diff
--- trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r106061)
+++ trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r107171)
@@ -756 +756 @@
 # define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
 # define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
-    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
+    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
 
 #elif !defined(IN_RING3) && defined(VBOX_STRICT)
 # define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
 # define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
-    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
+    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
 
 #elif defined(IN_RING3) && !defined(VBOX_STRICT)
 # define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
 # define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
-    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
+    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
 
 #elif defined(IN_RING3) && defined(VBOX_STRICT)
 # define PGMMODEDATABTH_NULL_ENTRY() { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
 # define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
-    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
+    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
 
 #else
@@ -1282 +1282 @@
 }
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
 
 /**
@@ -1743 +1744 @@
-#ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
+# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
 /**
  * Syncs the SHADOW nested-guest page directory pointer for the specified address.
@@ -1848 +1849 @@
     return VINF_SUCCESS;
 }
-#endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
+# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
 
 
-#ifdef IN_RING0
+# ifdef IN_RING0
 /**
  * Synchronizes a range of nested page table entries.
@@ -1906 +1907 @@
     return rc;
 }
-#endif /* IN_RING0 */
+# endif /* IN_RING0 */
 
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
 
 /**
@@ -2511 +2513 @@
 VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
 {
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
     PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
     AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
     return pPoolPage->Core.Key;
+#else
+    RT_NOREF(pVCpu);
+    return NIL_RTHCPHYS;
+#endif
 }
@@ -3461 +3468 @@
     AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
     AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
-    AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
    AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
     AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
```
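The dropped pfnVerifyAccessSyncPage member explains why every PGMMODEDATABTH_ENTRY variant loses exactly one Nm(...) element and why the matching AssertPtrReturn check disappears: the mode data is a flat table of brace-initialized function-pointer structs, one entry per paging-mode pair. Below is a minimal sketch of that table pattern, using invented names and a two-member struct rather than the real VBox types:

```c
#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for one PGMMODEDATABTH row (hypothetical layout). */
typedef struct MODEDATA
{
    uint32_t uShwType;                 /* shadow paging type id */
    uint32_t uGstType;                 /* guest paging type id */
    int    (*pfnSyncCR3)(void);
    int    (*pfnPrefetchPage)(void);
    /* int (*pfnVerifyAccessSyncPage)(void);  <- removing a member like this
       forces a one-element trim in every ENTRY() initializer below. */
} MODEDATA;

static int Shw32Gst32_SyncCR3(void)      { return 0; }
static int Shw32Gst32_PrefetchPage(void) { return 0; }

/* Nm() pastes a mode prefix onto each member function name. */
#define ENTRY(uShwT, uGstT, Nm) { (uShwT), (uGstT), Nm(SyncCR3), Nm(PrefetchPage) }
#define NM_SHW32_GST32(name)    Shw32Gst32_##name

static const MODEDATA g_aModeData[] =
{
    ENTRY(1 /* shw 32-bit */, 1 /* gst 32-bit */, NM_SHW32_GST32),
};

int main(void)
{
    printf("entries=%u rc=%d\n",
           (unsigned)(sizeof(g_aModeData) / sizeof(g_aModeData[0])),
           g_aModeData[0].pfnSyncCR3());
    return 0;
}
```

g_aPgmBothModeData in PGMAll.cpp follows the same shape, just with many more members and one entry per guest/shadow combination, which is why a single retired callback shows up as four near-identical macro edits plus one dropped pointer assertion.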
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
```diff
--- trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r106061)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r107171)
@@ -49 +49 @@
 PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPHYS GCPhysNestedFault,
                                        bool fIsLinearAddrValid, RTGCPTR GCPtrNestedFault, PPGMPTWALK pWalk, bool *pfLockTaken);
-# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT
+# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 static void PGM_BTH_NAME(NestedSyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPte, RTGCPHYS GCPhysPage, PPGMPOOLPAGE pShwPage,
                                                unsigned iPte, SLATPTE GstSlatPte);
@@ -58 +58 @@
 #endif
 PGM_BTH_DECL(int, InvalidatePage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage);
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
 static int PGM_BTH_NAME(SyncPage)(PVMCPUCC pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
 static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPUCC pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
 static int PGM_BTH_NAME(SyncPT)(PVMCPUCC pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+#  if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
 static void PGM_BTH_NAME(SyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst);
-# else
+#  else
 static void PGM_BTH_NAME(SyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPteDst, RTGCPHYS GCPhysPage, PPGMPOOLPAGE pShwPage, unsigned iPTDst);
-# endif
-PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPUCC pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
+#  endif
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
 PGM_BTH_DECL(int, PrefetchPage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage);
 PGM_BTH_DECL(int, SyncCR3)(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
@@ -122 +123 @@
  * Other modes rely on MapCR3/UnmapCR3 to setup the shadow root page tables.
  */
-#if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
-          || PGM_SHW_TYPE == PGM_TYPE_PAE \
-          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
-      && (   PGM_GST_TYPE == PGM_TYPE_REAL \
-          || PGM_GST_TYPE == PGM_TYPE_PROT))
+#if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
+          || PGM_SHW_TYPE == PGM_TYPE_PAE \
+          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
+      && (   PGM_GST_TYPE == PGM_TYPE_REAL \
+          || PGM_GST_TYPE == PGM_TYPE_PROT)) \
+  && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
@@ -179 +181 @@
 #ifndef IN_RING3
 
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
+# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 /**
  * Deal with a guest page fault.
@@ -191 +193 @@
  * @param   uErr        The error code.
  */
-PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, RTGCUINT uErr)
+static VBOXSTRICTRC PGM_BTH_NAME(Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, RTGCUINT uErr)
 {
     /*
@@ -218 +220 @@
-#if !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE
+#if !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 /**
  * Deal with a guest page fault.
@@ -425 +427 @@
     && !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) \
     && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT) \
-    && PGM_SHW_TYPE != PGM_TYPE_NONE
+    && PGM_SHW_TYPE != PGM_TYPE_NONE \
+    && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
     int rc;
@@ -1156 +1159 @@
 # if  defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) \
    && PGM_GST_TYPE == PGM_TYPE_PROT \
-   && PGM_SHW_TYPE == PGM_TYPE_EPT
+   && PGM_SHW_TYPE == PGM_TYPE_EPT \
+   && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 
     Assert(CPUMIsGuestVmxEptPagingEnabled(pVCpu));
@@ -1404 +1408 @@
 #if   PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
    && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
-   && PGM_SHW_TYPE != PGM_TYPE_NONE
+   && PGM_SHW_TYPE != PGM_TYPE_NONE \
+   && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
     int rc;
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
@@ -1662 +1667 @@
 }
 
-#if PGM_SHW_TYPE != PGM_TYPE_NONE
+#if PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 
 /**
@@ -2572 +2577 @@
 }
 
-#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
+#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE && !VBOX_WITH_ONLY_PGM_NEM_MODE */
 
-#if !defined(IN_RING3) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT
+#if !defined(IN_RING3) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 
 /**
@@ -3194 +3199 @@
 }
 
-#endif /* !IN_RING3 && VBOX_WITH_NESTED_HWVIRT_VMX_EPT && PGM_SHW_TYPE == PGM_TYPE_EPT */
-#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE
+#endif /* !IN_RING3 && VBOX_WITH_NESTED_HWVIRT_VMX_EPT && PGM_SHW_TYPE == PGM_TYPE_EPT && !VBOX_WITH_ONLY_PGM_NEM_MODE */
+#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 
 /**
@@ -3352 +3357 @@
 }
 
-#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE */
+#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !VBOX_WITH_ONLY_PGM_NEM_MODE */
 
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
 /**
  * Sync a shadow page table.
 …
  * Handles mapping conflicts.
  *
- * This is called by VerifyAccessSyncPage, PrefetchPage, InvalidatePage (on
- * conflict), and Trap0eHandler.
+ * This is called by PrefetchPage, InvalidatePage (on conflict), and
+ * Trap0eHandler.
  *
  * A precondition for this method is that the shadow PDE is not present.
@@ -3380 +3386 @@
     PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
 
-# if 0 /* rarely useful; leave for debugging. */
+#  if 0 /* rarely useful; leave for debugging. */
     STAM_COUNTER_INC(&pVCpu->pgm.s.StatSyncPtPD[iPDSrc]);
-# endif
+#  endif
     LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage)); RT_NOREF_PV(GCPtrPage);
 
     PGM_LOCK_ASSERT_OWNER(pVM);
 
-# if   ( PGM_GST_TYPE == PGM_TYPE_32BIT \
-     || PGM_GST_TYPE == PGM_TYPE_PAE \
-     || PGM_GST_TYPE == PGM_TYPE_AMD64) \
-    && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
-    && PGM_SHW_TYPE != PGM_TYPE_NONE
+#  if   ( PGM_GST_TYPE == PGM_TYPE_32BIT \
+      || PGM_GST_TYPE == PGM_TYPE_PAE \
+      || PGM_GST_TYPE == PGM_TYPE_AMD64) \
+     && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
+     && PGM_SHW_TYPE != PGM_TYPE_NONE
     int rc = VINF_SUCCESS;
@@ -3406 +3412 @@
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
     const unsigned iPDDst = GCPtrPage >> SHW_PD_SHIFT;
     PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(pVCpu, GCPtrPage);
 …
-# elif PGM_SHW_TYPE == PGM_TYPE_PAE
+#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
     const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     PPGMPOOLPAGE pShwPde = NULL;
 …
-# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     const unsigned iPdpt  = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
     const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
 …
     PSHWPDE pPdeDst = &pPDDst->a[iPDDst];
 
-# endif
+#  endif
     SHWPDE PdeDst = *pPdeDst;
 
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
+#  if PGM_GST_TYPE == PGM_TYPE_AMD64
     /* Fetch the pgm pool shadow descriptor. */
     PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
     Assert(pShwPde);
-# endif
+#  endif
 
     Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P.*/
@@ -3465 +3471 @@
     {
         GCPhys = GST_GET_PDE_GCPHYS(PdeSrc);
-# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
         GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | ((iPDDst & 1) * (GUEST_PAGE_SIZE / 2)));
-# endif
+#  endif
         rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
                           pShwPde->idx, iPDDst, false /*fLockPage*/,
 …
     {
         PGMPOOLACCESS enmAccess;
-# if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
+#  if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
         const bool fNoExecute = (PdeSrc.u & X86_PDE_PAE_NX) && GST_IS_NX_ACTIVE(pVCpu);
-# else
+#  else
         const bool fNoExecute = false;
-# endif
+#  endif
 
         GCPhys = GST_GET_BIG_PDE_GCPHYS(pVM, PdeSrc);
-# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
         GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | (GCPtrPage & (1 << X86_PD_PAE_SHIFT)));
-# endif
+#  endif
         /* Determine the right kind of large page to avoid incorrect cached entry reuse. */
         if (PdeSrc.u & X86_PDE_US)
@@ -3585 +3591 @@
         STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT4K));
-# ifdef PGM_SYNC_N_PAGES
+#  ifdef PGM_SYNC_N_PAGES
         unsigned iPTBase = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
         unsigned iPTDst  = iPTBase;
 …
         else
             iPTDst -= PGM_SYNC_NR_PAGES / 2;
-# else /* !PGM_SYNC_N_PAGES */
+#  else /* !PGM_SYNC_N_PAGES */
         unsigned iPTDst = 0;
         const unsigned iPTDstEnd = RT_ELEMENTS(pPTDst->a);
-# endif /* !PGM_SYNC_N_PAGES */
+#  endif /* !PGM_SYNC_N_PAGES */
         RTGCPTR GCPtrCur = (GCPtrPage & ~(RTGCPTR)((1 << SHW_PD_SHIFT) - 1))
                          | ((RTGCPTR)iPTDst << GUEST_PAGE_SHIFT);
-# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
         /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
         const unsigned offPTSrc = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
-# else
+#  else
         const unsigned offPTSrc = 0;
-# endif
+#  endif
         for (; iPTDst < iPTDstEnd; iPTDst++, GCPtrCur += GUEST_PAGE_SIZE)
         {
@@ -3684 +3690 @@
     if (pRam && GCPhys >= pRam->GCPhys)
     {
-# ifndef PGM_WITH_A20
+#  ifndef PGM_WITH_A20
         unsigned iHCPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
-# endif
+#  endif
         do
         {
             /* Make shadow PTE. */
-# ifdef PGM_WITH_A20
+#  ifdef PGM_WITH_A20
             PPGMPAGE pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
-# else
+#  else
             PPGMPAGE pPage = &pRam->aPages[iHCPage];
-# endif
+#  endif
             SHWPTE PteDst;
 
-# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
+#  ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
             /* Try to make the page writable if necessary. */
             if (    PGM_PAGE_GET_TYPE(pPage) == PGMPAGETYPE_RAM
 …
                 || (    SHW_PTE_IS_RW(PteDstBase)
                     &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
-# ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
+#  ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
                     &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED
-# endif
-# ifdef VBOX_WITH_PAGE_SHARING
+#  endif
+#  ifdef VBOX_WITH_PAGE_SHARING
                     &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_SHARED
-# endif
+#  endif
                     &&  !PGM_PAGE_IS_BALLOONED(pPage))
                )
 …
                     break;
             }
-# endif
+#  endif
 
             if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && !PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage))
@@ -3731 +3737 @@
                 &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
             {
-# ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
+#  ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
                 /* Still applies to shared pages. */
                 Assert(!PGM_PAGE_IS_ZERO(pPage));
-# endif
+#  endif
                 SHW_PTE_SET_RO(PteDst);   /** @todo this isn't quite working yet... */
                 Log3(("SyncPT: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
@@ -3751 +3757 @@
                 GCPhys += GUEST_PAGE_SIZE;
                 PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
-# ifndef PGM_WITH_A20
+#  ifndef PGM_WITH_A20
                 iHCPage++;
-# endif
+#  endif
                 iPTDst++;
             } while (   iPTDst < RT_ELEMENTS(pPTDst->a)
@@ -3787 +3793 @@
     return rc;
 
-# elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
+#  elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
     && !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) \
     && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT) \
 …
      */
     int rc = VINF_SUCCESS;
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
     const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(pVCpu, GCPtrPage);
 …
     Assert(pShwPde);
 
-# elif PGM_SHW_TYPE == PGM_TYPE_PAE
+#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
     const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     PPGMPOOLPAGE pShwPde = NULL;             /* initialized to shut up gcc */
 …
     pPdeDst = &pPDDst->a[iPDDst];
 
-# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     const unsigned iPdpt  = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
     const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
 …
     Assert(pShwPde);
 
-# elif PGM_SHW_TYPE == PGM_TYPE_EPT
+#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
     const unsigned iPdpt  = (GCPtrPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
     const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
 …
     PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & EPT_PDPTE_PG_MASK);
     Assert(pShwPde);
-# endif
+#  endif
     SHWPDE PdeDst = *pPdeDst;
 
     Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
 
-# if defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
+#  if defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
     if (BTH_IS_NP_ACTIVE(pVM))
     {
@@ -3898 +3904 @@
             }
         }
-# if !defined(VBOX_WITH_NEW_LAZY_PAGE_ALLOC) && !defined(PGM_WITH_PAGE_ZEROING_DETECTION) /* This code is too aggresive! */
+#  if !defined(VBOX_WITH_NEW_LAZY_PAGE_ALLOC) && !defined(PGM_WITH_PAGE_ZEROING_DETECTION) /* This code is too aggresive! */
         else if (   PGMIsUsingLargePages(pVM)
                  && PGM_A20_IS_ENABLED(pVCpu))
 …
                 LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
         }
-# endif
+#  endif
 
         if (HCPhys != NIL_RTHCPHYS)
         {
-# if PGM_SHW_TYPE == PGM_TYPE_EPT
+#  if PGM_SHW_TYPE == PGM_TYPE_EPT
             PdeDst.u = HCPhys | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_LEAF | EPT_E_IGNORE_PAT | EPT_E_MEMTYPE_WB
                      | (PdeDst.u & X86_PDE_AVL_MASK) /** @todo do we need this? */;
-# else
+#  else
             PdeDst.u = HCPhys | X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PS
                      | (PdeDst.u & X86_PDE_AVL_MASK) /** @todo PGM_PD_FLAGS? */;
-# endif
+#  endif
             SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
 …
         }
     }
-# endif /* defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE */
+#  endif /* defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE */
 
     /*
@@ -3984 +3990 @@
     /* Save the new PDE. */
-# if PGM_SHW_TYPE == PGM_TYPE_EPT
+#  if PGM_SHW_TYPE == PGM_TYPE_EPT
     PdeDst.u = pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE
              | (PdeDst.u & X86_PDE_AVL_MASK /** @todo do we really need this? */);
-# else
+#  else
     PdeDst.u = pShwPage->Core.Key | X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A
             | (PdeDst.u & X86_PDE_AVL_MASK /** @todo use a PGM_PD_FLAGS define */);
-# endif
+#  endif
     SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
 …
     return rc;
 
-# else
+#  else
     NOREF(iPDSrc); NOREF(pPDSrc);
     AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
     return VERR_PGM_NOT_USED_IN_MODE;
-# endif
+#  endif
 }
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
@@ -4025 +4032 @@
        || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \
     && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
-    && PGM_SHW_TYPE != PGM_TYPE_NONE
+    && PGM_SHW_TYPE != PGM_TYPE_NONE \
+    && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
     /*
      * Check that all Guest levels thru the PDE are present, getting the
@@ -4129 +4137 @@
     return rc;
 
-#elif PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NONE
+#elif PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NONE || defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
     NOREF(pVCpu); NOREF(GCPtrPage);
     return VINF_SUCCESS; /* ignore */
 …
     AssertCompile(0);
 #endif
-}
-
-
-
-
-/**
- * Syncs a page during a PGMVerifyAccess() call.
- *
- * @returns VBox status code (informational included).
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   GCPtrPage   The address of the page to sync.
- * @param   fPage       The effective guest page flags.
- * @param   uErr        The trap error code.
- * @remarks This will normally never be called on invalid guest page
- *          translation entries.
- */
-PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
-{
-    PVMCC pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);
-
-    LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
-    RT_NOREF_PV(GCPtrPage); RT_NOREF_PV(fPage); RT_NOREF_PV(uErr);
-
-    Assert(!pVM->pgm.s.fNestedPaging);
-#if   (   PGM_GST_TYPE == PGM_TYPE_32BIT \
-       || PGM_GST_TYPE == PGM_TYPE_REAL \
-       || PGM_GST_TYPE == PGM_TYPE_PROT \
-       || PGM_GST_TYPE == PGM_TYPE_PAE \
-       || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \
-    && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
-    && PGM_SHW_TYPE != PGM_TYPE_NONE
-
-    /*
-     * Get guest PD and index.
-     */
-    /** @todo Performance: We've done all this a jiffy ago in the
-     *        PGMGstGetPage call. */
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-#  if PGM_GST_TYPE == PGM_TYPE_32BIT
-    const unsigned iPDSrc = (uint32_t)GCPtrPage >> GST_PD_SHIFT;
-    PGSTPD pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
-
-#  elif PGM_GST_TYPE == PGM_TYPE_PAE
-    unsigned iPDSrc = 0;
-    X86PDPE PdpeSrc;
-    PGSTPD pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc);
-    if (RT_UNLIKELY(!pPDSrc))
-    {
-        Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
-        return VINF_EM_RAW_GUEST_TRAP;
-    }
-
-#  elif PGM_GST_TYPE == PGM_TYPE_AMD64
-    unsigned iPDSrc = 0;            /* shut up gcc */
-    PX86PML4E pPml4eSrc = NULL;     /* ditto */
-    X86PDPE PdpeSrc;
-    PGSTPD pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
-    if (RT_UNLIKELY(!pPDSrc))
-    {
-        Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
-        return VINF_EM_RAW_GUEST_TRAP;
-    }
-#  endif
-
-# else  /* !PGM_WITH_PAGING */
-    PGSTPD pPDSrc = NULL;
-    const unsigned iPDSrc = 0;
-# endif /* !PGM_WITH_PAGING */
-    int rc = VINF_SUCCESS;
-
-    PGM_LOCK_VOID(pVM);
-
-    /*
-     * First check if the shadow pd is present.
-     */
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT
-    PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(pVCpu, GCPtrPage);
-    AssertReturn(pPdeDst, VERR_INTERNAL_ERROR_3);
-
-# elif PGM_SHW_TYPE == PGM_TYPE_PAE
-    PX86PDEPAE pPdeDst;
-    const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
-    PX86PDPAE pPDDst;
-#  if PGM_GST_TYPE != PGM_TYPE_PAE
-    /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
-    X86PDPE PdpeSrc;
-    PdpeSrc.u = X86_PDPE_P; /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
-#  endif
-    rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, PdpeSrc.u, &pPDDst);
-    if (rc != VINF_SUCCESS)
-    {
-        PGM_UNLOCK(pVM);
-        AssertRC(rc);
-        return rc;
-    }
-    Assert(pPDDst);
-    pPdeDst = &pPDDst->a[iPDDst];
-
-# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
-    const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
-    PX86PDPAE pPDDst;
-    PX86PDEPAE pPdeDst;
-
-#  if PGM_GST_TYPE == PGM_TYPE_PROT
-    /* AMD-V nested paging: Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
-    X86PML4E Pml4eSrc;
-    X86PDPE PdpeSrc;
-    PX86PML4E pPml4eSrc = &Pml4eSrc;
-    Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
-    PdpeSrc.u  = X86_PDPE_P | X86_PDPE_RW | X86_PDPE_US | X86_PDPE_A;
-#  endif
-
-    rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc->u, PdpeSrc.u, &pPDDst);
-    if (rc != VINF_SUCCESS)
-    {
-        PGM_UNLOCK(pVM);
-        AssertRC(rc);
-        return rc;
-    }
-    Assert(pPDDst);
-    pPdeDst = &pPDDst->a[iPDDst];
-# endif
-
-    if (!(pPdeDst->u & X86_PDE_P))
-    {
-        rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
-        if (rc != VINF_SUCCESS)
-        {
-            PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
-            PGM_UNLOCK(pVM);
-            AssertRC(rc);
-            return rc;
-        }
-    }
-
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-    /* Check for dirty bit fault */
-    rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
-    if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
-        Log(("PGMVerifyAccess: success (dirty)\n"));
-    else
-# endif
-    {
-# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
-        GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
-# else
-        GSTPDE const PdeSrc = { X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A }; /* faked so we don't have to #ifdef everything */
-# endif
-
-        Assert(rc != VINF_EM_RAW_GUEST_TRAP);
-        if (uErr & X86_TRAP_PF_US)
-            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUser));
-        else /* supervisor */
-            STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
-
-        rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
-        if (RT_SUCCESS(rc))
-        {
-            /* Page was successfully synced */
-            Log2(("PGMVerifyAccess: success (sync)\n"));
-            rc = VINF_SUCCESS;
-        }
-        else
-        {
-            Log(("PGMVerifyAccess: access violation for %RGv rc=%Rrc\n", GCPtrPage, rc));
-            rc = VINF_EM_RAW_GUEST_TRAP;
-        }
-    }
-    PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
-    PGM_UNLOCK(pVM);
-    return rc;
-
-#else  /* PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) */
-
-    AssertLogRelMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
-    return VERR_PGM_NOT_USED_IN_MODE;
-#endif /* PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) */
 }
@@ -4336 +4167 @@
     LogFlow(("SyncCR3 FF=%d fGlobal=%d\n", !!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), fGlobal));
 
-#if !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE
+#if !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 # ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
     PGM_LOCK_VOID(pVM);
 …
     PGM_UNLOCK(pVM);
 # endif
-#endif /* !NESTED && !EPT */
+#endif /* !NESTED && !EPT && !VBOX_WITH_ONLY_PGM_NEM_MODE */
 
 #if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NONE
@@ -4399 +4230 @@
     || PGM_GST_TYPE == PGM_TYPE_AMD64
 
-    bool            fBigPagesSupported = GST_IS_PSE_ACTIVE(pVCpu);
-    PPGMCPU         pPGM = &pVCpu->pgm.s;
+# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
+    bool const      fBigPagesSupported = GST_IS_PSE_ACTIVE(pVCpu);
     RTGCPHYS        GCPhysGst;  /* page address derived from the guest page tables. */
     RTHCPHYS        HCPhysShw;  /* page address derived from the shadow page tables. */
-# ifndef IN_RING0
+#  ifndef IN_RING0
     RTHCPHYS        HCPhys;     /* general usage. */
-# endif
+#  endif
+# endif
+    PPGMCPU const   pPGM = &pVCpu->pgm.s;
     int             rc;
+    RT_NOREF(rc);
 
     /*
@@ -4435 +4269 @@
 # endif /* !IN_RING0 */
 
+# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
     /*
      * Get and check the Shadow CR3.
      */
-# if PGM_SHW_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
     unsigned        cPDEs       = X86_PG_ENTRIES;
     unsigned        cIncrement  = X86_PG_ENTRIES * GUEST_PAGE_SIZE;
-# elif PGM_SHW_TYPE == PGM_TYPE_PAE
-#  if PGM_GST_TYPE == PGM_TYPE_32BIT
+#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
+#   if PGM_GST_TYPE == PGM_TYPE_32BIT
     unsigned        cPDEs       = X86_PG_PAE_ENTRIES * 4;   /* treat it as a 2048 entry table. */
-#  else
+#   else
     unsigned        cPDEs       = X86_PG_PAE_ENTRIES;
-#  endif
+#   endif
     unsigned        cIncrement  = X86_PG_PAE_ENTRIES * GUEST_PAGE_SIZE;
-# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
+#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     unsigned        cPDEs       = X86_PG_PAE_ENTRIES;
     unsigned        cIncrement  = X86_PG_PAE_ENTRIES * GUEST_PAGE_SIZE;
-# endif
+#  endif
     if (cb != ~(RTGCPTR)0)
         cPDEs = RT_MIN(cb >> SHW_PD_SHIFT, 1);
 …
     /** @todo call the other two PGMAssert*() functions. */
 
-# if PGM_GST_TYPE == PGM_TYPE_AMD64
+#  if PGM_GST_TYPE == PGM_TYPE_AMD64
     unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
 …
             continue;
         }
-# else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
-    {
-# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
+#  else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
+    {
+#  endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
 
-# if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
+#  if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
         /*
          * Check the PDPTEs too.
 …
         X86PDPE         PdpeSrc;
         PdpeSrc.u = 0;  /* initialized to shut up gcc 4.5 */
-#  if PGM_GST_TYPE == PGM_TYPE_PAE
+#   if PGM_GST_TYPE == PGM_TYPE_PAE
         PGSTPD          pPDSrc   = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPDSrc, &PdpeSrc);
         PX86PDPT        pPdptDst = pgmShwGetPaePDPTPtr(pVCpu);
-#  else
+#   else
         PX86PML4E       pPml4eSrcIgn;
         PX86PDPT        pPdptDst;
 …
         }
         Assert(pPDDst);
-#  endif
+#   endif
         Assert(iPDSrc == 0);
@@ -4561 +4396 @@
         if (GCPhysPdeSrc != pShwPde->GCPhys)
         {
-#  if PGM_GST_TYPE == PGM_TYPE_AMD64
+#   if PGM_GST_TYPE == PGM_TYPE_AMD64
             AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
-#  else
+#   else
             AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
-#  endif
+#   endif
             GCPtr += 512 * _2M;
             cErrors++;
 …
         }
 
-#  if PGM_GST_TYPE == PGM_TYPE_AMD64
+#   if PGM_GST_TYPE == PGM_TYPE_AMD64
         if (    (pPdpeDst->u & (X86_PDPE_US | X86_PDPE_RW | X86_PDPE_LM_NX))
             !=  (PdpeSrc.u   & (X86_PDPE_US | X86_PDPE_RW | X86_PDPE_LM_NX)))
 …
             continue;
         }
-#  endif
-
-# else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
-    {
-# endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
+#   endif
+
+#  else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
+    {
+#  endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
+#  if PGM_GST_TYPE == PGM_TYPE_32BIT
     GSTPD const *pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
-#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
+#   if PGM_SHW_TYPE == PGM_TYPE_32BIT
     PCX86PD pPDDst = pgmShwGet32BitPDPtr(pVCpu);
-#  endif
-# endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
+#   endif
+#  endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
     /*
      * Iterate the shadow page directory.
 …
          iPDDst++, GCPtr += cIncrement)
     {
-# if PGM_SHW_TYPE == PGM_TYPE_PAE
+#  if PGM_SHW_TYPE == PGM_TYPE_PAE
         const SHWPDE PdeDst = *pgmShwGetPaePDEPtr(pVCpu, GCPtr);
-# else
+#  else
         const SHWPDE PdeDst = pPDDst->a[iPDDst];
-# endif
+#  endif
         if (    (PdeDst.u & X86_PDE_P)
             ||  ((PdeDst.u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) )
@@ -4647 +4482 @@
             {
                 GCPhysGst = GST_GET_PDE_GCPHYS(PdeSrc);
-# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                 GCPhysGst = PGM_A20_APPLY(pVCpu, GCPhysGst | ((iPDDst & 1) * (GUEST_PAGE_SIZE / 2)));
-# endif
+#  endif
             }
             else
             {
-# if PGM_GST_TYPE == PGM_TYPE_32BIT
+#  if PGM_GST_TYPE == PGM_TYPE_32BIT
                 if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
                 {
 …
                     continue;
                 }
-# endif
+#  endif
                 GCPhysGst = GST_GET_BIG_PDE_GCPHYS(pVM, PdeSrc);
-# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                 GCPhysGst = PGM_A20_APPLY(pVCpu, GCPhysGst | (GCPtr & RT_BIT(X86_PAGE_2M_SHIFT)));
-# endif
+#  endif
             }
@@ -4729 +4564 @@
                 /* iterate the page table. */
-# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
+#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
                 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
                 const unsigned offPTSrc = ((GCPtr >> SHW_PD_SHIFT) & 1) * 512;
-# else
+#  else
                 const unsigned offPTSrc = 0;
-# endif
+#  endif
                 for (unsigned iPT = 0, off = 0;
                      iPT < RT_ELEMENTS(pPTDst->a);
 …
                     if (!(PteSrc.u & X86_PTE_P))
                     {
-# ifdef IN_RING3
+#  ifdef IN_RING3
                         PGMAssertHandlerAndFlagsInSync(pVM);
                         DBGFR3PagingDumpEx(pVM->pUVM, pVCpu->idCpu, DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE
                                            | DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_HEADER | DBGFPGDMP_FLAGS_PRINT_CR3,
                                            0, 0, UINT64_MAX, 99, NULL);
-# endif
+#  endif
                         AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
                                          GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst), pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
 …
                     uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
-# if 1 /** @todo sync accessed bit properly... */
+#  if 1 /** @todo sync accessed bit properly... */
                     fIgnoreFlags |= X86_PTE_A;
-# endif
+#  endif
 
                     /* match the physical addresses */
 …
                     GCPhysGst = GST_GET_PTE_GCPHYS(PteSrc);
 
-# ifdef IN_RING3
+#  ifdef IN_RING3
                     rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
                     if (RT_FAILURE(rc))
                     {
-#  if 0
+#   if 0
                         if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
                         {
 …
                             continue;
                         }
-#  endif
+#   endif
                     }
                     else if (HCPhysShw != (HCPhys & SHW_PTE_PG_MASK))
 …
                         continue;
                     }
-# endif
+#  endif
 
                     pPhysPage = pgmPhysGetPage(pVM, GCPhysGst);
                     if (!pPhysPage)
                     {
-#  if 0
+#   if 0
                         if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
                         {
 …
                             continue;
                         }
-#  endif
+#   endif
                         if (SHW_PTE_IS_RW(PteDst))
                         {
@@ -4838 +4673 @@
                     {
                         if (    SHW_PTE_IS_P(PteDst)
-# if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
+#  if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
                             &&  !PGM_PAGE_IS_MMIO(pPhysPage)
-# endif
+#  endif
                            )
                         {
 …
                             cErrors++;
                         }
-# if 0 /** @todo sync access bit properly... */
+#  if 0 /** @todo sync access bit properly... */
                         if (PteDst.n.u1Accessed != PteSrc.n.u1Accessed)
                         {
 …
                         }
                         fIgnoreFlags |= X86_PTE_RW;
-# else
+#  else
                         fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
-# endif
+#  endif
                     }
                     else if (SHW_PTE_IS_TRACK_DIRTY(PteDst))
 …
                         fIgnoreFlags |= X86_PTE_P;
                     }
-# ifdef DEBUG_sandervl
+#  ifdef DEBUG_sandervl
                     fIgnoreFlags |= X86_PTE_D | X86_PTE_A;
-# endif
+#  endif
                 }
@@ -4944 +4779 @@
                         continue;
                     }
-# if 0 /** @todo sync access bit properly... */
+#  if 0 /** @todo sync access bit properly... */
                     if (PdeDst.n.u1Accessed != PdeSrc.b.u1Accessed)
                     {
 …
                     }
                     fIgnoreFlags |= X86_PTE_RW;
-# else
+#  else
                     fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
-# endif
+#  endif
                 }
                 else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
 …
                     HCPhysShw = SHW_PTE_GET_HCPHYS(PteDst);
 
-# ifdef IN_RING3
+#  ifdef IN_RING3
                     rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
                     if (RT_FAILURE(rc))
                     {
-#  if 0
+#   if 0
                         if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
                         {
 …
                             cErrors++;
                         }
-#  endif
+#   endif
                     }
                     else if (HCPhysShw != (HCPhys & X86_PTE_PAE_PG_MASK))
 …
                         continue;
                     }
-# endif
+#  endif
                     pPhysPage = pgmPhysGetPage(pVM, GCPhysGst);
                     if (!pPhysPage)
                     {
-#  if 0 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
+#   if 0 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
                         if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
                         {
 …
                             continue;
                         }
-#  endif
+#   endif
                         if (SHW_PTE_IS_RW(PteDst))
                         {
 …
                     if (    SHW_PTE_IS_P(PteDst)
                         &&  !PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPhysPage)
-# if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
+#  if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
                         &&  !PGM_PAGE_IS_MMIO(pPhysPage)
-# endif
+#  endif
                        )
                     {
@@ -5112 +4947 @@
     } /* for each PML4E */
 
+# endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
 # ifdef DEBUG
     if (cErrors)
@@ -5232 +5068 @@
      */
-# if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
-          ||  PGM_SHW_TYPE == PGM_TYPE_PAE \
-          ||  PGM_SHW_TYPE == PGM_TYPE_AMD64) \
-      &&  (   PGM_GST_TYPE != PGM_TYPE_REAL \
-          &&  PGM_GST_TYPE != PGM_TYPE_PROT))
+# if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
+          ||  PGM_SHW_TYPE == PGM_TYPE_PAE \
+          ||  PGM_SHW_TYPE == PGM_TYPE_AMD64) \
+      &&  (   PGM_GST_TYPE != PGM_TYPE_REAL \
+          &&  PGM_GST_TYPE != PGM_TYPE_PROT) \
+      &&  !defined(VBOX_WITH_ONLY_PGM_NEM_MODE) )
 
     Assert(!pVM->pgm.s.fNestedPaging);
@@ -5333 +5170 @@
      * Update shadow paging info.
      */
-#if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
-          || PGM_SHW_TYPE == PGM_TYPE_PAE \
-          || PGM_SHW_TYPE == PGM_TYPE_AMD64))
+#if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
+          || PGM_SHW_TYPE == PGM_TYPE_PAE \
+          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
+     && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE) )
 # if PGM_GST_TYPE != PGM_TYPE_REAL
     Assert(!pVM->pgm.s.fNestedPaging);
```
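A large share of the PGMAllBth.h hunks are mechanical: whole functions such as SyncPage, SyncPT and the deleted VerifyAccessSyncPage are bracketed by !defined(VBOX_WITH_ONLY_PGM_NEM_MODE), and the preprocessor directives inside them gain one level of indentation. The PGM_BTH_NAME/PGM_BTH_DECL macros seen throughout are what make this template header work: it is compiled once per guest/shadow mode pair, and the macro pastes the pair into each function name. Here is a single-file sketch of that instantiation trick (the prefix macros below are invented stand-ins; the real ones are defined by the including code):

```c
#include <stdio.h>

/* Invented stand-ins for the real PGM_BTH_NAME_* prefix macros. */
#define BTH_NAME_32BIT_REAL(name) pgmBth32BitReal##name
#define BTH_NAME_EPT_PROT(name)   pgmBthEptProt##name

/* First "inclusion" of the shared template body: */
#define BTH_NAME(name) BTH_NAME_32BIT_REAL(name)
static int BTH_NAME(SyncCR3)(void) { return 1; }   /* expands to pgmBth32BitRealSyncCR3 */
#undef  BTH_NAME

/* Second "inclusion" of the identical body, producing a distinct symbol: */
#define BTH_NAME(name) BTH_NAME_EPT_PROT(name)
static int BTH_NAME(SyncCR3)(void) { return 2; }   /* expands to pgmBthEptProtSyncCR3 */
#undef  BTH_NAME

int main(void)
{
    /* Two independent instances now exist, one per mode pair. */
    printf("%d %d\n", pgmBth32BitRealSyncCR3(), pgmBthEptProtSyncCR3());
    return 0;
}
```

Because every mode pair is generated from the same body, guarding one region of this header removes the code from all instantiations at once, which is why the VerifyAccessSyncPage deletion needs no per-mode cleanup.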
trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp
```diff
--- trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r106061)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp (r107171)
@@ -973 +973 @@
     {
         uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
-        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
+        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev,
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
+                                   pVM->pgm.s.HCPhysZeroPg,
+#else
+                                   0,
+#endif
                                    PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
                                    NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
@@ -1739 +1744 @@
                                               PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
 {
-#ifdef VBOX_WITH_PGM_NEM_MODE
+#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
+    RT_NOREF(pVM, GCPhys, GCPhysPage, pDevIns, hMmio2, offMmio2PageRemap);
+    AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
+#else
+# ifdef VBOX_WITH_PGM_NEM_MODE
     AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
-#endif
+# endif
     int rc = PGM_LOCK(pVM);
     AssertRCReturn(rc, rc);
@@ -1841 +1850 @@
      *        and important when this kind of aliasing is used, so it may pay of... */
 
-# ifdef VBOX_WITH_NATIVE_NEM
+#  ifdef VBOX_WITH_NATIVE_NEM
     /* Tell NEM about the backing and protection change. */
     if (VM_IS_NEM_ENABLED(pVM))
 …
         PGM_PAGE_SET_NEM_STATE(pPage, u2State);
     }
-# endif
+#  endif
     LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
     PGM_UNLOCK(pVM);
@@ -1868 +1877 @@
     }
     return rc;
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
 }
@@ -1904 +1914 @@
 VMMDECL(int) PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
 {
+#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
+    RT_NOREF(pVM, GCPhys, GCPhysPage, HCPhysPageRemap);
+    AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
+#else
     /// Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
-#ifdef VBOX_WITH_PGM_NEM_MODE
+# ifdef VBOX_WITH_PGM_NEM_MODE
     AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
-#endif
+# endif
     int rc = PGM_LOCK(pVM);
     AssertRCReturn(rc, rc);
@@ -1932 +1946 @@
      */
     PPGMPAGE pPage = NULL;
-# ifdef VBOX_WITH_NATIVE_NEM
+#  ifdef VBOX_WITH_NATIVE_NEM
     PPGMRAMRANGE pRam = NULL;
     rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
-# else
+#  else
     rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
-# endif
+#  endif
     AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
     if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
@@ -1972 +1986 @@
     pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
 
-# ifdef VBOX_WITH_NATIVE_NEM
+#  ifdef VBOX_WITH_NATIVE_NEM
     /* Tell NEM about the backing and protection change. */
     if (VM_IS_NEM_ENABLED(pVM))
 …
         PGM_PAGE_SET_NEM_STATE(pPage, u2State);
     }
-# endif
+#  endif
     LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
     PGM_UNLOCK(pVM);
@@ -2000 +2014 @@
     }
     return rc;
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
 }
```
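Both alias functions get the same NEM-only prologue: reference the parameters, assert, and bail out with VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE before any legacy page-pool code is reached. Below is a compilable sketch of that assert-and-return stub pattern, with made-up names standing in for the VBox macros:

```c
#include <assert.h>
#include <stdio.h>

#define MY_ERR_NOT_SUPPORTED (-7)        /* stand-in for VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE */
#define MY_NOREF(a)          ((void)(a)) /* stand-in for RT_NOREF */

/* #define ONLY_NEM_MODE */  /* hypothetical stand-in for VBOX_WITH_ONLY_PGM_NEM_MODE */

static int PageAlias(void *pVM, unsigned long GCPhysPage)
{
#ifdef ONLY_NEM_MODE
    /* Feature compiled out: keep -Wunused quiet, then fail loudly. */
    MY_NOREF(pVM); MY_NOREF(GCPhysPage);
    assert(!"PageAlias not supported in NEM-only builds");
    return MY_ERR_NOT_SUPPORTED;
#else
    MY_NOREF(pVM);
    printf("aliasing page at %#lx\n", GCPhysPage);
    return 0;
#endif
}

int main(void)
{
    return PageAlias(NULL, 0x1000UL) == 0 ? 0 : 1;
}
```

The stub keeps the external API and link symbol intact, so callers need no changes; only an actual call in a NEM-only build trips the assertion.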
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp
```diff
--- trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r106061)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r107171)
@@ -2730 +2730 @@
 #ifdef VBOX_WITH_PGM_NEM_MODE
+# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
     if (pVM->pgm.s.fNemMode)
+# endif
     {
 # ifdef IN_RING3
 …
     }
 #endif /* VBOX_WITH_PGM_NEM_MODE */
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
 
     const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
@@ -2826 +2829 @@
     return VINF_SUCCESS;
 # endif /* !IN_RING0 */
+#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
 }
@@ -2978 +2982 @@
         if (RT_FAILURE(rc))
             return rc;
-# ifndef IN_RING0
+#ifndef IN_RING0
         pTlbe->pMap = pMap;
-# endif
+#endif
         pTlbe->pv = pv;
         Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
     }
     else
     {
+#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
         AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
-# ifndef IN_RING0
+#endif
+#ifndef IN_RING0
         pTlbe->pMap = NULL;
-# endif
+#endif
         pTlbe->pv = pVM->pgm.s.abZeroPg;
     }
-# ifdef PGM_WITH_PHYS_TLB
+#ifdef PGM_WITH_PHYS_TLB
     if (    PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
         ||  PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
 …
     else
         pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
-# else
+#else
     pTlbe->GCPhys = NIL_RTGCPHYS;
-# endif
+#endif
     pTlbe->pPage = pPage;
     return VINF_SUCCESS;
@@ -3743 +3749 @@
     {
 #ifdef IN_RING3
+# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
         PPGMPAGEMAPTLBE pTlbe;
         rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, (PPGMPAGE)pPage, GCPhys, &pTlbe);
 …
         pb = (uint8_t *)pTlbe->pv;
         RT_NOREF(pVM);
+# endif
 #else /** @todo a safe lockless page TLB in ring-0 needs the to ensure it gets the right invalidations. later. */
         PGM_LOCK(pVM);
@@ -5135 +5143 @@
     {
 #ifdef IN_RING3
+# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
         PPGMPAGEMAPTLBE pTlbe;
         int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
 …
         *ppb = (uint8_t *)pTlbe->pv;
         RT_NOREF(pVM);
+# endif
 #else /** @todo a safe lockless page TLB in ring-0 needs the to ensure it gets the right invalidations. later. */
         PGM_LOCK(pVM);
 …
     }
     Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RO\n", GCPhys, *ppb, *pfTlb, pPageCopy));
-    RT_NOREF(pRam);
+    RT_NOREF(pRam, pVM, pVCpu);
     return VINF_SUCCESS;
 }
@@ -5172 +5182 @@
     {
 #ifdef IN_RING3
+# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
         PPGMPAGEMAPTLBE pTlbe;
         int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
 …
         *ppb = (uint8_t *)pTlbe->pv;
         RT_NOREF(pVM);
+# endif
 #else /** @todo a safe lockless page TLB in ring-0 needs the to ensure it gets the right invalidations. later. */
         PGM_LOCK(pVM);
 …
     }
     Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RW\n", GCPhys, *ppb, *pfTlb, pPageCopy));
-    RT_NOREF(pRam);
+    RT_NOREF(pRam, pVM, pVCpu);
     return VINF_SUCCESS;
 }
```
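The first PGMAllPhys.cpp hunk uses a compact idiom: only the `if (pVM->pgm.s.fNemMode)` line sits inside the new `# ifndef`, so hybrid builds keep the runtime mode check while NEM-only builds execute the braced block unconditionally and compile out the legacy chunk-mapping code after it. A toy version of the same trick, with invented names:

```c
#include <stdio.h>

struct VMSTATE { int fNemMode; };

/* #define ONLY_NEM */   /* hypothetical stand-in for VBOX_WITH_ONLY_PGM_NEM_MODE */

static const char *MapPage(struct VMSTATE *pVM)
{
#ifdef ONLY_NEM
    (void)pVM;                    /* RT_NOREF equivalent; the check below is gone */
#endif
#ifndef ONLY_NEM
    if (pVM->fNemMode)            /* hybrid builds: decide at runtime */
#endif
    {                             /* NEM-only builds: taken unconditionally */
        return "nem-backed mapping";
    }
#ifndef ONLY_NEM
    return "legacy pgm mapping";  /* compiled out entirely in NEM-only builds */
#endif
}

int main(void)
{
    struct VMSTATE vm = { 1 };
    puts(MapPage(&vm));
    return 0;
}
```

The payoff is that the braced block needs to exist only once: no duplicated body, and the optimizer in a NEM-only build never even sees the dead legacy path.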
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
```diff
--- trunk/src/VBox/VMM/VMMAll/PGMAllShw.h (r106061)
+++ trunk/src/VBox/VMM/VMMAll/PGMAllShw.h (r107171)
@@ -230 +230 @@
 PGM_SHW_DECL(int, Enter)(PVMCPUCC pVCpu)
 {
-#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
+#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
 
 # ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
@@ -284 +284 @@
 PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu)
 {
-#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
+#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
     PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
@@ -395 +395 @@
 PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
 {
-#if PGM_SHW_TYPE == PGM_TYPE_NONE
+#if PGM_SHW_TYPE == PGM_TYPE_NONE || defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
     RT_NOREF(pVCpu, GCPtr);
     AssertFailed();
@@ -607 +607 @@
 PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
 {
-#if PGM_SHW_TYPE == PGM_TYPE_NONE
+#if PGM_SHW_TYPE == PGM_TYPE_NONE || defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
     RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask, fOpFlags);
     AssertFailed();
```