VirtualBox

Changeset 107171 in vbox for trunk/src/VBox/VMM/VMMAll


Ignore:
Timestamp:
Nov 28, 2024 10:38:10 AM (5 months ago)
Author:
vboxsync
svn:sync-xref-src-repo-rev:
166169
Message:

VMM/PGM: Introducing VBOX_WITH_ONLY_PGM_NEM_MODE to disable lots of unused code on *.arm64 and darwin. jiraref:VBP-1466

Location:
trunk/src/VBox/VMM/VMMAll
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r106061 r107171  
    756756# define PGMMODEDATABTH_NULL_ENTRY()    { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
    757757# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    758     { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
     758    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler) }
    759759
    760760#elif !defined(IN_RING3) && defined(VBOX_STRICT)
    761761# define PGMMODEDATABTH_NULL_ENTRY()    { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
    762762# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    763     { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
     763    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(Trap0eHandler), Nm(NestedTrap0eHandler), Nm(AssertCR3) }
    764764
    765765#elif defined(IN_RING3) && !defined(VBOX_STRICT)
    766766# define PGMMODEDATABTH_NULL_ENTRY()    { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL }
    767767# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    768     { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
     768    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), }
    769769
    770770#elif defined(IN_RING3) && defined(VBOX_STRICT)
    771771# define PGMMODEDATABTH_NULL_ENTRY()    { UINT32_MAX, UINT32_MAX, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
    772772# define PGMMODEDATABTH_ENTRY(uShwT, uGstT, Nm) \
    773     { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(VerifyAccessSyncPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
     773    { uShwT, uGstT, Nm(InvalidatePage), Nm(SyncCR3), Nm(PrefetchPage), Nm(MapCR3), Nm(UnmapCR3), Nm(Enter), Nm(AssertCR3) }
    774774
    775775#else
     
    12821282}
    12831283
     1284#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    12841285
    12851286/**
     
    17411742
    17421743
    1743 #ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     1744# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
    17441745/**
    17451746 * Syncs the SHADOW nested-guest page directory pointer for the specified address.
     
    18481849    return VINF_SUCCESS;
    18491850}
    1850 #endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
    1851 
    1852 
    1853 #ifdef IN_RING0
     1851# endif /* VBOX_WITH_NESTED_HWVIRT_VMX_EPT */
     1852
     1853
     1854# ifdef IN_RING0
    18541855/**
    18551856 * Synchronizes a range of nested page table entries.
     
    19061907    return rc;
    19071908}
    1908 #endif /* IN_RING0 */
    1909 
     1909# endif /* IN_RING0 */
     1910
     1911#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
    19101912
    19111913/**
     
    25112513VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVMCPU pVCpu)
    25122514{
     2515#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    25132516    PPGMPOOLPAGE pPoolPage = pVCpu->pgm.s.CTX_SUFF(pShwPageCR3);
    25142517    AssertPtrReturn(pPoolPage, NIL_RTHCPHYS);
    25152518    return pPoolPage->Core.Key;
     2519#else
     2520    RT_NOREF(pVCpu);
     2521    return NIL_RTHCPHYS;
     2522#endif
    25162523}
    25172524
     
    34613468    AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnSyncCR3, VERR_PGM_MODE_IPE);
    34623469    AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnPrefetchPage, VERR_PGM_MODE_IPE);
    3463     AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnVerifyAccessSyncPage, VERR_PGM_MODE_IPE);
    34643470    AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnMapCR3, VERR_PGM_MODE_IPE);
    34653471    AssertPtrReturn(g_aPgmBothModeData[idxNewBth].pfnUnmapCR3, VERR_PGM_MODE_IPE);
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r106061 r107171  
    4949PGM_BTH_DECL(int, NestedTrap0eHandler)(PVMCPUCC pVCpu, RTGCUINT uErr, PCPUMCTX pCtx, RTGCPHYS GCPhysNestedFault,
    5050                                       bool fIsLinearAddrValid, RTGCPTR GCPtrNestedFault, PPGMPTWALK pWalk, bool *pfLockTaken);
    51 # if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT
     51# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    5252static void PGM_BTH_NAME(NestedSyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPte, RTGCPHYS GCPhysPage, PPGMPOOLPAGE pShwPage,
    5353                                               unsigned iPte, SLATPTE GstSlatPte);
     
    5858#endif
    5959PGM_BTH_DECL(int, InvalidatePage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage);
     60#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    6061static int PGM_BTH_NAME(SyncPage)(PVMCPUCC pVCpu, GSTPDE PdeSrc, RTGCPTR GCPtrPage, unsigned cPages, unsigned uErr);
    6162static int PGM_BTH_NAME(CheckDirtyPageFault)(PVMCPUCC pVCpu, uint32_t uErr, PSHWPDE pPdeDst, GSTPDE const *pPdeSrc, RTGCPTR GCPtrPage);
    6263static int PGM_BTH_NAME(SyncPT)(PVMCPUCC pVCpu, unsigned iPD, PGSTPD pPDSrc, RTGCPTR GCPtrPage);
    63 #if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     64# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    6465static void PGM_BTH_NAME(SyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPteDst, GSTPDE PdeSrc, GSTPTE PteSrc, PPGMPOOLPAGE pShwPage, unsigned iPTDst);
    65 #else
     66# else
    6667static void PGM_BTH_NAME(SyncPageWorker)(PVMCPUCC pVCpu, PSHWPTE pPteDst, RTGCPHYS GCPhysPage, PPGMPOOLPAGE pShwPage, unsigned iPTDst);
    67 #endif
    68 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPUCC pVCpu, RTGCPTR Addr, unsigned fPage, unsigned uErr);
     68# endif
     69#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
    6970PGM_BTH_DECL(int, PrefetchPage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage);
    7071PGM_BTH_DECL(int, SyncCR3)(PVMCPUCC pVCpu, uint64_t cr0, uint64_t cr3, uint64_t cr4, bool fGlobal);
     
    122123     * Other modes rely on MapCR3/UnmapCR3 to setup the shadow root page tables.
    123124     */
    124 #if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
    125           || PGM_SHW_TYPE == PGM_TYPE_PAE    \
    126           || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
    127       && (   PGM_GST_TYPE == PGM_TYPE_REAL   \
    128           || PGM_GST_TYPE == PGM_TYPE_PROT))
     125#if (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
     126         || PGM_SHW_TYPE == PGM_TYPE_PAE \
     127         || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
     128     && (   PGM_GST_TYPE == PGM_TYPE_REAL \
     129         || PGM_GST_TYPE == PGM_TYPE_PROT)) \
     130     && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    129131
    130132    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
     
    179181#ifndef IN_RING3
    180182
    181 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
     183# if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    182184/**
    183185 * Deal with a guest page fault.
     
    191193 * @param   uErr            The error code.
    192194 */
    193 PGM_BTH_DECL(VBOXSTRICTRC, Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, RTGCUINT uErr)
     195static VBOXSTRICTRC PGM_BTH_NAME(Trap0eHandlerGuestFault)(PVMCPUCC pVCpu, PPGMPTWALK pWalk, RTGCUINT uErr)
    194196{
    195197    /*
     
    216218
    217219
    218 #if !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE
     220#if !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    219221/**
    220222 * Deal with a guest page fault.
     
    425427    && !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) \
    426428    && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT) \
    427     && PGM_SHW_TYPE != PGM_TYPE_NONE
     429    && PGM_SHW_TYPE != PGM_TYPE_NONE \
     430    && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    428431    int rc;
    429432
     
    11561159# if defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) \
    11571160    && PGM_GST_TYPE == PGM_TYPE_PROT \
    1158     && PGM_SHW_TYPE == PGM_TYPE_EPT
     1161    && PGM_SHW_TYPE == PGM_TYPE_EPT \
     1162    && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    11591163
    11601164    Assert(CPUMIsGuestVmxEptPagingEnabled(pVCpu));
     
    14041408#if    PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) \
    14051409    && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
    1406     && PGM_SHW_TYPE != PGM_TYPE_NONE
     1410    && PGM_SHW_TYPE != PGM_TYPE_NONE \
     1411    && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    14071412    int rc;
    14081413    PVMCC    pVM   = pVCpu->CTX_SUFF(pVM);
     
    16621667}
    16631668
    1664 #if PGM_SHW_TYPE != PGM_TYPE_NONE
     1669#if PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    16651670
    16661671/**
     
    25722577}
    25732578
    2574 #endif /* PGM_SHW_TYPE != PGM_TYPE_NONE */
    2575 
    2576 #if !defined(IN_RING3) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT
     2579#endif /* PGM_SHW_TYPE != PGM_TYPE_NONE && !VBOX_WITH_ONLY_PGM_NEM_MODE */
     2580
     2581#if !defined(IN_RING3) && defined(VBOX_WITH_NESTED_HWVIRT_VMX_EPT) && PGM_SHW_TYPE == PGM_TYPE_EPT && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    25772582
    25782583/**
     
    31943199}
    31953200
    3196 #endif  /* !IN_RING3 && VBOX_WITH_NESTED_HWVIRT_VMX_EPT && PGM_SHW_TYPE == PGM_TYPE_EPT*/
    3197 #if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE
     3201#endif  /* !IN_RING3 && VBOX_WITH_NESTED_HWVIRT_VMX_EPT && PGM_SHW_TYPE == PGM_TYPE_EPT && !VBOX_WITH_ONLY_PGM_NEM_MODE */
     3202#if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    31983203
    31993204/**
     
    33523357}
    33533358
    3354 #endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE */
    3355 
     3359#endif /* PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !VBOX_WITH_ONLY_PGM_NEM_MODE */
     3360
     3361#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    33563362/**
    33573363 * Sync a shadow page table.
     
    33613367 * Handles mapping conflicts.
    33623368 *
    3363  * This is called by VerifyAccessSyncPage, PrefetchPage, InvalidatePage (on
    3364  * conflict), and Trap0eHandler.
     3369 * This is called by PrefetchPage, InvalidatePage (on conflict), and
     3370 * Trap0eHandler.
    33653371 *
    33663372 * A precondition for this method is that the shadow PDE is not present.  The
     
    33803386    PPGMPOOL    pPool = pVM->pgm.s.CTX_SUFF(pPool); NOREF(pPool);
    33813387
    3382 #if 0 /* rarely useful; leave for debugging. */
     3388# if 0 /* rarely useful; leave for debugging. */
    33833389    STAM_COUNTER_INC(&pVCpu->pgm.s.StatSyncPtPD[iPDSrc]);
    3384 #endif
     3390# endif
    33853391    LogFlow(("SyncPT: GCPtrPage=%RGv\n", GCPtrPage)); RT_NOREF_PV(GCPtrPage);
    33863392
    33873393    PGM_LOCK_ASSERT_OWNER(pVM);
    33883394
    3389 #if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
    3390      || PGM_GST_TYPE == PGM_TYPE_PAE \
    3391      || PGM_GST_TYPE == PGM_TYPE_AMD64) \
    3392  && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
    3393  && PGM_SHW_TYPE != PGM_TYPE_NONE
     3395# if (   PGM_GST_TYPE == PGM_TYPE_32BIT \
     3396      || PGM_GST_TYPE == PGM_TYPE_PAE \
     3397      || PGM_GST_TYPE == PGM_TYPE_AMD64) \
     3398  && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
     3399  && PGM_SHW_TYPE != PGM_TYPE_NONE
    33943400    int             rc       = VINF_SUCCESS;
    33953401
     
    34043410     * Get the relevant shadow PDE entry.
    34053411     */
    3406 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     3412#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    34073413    const unsigned  iPDDst   = GCPtrPage >> SHW_PD_SHIFT;
    34083414    PSHWPDE         pPdeDst  = pgmShwGet32BitPDEPtr(pVCpu, GCPtrPage);
     
    34133419    Assert(pShwPde);
    34143420
    3415 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
     3421#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
    34163422    const unsigned  iPDDst   = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    34173423    PPGMPOOLPAGE    pShwPde  = NULL;
     
    34273433    pPdeDst = &pPDDst->a[iPDDst];
    34283434
    3429 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     3435#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    34303436    const unsigned  iPdpt    = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    34313437    const unsigned  iPDDst   = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     
    34373443    PSHWPDE         pPdeDst  = &pPDDst->a[iPDDst];
    34383444
    3439 # endif
     3445#  endif
    34403446    SHWPDE          PdeDst   = *pPdeDst;
    34413447
    3442 # if PGM_GST_TYPE == PGM_TYPE_AMD64
     3448#  if PGM_GST_TYPE == PGM_TYPE_AMD64
    34433449    /* Fetch the pgm pool shadow descriptor. */
    34443450    PPGMPOOLPAGE    pShwPde  = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & X86_PDPE_PG_MASK);
    34453451    Assert(pShwPde);
    3446 # endif
     3452#  endif
    34473453
    34483454    Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P.*/
     
    34653471        {
    34663472            GCPhys = GST_GET_PDE_GCPHYS(PdeSrc);
    3467 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     3473#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    34683474            /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
    34693475            GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | ((iPDDst & 1) * (GUEST_PAGE_SIZE / 2)));
    3470 # endif
     3476#  endif
    34713477            rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, PGMPOOLACCESS_DONTCARE, PGM_A20_IS_ENABLED(pVCpu),
    34723478                              pShwPde->idx, iPDDst, false /*fLockPage*/,
     
    34763482        {
    34773483            PGMPOOLACCESS enmAccess;
    3478 # if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
     3484#  if PGM_WITH_NX(PGM_GST_TYPE, PGM_SHW_TYPE)
    34793485            const bool  fNoExecute = (PdeSrc.u & X86_PDE_PAE_NX) && GST_IS_NX_ACTIVE(pVCpu);
    3480 # else
     3486#  else
    34813487            const bool  fNoExecute = false;
    3482 # endif
     3488#  endif
    34833489
    34843490            GCPhys = GST_GET_BIG_PDE_GCPHYS(pVM, PdeSrc);
    3485 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     3491#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    34863492            /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
    34873493            GCPhys = PGM_A20_APPLY(pVCpu, GCPhys | (GCPtrPage & (1 << X86_PD_PAE_SHIFT)));
    3488 # endif
     3494#  endif
    34893495            /* Determine the right kind of large page to avoid incorrect cached entry reuse. */
    34903496            if (PdeSrc.u & X86_PDE_US)
     
    35853591                 */
    35863592                STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,SyncPT4K));
    3587 # ifdef PGM_SYNC_N_PAGES
     3593#  ifdef PGM_SYNC_N_PAGES
    35883594                unsigned        iPTBase   = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
    35893595                unsigned        iPTDst    = iPTBase;
     
    35933599                else
    35943600                    iPTDst -= PGM_SYNC_NR_PAGES / 2;
    3595 # else /* !PGM_SYNC_N_PAGES */
     3601#  else /* !PGM_SYNC_N_PAGES */
    35963602                unsigned        iPTDst    = 0;
    35973603                const unsigned  iPTDstEnd = RT_ELEMENTS(pPTDst->a);
    3598 # endif /* !PGM_SYNC_N_PAGES */
     3604#  endif /* !PGM_SYNC_N_PAGES */
    35993605                RTGCPTR         GCPtrCur  = (GCPtrPage & ~(RTGCPTR)((1 << SHW_PD_SHIFT) - 1))
    36003606                                          | ((RTGCPTR)iPTDst << GUEST_PAGE_SHIFT);
    3601 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     3607#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    36023608                /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
    36033609                const unsigned  offPTSrc  = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
    3604 # else
     3610#  else
    36053611                const unsigned  offPTSrc  = 0;
    3606 # endif
     3612#  endif
    36073613                for (; iPTDst < iPTDstEnd; iPTDst++, GCPtrCur += GUEST_PAGE_SIZE)
    36083614                {
     
    36843690                if (pRam && GCPhys >= pRam->GCPhys)
    36853691                {
    3686 # ifndef PGM_WITH_A20
     3692#  ifndef PGM_WITH_A20
    36873693                    unsigned iHCPage = (GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT;
    3688 # endif
     3694#  endif
    36893695                    do
    36903696                    {
    36913697                        /* Make shadow PTE. */
    3692 # ifdef PGM_WITH_A20
     3698#  ifdef PGM_WITH_A20
    36933699                        PPGMPAGE    pPage = &pRam->aPages[(GCPhys - pRam->GCPhys) >> GUEST_PAGE_SHIFT];
    3694 # else
     3700#  else
    36953701                        PPGMPAGE    pPage = &pRam->aPages[iHCPage];
    3696 # endif
     3702#  endif
    36973703                        SHWPTE      PteDst;
    36983704
    3699 # ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
     3705#  ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
    37003706                        /* Try to make the page writable if necessary. */
    37013707                        if (    PGM_PAGE_GET_TYPE(pPage)  == PGMPAGETYPE_RAM
     
    37033709                                 || (   SHW_PTE_IS_RW(PteDstBase)
    37043710                                     && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED
    3705 ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
     3711 ifdef VBOX_WITH_REAL_WRITE_MONITORED_PAGES
    37063712                                     && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_WRITE_MONITORED
    3707 endif
    3708 ifdef VBOX_WITH_PAGE_SHARING
     3713 endif
     3714 ifdef VBOX_WITH_PAGE_SHARING
    37093715                                     && PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_SHARED
    3710 endif
     3716 endif
    37113717                                     && !PGM_PAGE_IS_BALLOONED(pPage))
    37123718                                 )
     
    37183724                                break;
    37193725                        }
    3720 # endif
     3726#  endif
    37213727
    37223728                        if (PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage) && !PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPage))
     
    37313737                            &&  PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED)
    37323738                        {
    3733 # ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
     3739#  ifndef VBOX_WITH_NEW_LAZY_PAGE_ALLOC
    37343740                            /* Still applies to shared pages. */
    37353741                            Assert(!PGM_PAGE_IS_ZERO(pPage));
    3736 # endif
     3742#  endif
    37373743                            SHW_PTE_SET_RO(PteDst);   /** @todo this isn't quite working yet... */
    37383744                            Log3(("SyncPT: write-protecting %RGp pPage=%R[pgmpage] at %RGv\n", GCPhys, pPage, (RTGCPTR)(GCPtr | (iPTDst << SHW_PT_SHIFT))));
     
    37513757                        GCPhys += GUEST_PAGE_SIZE;
    37523758                        PGM_A20_APPLY_TO_VAR(pVCpu, GCPhys);
    3753 # ifndef PGM_WITH_A20
     3759#  ifndef PGM_WITH_A20
    37543760                        iHCPage++;
    3755 # endif
     3761#  endif
    37563762                        iPTDst++;
    37573763                    } while (   iPTDst < RT_ELEMENTS(pPTDst->a)
     
    37873793    return rc;
    37883794
    3789 #elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
     3795# elif (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
    37903796    && !PGM_TYPE_IS_NESTED(PGM_SHW_TYPE) \
    37913797    && (PGM_SHW_TYPE != PGM_TYPE_EPT || PGM_GST_TYPE == PGM_TYPE_PROT) \
     
    37993805     */
    38003806    int             rc = VINF_SUCCESS;
    3801 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
     3807#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    38023808    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    38033809    PSHWPDE         pPdeDst = pgmShwGet32BitPDEPtr(pVCpu, GCPtrPage);
     
    38083814    Assert(pShwPde);
    38093815
    3810 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
     3816#  elif PGM_SHW_TYPE == PGM_TYPE_PAE
    38113817    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    38123818    PPGMPOOLPAGE    pShwPde = NULL;             /* initialized to shut up gcc */
     
    38223828    pPdeDst = &pPDDst->a[iPDDst];
    38233829
    3824 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     3830#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    38253831    const unsigned  iPdpt   = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
    38263832    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     
    38363842    Assert(pShwPde);
    38373843
    3838 # elif PGM_SHW_TYPE == PGM_TYPE_EPT
     3844#  elif PGM_SHW_TYPE == PGM_TYPE_EPT
    38393845    const unsigned  iPdpt   = (GCPtrPage >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
    38403846    const unsigned  iPDDst  = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
     
    38563862    PPGMPOOLPAGE pShwPde = pgmPoolGetPage(pPool, pPdptDst->a[iPdpt].u & EPT_PDPTE_PG_MASK);
    38573863    Assert(pShwPde);
    3858 # endif
     3864#  endif
    38593865    SHWPDE          PdeDst = *pPdeDst;
    38603866
    38613867    Assert(!SHW_PDE_IS_P(PdeDst)); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
    38623868
    3863 # if defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
     3869#  if defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
    38643870    if (BTH_IS_NP_ACTIVE(pVM))
    38653871    {
     
    38983904                }
    38993905            }
    3900 if !defined(VBOX_WITH_NEW_LAZY_PAGE_ALLOC) && !defined(PGM_WITH_PAGE_ZEROING_DETECTION) /* This code is too aggresive! */
     3906 if !defined(VBOX_WITH_NEW_LAZY_PAGE_ALLOC) && !defined(PGM_WITH_PAGE_ZEROING_DETECTION) /* This code is too aggresive! */
    39013907            else if (   PGMIsUsingLargePages(pVM)
    39023908                     && PGM_A20_IS_ENABLED(pVCpu))
     
    39123918                    LogFlow(("pgmPhysAllocLargePage failed with %Rrc\n", rc));
    39133919            }
    3914 endif
     3920 endif
    39153921
    39163922            if (HCPhys != NIL_RTHCPHYS)
    39173923            {
    3918 if PGM_SHW_TYPE == PGM_TYPE_EPT
     3924 if PGM_SHW_TYPE == PGM_TYPE_EPT
    39193925                PdeDst.u = HCPhys | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE | EPT_E_LEAF | EPT_E_IGNORE_PAT | EPT_E_MEMTYPE_WB
    39203926                         | (PdeDst.u & X86_PDE_AVL_MASK) /** @todo do we need this? */;
    3921 else
     3927 else
    39223928                PdeDst.u = HCPhys | X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_PS
    39233929                         | (PdeDst.u & X86_PDE_AVL_MASK) /** @todo PGM_PD_FLAGS? */;
    3924 endif
     3930 endif
    39253931                SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
    39263932
     
    39343940        }
    39353941    }
    3936 # endif /* defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE */
     3942#  endif /* defined(PGM_WITH_LARGE_PAGES) && PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE */
    39373943
    39383944    /*
     
    39843990
    39853991    /* Save the new PDE. */
    3986 # if PGM_SHW_TYPE == PGM_TYPE_EPT
     3992#  if PGM_SHW_TYPE == PGM_TYPE_EPT
    39873993    PdeDst.u = pShwPage->Core.Key | EPT_E_READ | EPT_E_WRITE | EPT_E_EXECUTE
    39883994             | (PdeDst.u & X86_PDE_AVL_MASK /** @todo do we really need this? */);
    3989 # else
     3995#  else
    39903996    PdeDst.u = pShwPage->Core.Key | X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A
    39913997             | (PdeDst.u & X86_PDE_AVL_MASK /** @todo use a PGM_PD_FLAGS define */);
    3992 # endif
     3998#  endif
    39933999    SHW_PDE_ATOMIC_SET2(*pPdeDst, PdeDst);
    39944000
     
    39984004    return rc;
    39994005
    4000 #else
     4006# else
    40014007    NOREF(iPDSrc); NOREF(pPDSrc);
    40024008    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_SHW_TYPE, PGM_GST_TYPE));
    40034009    return VERR_PGM_NOT_USED_IN_MODE;
    4004 #endif
     4010# endif
    40054011}
     4012#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
    40064013
    40074014
     
    40254032     || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \
    40264033 && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
    4027  && PGM_SHW_TYPE != PGM_TYPE_NONE
     4034 && PGM_SHW_TYPE != PGM_TYPE_NONE \
     4035 && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    40284036    /*
    40294037     * Check that all Guest levels thru the PDE are present, getting the
     
    41294137    return rc;
    41304138
    4131 #elif PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NONE
     4139#elif PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NONE || defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    41324140    NOREF(pVCpu); NOREF(GCPtrPage);
    41334141    return VINF_SUCCESS; /* ignore */
     
    41354143    AssertCompile(0);
    41364144#endif
    4137 }
    4138 
    4139 
    4140 
    4141 
    4142 /**
    4143  * Syncs a page during a PGMVerifyAccess() call.
    4144  *
    4145  * @returns VBox status code (informational included).
    4146  * @param   pVCpu       The cross context virtual CPU structure.
    4147  * @param   GCPtrPage   The address of the page to sync.
    4148  * @param   fPage       The effective guest page flags.
    4149  * @param   uErr        The trap error code.
    4150  * @remarks This will normally never be called on invalid guest page
    4151  *          translation entries.
    4152  */
    4153 PGM_BTH_DECL(int, VerifyAccessSyncPage)(PVMCPUCC pVCpu, RTGCPTR GCPtrPage, unsigned fPage, unsigned uErr)
    4154 {
    4155     PVMCC pVM = pVCpu->CTX_SUFF(pVM); NOREF(pVM);
    4156 
    4157     LogFlow(("VerifyAccessSyncPage: GCPtrPage=%RGv fPage=%#x uErr=%#x\n", GCPtrPage, fPage, uErr));
    4158     RT_NOREF_PV(GCPtrPage); RT_NOREF_PV(fPage); RT_NOREF_PV(uErr);
    4159 
    4160     Assert(!pVM->pgm.s.fNestedPaging);
    4161 #if   (   PGM_GST_TYPE == PGM_TYPE_32BIT \
    4162        || PGM_GST_TYPE == PGM_TYPE_REAL \
    4163        || PGM_GST_TYPE == PGM_TYPE_PROT \
    4164        || PGM_GST_TYPE == PGM_TYPE_PAE \
    4165        || PGM_GST_TYPE == PGM_TYPE_AMD64 ) \
    4166     && !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) \
    4167     && PGM_SHW_TYPE != PGM_TYPE_NONE
    4168 
    4169     /*
    4170      * Get guest PD and index.
    4171      */
    4172     /** @todo Performance: We've done all this a jiffy ago in the
    4173      *        PGMGstGetPage call. */
    4174 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    4175 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
    4176     const unsigned  iPDSrc = (uint32_t)GCPtrPage >> GST_PD_SHIFT;
    4177     PGSTPD          pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
    4178 
    4179 #  elif PGM_GST_TYPE == PGM_TYPE_PAE
    4180     unsigned        iPDSrc = 0;
    4181     X86PDPE         PdpeSrc;
    4182     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(pVCpu, GCPtrPage, &iPDSrc, &PdpeSrc);
    4183     if (RT_UNLIKELY(!pPDSrc))
    4184     {
    4185         Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
    4186         return VINF_EM_RAW_GUEST_TRAP;
    4187     }
    4188 
    4189 #  elif PGM_GST_TYPE == PGM_TYPE_AMD64
    4190     unsigned        iPDSrc = 0;         /* shut up gcc */
    4191     PX86PML4E       pPml4eSrc = NULL;   /* ditto */
    4192     X86PDPE         PdpeSrc;
    4193     PGSTPD          pPDSrc = pgmGstGetLongModePDPtr(pVCpu, GCPtrPage, &pPml4eSrc, &PdpeSrc, &iPDSrc);
    4194     if (RT_UNLIKELY(!pPDSrc))
    4195     {
    4196         Log(("PGMVerifyAccess: access violation for %RGv due to non-present PDPTR\n", GCPtrPage));
    4197         return VINF_EM_RAW_GUEST_TRAP;
    4198     }
    4199 #  endif
    4200 
    4201 # else  /* !PGM_WITH_PAGING */
    4202     PGSTPD          pPDSrc = NULL;
    4203     const unsigned  iPDSrc = 0;
    4204 # endif /* !PGM_WITH_PAGING */
    4205     int             rc = VINF_SUCCESS;
    4206 
    4207     PGM_LOCK_VOID(pVM);
    4208 
    4209     /*
    4210      * First check if the shadow pd is present.
    4211      */
    4212 # if PGM_SHW_TYPE == PGM_TYPE_32BIT
    4213     PX86PDE         pPdeDst = pgmShwGet32BitPDEPtr(pVCpu, GCPtrPage);
    4214     AssertReturn(pPdeDst, VERR_INTERNAL_ERROR_3);
    4215 
    4216 # elif PGM_SHW_TYPE == PGM_TYPE_PAE
    4217     PX86PDEPAE      pPdeDst;
    4218     const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
    4219     PX86PDPAE       pPDDst;
    4220 #   if PGM_GST_TYPE != PGM_TYPE_PAE
    4221     /* Fake PDPT entry; access control handled on the page table level, so allow everything. */
    4222     X86PDPE         PdpeSrc;
    4223     PdpeSrc.u  = X86_PDPE_P;   /* rw/us are reserved for PAE pdpte's; accessed bit causes invalid VT-x guest state errors */
    4224 #   endif
    4225     rc = pgmShwSyncPaePDPtr(pVCpu, GCPtrPage, PdpeSrc.u, &pPDDst);
    4226     if (rc != VINF_SUCCESS)
    4227     {
    4228         PGM_UNLOCK(pVM);
    4229         AssertRC(rc);
    4230         return rc;
    4231     }
    4232     Assert(pPDDst);
    4233     pPdeDst = &pPDDst->a[iPDDst];
    4234 
    4235 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    4236     const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
    4237     PX86PDPAE       pPDDst;
    4238     PX86PDEPAE      pPdeDst;
    4239 
    4240 #  if PGM_GST_TYPE == PGM_TYPE_PROT
    4241     /* AMD-V nested paging: Fake PML4 & PDPT entry; access control handled on the page table level, so allow everything. */
    4242     X86PML4E        Pml4eSrc;
    4243     X86PDPE         PdpeSrc;
    4244     PX86PML4E       pPml4eSrc = &Pml4eSrc;
    4245     Pml4eSrc.u = X86_PML4E_P | X86_PML4E_RW | X86_PML4E_US | X86_PML4E_A;
    4246     PdpeSrc.u  = X86_PDPE_P  | X86_PDPE_RW  | X86_PDPE_US  | X86_PDPE_A;
    4247 #  endif
    4248 
    4249     rc = pgmShwSyncLongModePDPtr(pVCpu, GCPtrPage, pPml4eSrc->u, PdpeSrc.u, &pPDDst);
    4250     if (rc != VINF_SUCCESS)
    4251     {
    4252         PGM_UNLOCK(pVM);
    4253         AssertRC(rc);
    4254         return rc;
    4255     }
    4256     Assert(pPDDst);
    4257     pPdeDst = &pPDDst->a[iPDDst];
    4258 # endif
    4259 
    4260     if (!(pPdeDst->u & X86_PDE_P))
    4261     {
    4262         rc = PGM_BTH_NAME(SyncPT)(pVCpu, iPDSrc, pPDSrc, GCPtrPage);
    4263         if (rc != VINF_SUCCESS)
    4264         {
    4265             PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    4266             PGM_UNLOCK(pVM);
    4267             AssertRC(rc);
    4268             return rc;
    4269         }
    4270     }
    4271 
    4272 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    4273     /* Check for dirty bit fault */
    4274     rc = PGM_BTH_NAME(CheckDirtyPageFault)(pVCpu, uErr, pPdeDst, &pPDSrc->a[iPDSrc], GCPtrPage);
    4275     if (rc == VINF_PGM_HANDLED_DIRTY_BIT_FAULT)
    4276         Log(("PGMVerifyAccess: success (dirty)\n"));
    4277     else
    4278 # endif
    4279     {
    4280 # if PGM_WITH_PAGING(PGM_GST_TYPE, PGM_SHW_TYPE)
    4281         GSTPDE PdeSrc       = pPDSrc->a[iPDSrc];
    4282 # else
    4283         GSTPDE const PdeSrc = { X86_PDE_P | X86_PDE_RW | X86_PDE_US | X86_PDE_A }; /* faked so we don't have to #ifdef everything */
    4284 # endif
    4285 
    4286         Assert(rc != VINF_EM_RAW_GUEST_TRAP);
    4287         if (uErr & X86_TRAP_PF_US)
    4288             STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncUser));
    4289         else /* supervisor */
    4290             STAM_COUNTER_INC(&pVCpu->pgm.s.Stats.CTX_MID_Z(Stat,PageOutOfSyncSupervisor));
    4291 
    4292         rc = PGM_BTH_NAME(SyncPage)(pVCpu, PdeSrc, GCPtrPage, 1, 0);
    4293         if (RT_SUCCESS(rc))
    4294         {
    4295             /* Page was successfully synced */
    4296             Log2(("PGMVerifyAccess: success (sync)\n"));
    4297             rc = VINF_SUCCESS;
    4298         }
    4299         else
    4300         {
    4301             Log(("PGMVerifyAccess: access violation for %RGv rc=%Rrc\n", GCPtrPage, rc));
    4302             rc = VINF_EM_RAW_GUEST_TRAP;
    4303         }
    4304     }
    4305     PGM_DYNMAP_UNUSED_HINT(pVCpu, pPdeDst);
    4306     PGM_UNLOCK(pVM);
    4307     return rc;
    4308 
    4309 #else  /* PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) */
    4310 
    4311     AssertLogRelMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
    4312     return VERR_PGM_NOT_USED_IN_MODE;
    4313 #endif /* PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) */
    43144145}
    43154146
     
    43364167    LogFlow(("SyncCR3 FF=%d fGlobal=%d\n", !!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3), fGlobal));
    43374168
    4338 #if !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE
     4169#if !PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && PGM_SHW_TYPE != PGM_TYPE_NONE && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    43394170# ifdef PGMPOOL_WITH_OPTIMIZED_DIRTY_PT
    43404171    PGM_LOCK_VOID(pVM);
     
    43444175    PGM_UNLOCK(pVM);
    43454176# endif
    4346 #endif /* !NESTED && !EPT */
     4177#endif /* !NESTED && !EPT && !VBOX_WITH_ONLY_PGM_NEM_MODE */
    43474178
    43484179#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) || PGM_SHW_TYPE == PGM_TYPE_NONE
     
    43994230    || PGM_GST_TYPE == PGM_TYPE_AMD64
    44004231
    4401     bool            fBigPagesSupported = GST_IS_PSE_ACTIVE(pVCpu);
    4402     PPGMCPU         pPGM = &pVCpu->pgm.s;
     4232#  ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
     4233    bool const      fBigPagesSupported = GST_IS_PSE_ACTIVE(pVCpu);
    44034234    RTGCPHYS        GCPhysGst;              /* page address derived from the guest page tables. */
    44044235    RTHCPHYS        HCPhysShw;              /* page address derived from the shadow page tables. */
    4405 ifndef IN_RING0
     4236 ifndef IN_RING0
    44064237    RTHCPHYS        HCPhys;                 /* general usage. */
     4238#   endif
    44074239#  endif
     4240    PPGMCPU const   pPGM = &pVCpu->pgm.s;
    44084241    int             rc;
     4242    RT_NOREF(rc);
    44094243
    44104244    /*
     
    44354269#  endif /* !IN_RING0 */
    44364270
     4271#  ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    44374272    /*
    44384273     * Get and check the Shadow CR3.
    44394274     */
    4440 if PGM_SHW_TYPE == PGM_TYPE_32BIT
     4275 if PGM_SHW_TYPE == PGM_TYPE_32BIT
    44414276    unsigned        cPDEs       = X86_PG_ENTRIES;
    44424277    unsigned        cIncrement  = X86_PG_ENTRIES * GUEST_PAGE_SIZE;
    4443 elif PGM_SHW_TYPE == PGM_TYPE_PAE
    4444 #   if PGM_GST_TYPE == PGM_TYPE_32BIT
     4278 elif PGM_SHW_TYPE == PGM_TYPE_PAE
     4279#    if PGM_GST_TYPE == PGM_TYPE_32BIT
    44454280    unsigned        cPDEs       = X86_PG_PAE_ENTRIES * 4;   /* treat it as a 2048 entry table. */
    4446 #   else
     4281#    else
    44474282    unsigned        cPDEs       = X86_PG_PAE_ENTRIES;
    4448 #   endif
     4283#    endif
    44494284    unsigned        cIncrement  = X86_PG_PAE_ENTRIES * GUEST_PAGE_SIZE;
    4450 elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     4285 elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    44514286    unsigned        cPDEs       = X86_PG_PAE_ENTRIES;
    44524287    unsigned        cIncrement  = X86_PG_PAE_ENTRIES * GUEST_PAGE_SIZE;
    4453 endif
     4288 endif
    44544289    if (cb != ~(RTGCPTR)0)
    44554290        cPDEs = RT_MIN(cb >> SHW_PD_SHIFT, 1);
     
    44574292/** @todo call the other two PGMAssert*() functions. */
    44584293
    4459 if PGM_GST_TYPE == PGM_TYPE_AMD64
     4294 if PGM_GST_TYPE == PGM_TYPE_AMD64
    44604295    unsigned iPml4 = (GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
    44614296
     
    45024337            continue;
    45034338        }
    4504 else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
    4505     {
    4506 endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
    4507 
    4508 if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
     4339 else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
     4340    {
     4341 endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 */
     4342
     4343 if PGM_GST_TYPE == PGM_TYPE_AMD64 || PGM_GST_TYPE == PGM_TYPE_PAE
    45094344        /*
    45104345         * Check the PDPTEs too.
     
    45204355            X86PDPE         PdpeSrc;
    45214356            PdpeSrc.u = 0;                      /* initialized to shut up gcc 4.5 */
    4522 #   if PGM_GST_TYPE == PGM_TYPE_PAE
     4357#    if PGM_GST_TYPE == PGM_TYPE_PAE
    45234358            PGSTPD          pPDSrc    = pgmGstGetPaePDPtr(pVCpu, GCPtr, &iPDSrc, &PdpeSrc);
    45244359            PX86PDPT        pPdptDst  = pgmShwGetPaePDPTPtr(pVCpu);
    4525 #   else
     4360#    else
    45264361            PX86PML4E       pPml4eSrcIgn;
    45274362            PX86PDPT        pPdptDst;
     
    45374372            }
    45384373            Assert(pPDDst);
    4539 #   endif
     4374#    endif
    45404375            Assert(iPDSrc == 0);
    45414376
     
    45614396            if (GCPhysPdeSrc != pShwPde->GCPhys)
    45624397            {
    4563 #   if PGM_GST_TYPE == PGM_TYPE_AMD64
     4398#    if PGM_GST_TYPE == PGM_TYPE_AMD64
    45644399                AssertMsgFailed(("Physical address doesn't match! iPml4 %d iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPml4, iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
    4565 #   else
     4400#    else
    45664401                AssertMsgFailed(("Physical address doesn't match! iPdpt %d pPdpeDst.u=%#RX64 pPdpeSrc.u=%RX64 Phys %RX64 vs %RX64\n", iPdpt, pPdpeDst->u, PdpeSrc.u, pShwPde->GCPhys, GCPhysPdeSrc));
    4567 #   endif
     4402#    endif
    45684403                GCPtr += 512 * _2M;
    45694404                cErrors++;
     
    45714406            }
    45724407
    4573 #   if PGM_GST_TYPE == PGM_TYPE_AMD64
     4408#    if PGM_GST_TYPE == PGM_TYPE_AMD64
    45744409            if (    (pPdpeDst->u & (X86_PDPE_US | X86_PDPE_RW | X86_PDPE_LM_NX))
    45754410                !=  (PdpeSrc.u   & (X86_PDPE_US | X86_PDPE_RW | X86_PDPE_LM_NX)))
     
    45804415                continue;
    45814416            }
    4582 #   endif
    4583 
    4584 else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
    4585         {
    4586 endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
    4587 if PGM_GST_TYPE == PGM_TYPE_32BIT
     4417#    endif
     4418
     4419 else  /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
     4420        {
     4421 endif /* PGM_GST_TYPE != PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_PAE */
     4422 if PGM_GST_TYPE == PGM_TYPE_32BIT
    45884423            GSTPD const    *pPDSrc = pgmGstGet32bitPDPtr(pVCpu);
    4589 #   if PGM_SHW_TYPE == PGM_TYPE_32BIT
     4424#    if PGM_SHW_TYPE == PGM_TYPE_32BIT
    45904425            PCX86PD         pPDDst = pgmShwGet32BitPDPtr(pVCpu);
    4591 #   endif
    4592 endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
     4426#    endif
     4427 endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
    45934428            /*
    45944429            * Iterate the shadow page directory.
     
    46014436                iPDDst++, GCPtr += cIncrement)
    46024437            {
    4603 if PGM_SHW_TYPE == PGM_TYPE_PAE
     4438 if PGM_SHW_TYPE == PGM_TYPE_PAE
    46044439                const SHWPDE PdeDst = *pgmShwGetPaePDEPtr(pVCpu, GCPtr);
    4605 else
     4440 else
    46064441                const SHWPDE PdeDst = pPDDst->a[iPDDst];
    4607 endif
     4442 endif
    46084443                if (   (PdeDst.u & X86_PDE_P)
    46094444                    || ((PdeDst.u & (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) == (X86_PDE_P | PGM_PDFLAGS_TRACK_DIRTY)) )
     
    46474482                    {
    46484483                        GCPhysGst = GST_GET_PDE_GCPHYS(PdeSrc);
    4649 if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     4484 if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    46504485                        GCPhysGst = PGM_A20_APPLY(pVCpu, GCPhysGst | ((iPDDst & 1) * (GUEST_PAGE_SIZE / 2)));
    4651 endif
     4486 endif
    46524487                    }
    46534488                    else
    46544489                    {
    4655 if PGM_GST_TYPE == PGM_TYPE_32BIT
     4490 if PGM_GST_TYPE == PGM_TYPE_32BIT
    46564491                        if (PdeSrc.u & X86_PDE4M_PG_HIGH_MASK)
    46574492                        {
     
    46614496                            continue;
    46624497                        }
    4663 endif
     4498 endif
    46644499                        GCPhysGst = GST_GET_BIG_PDE_GCPHYS(pVM, PdeSrc);
    4665 if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     4500 if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    46664501                        GCPhysGst = PGM_A20_APPLY(pVCpu, GCPhysGst | (GCPtr & RT_BIT(X86_PAGE_2M_SHIFT)));
    4667 endif
     4502 endif
    46684503                    }
    46694504
     
    47284563
    47294564                        /* iterate the page table. */
    4730 if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     4565 if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    47314566                        /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
    47324567                        const unsigned offPTSrc  = ((GCPtr >> SHW_PD_SHIFT) & 1) * 512;
    4733 else
     4568 else
    47344569                        const unsigned offPTSrc  = 0;
    4735 endif
     4570 endif
    47364571                        for (unsigned iPT = 0, off = 0;
    47374572                            iPT < RT_ELEMENTS(pPTDst->a);
     
    47484583                            if (!(PteSrc.u & X86_PTE_P))
    47494584                            {
    4750 ifdef IN_RING3
     4585 ifdef IN_RING3
    47514586                                PGMAssertHandlerAndFlagsInSync(pVM);
    47524587                                DBGFR3PagingDumpEx(pVM->pUVM, pVCpu->idCpu, DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE
    47534588                                                   | DBGFPGDMP_FLAGS_GUEST | DBGFPGDMP_FLAGS_HEADER | DBGFPGDMP_FLAGS_PRINT_CR3,
    47544589                                                   0, 0, UINT64_MAX, 99, NULL);
    4755 endif
     4590 endif
    47564591                                AssertMsgFailed(("Out of sync (!P) PTE at %RGv! PteSrc=%#RX64 PteDst=%#RX64 pPTSrc=%RGv iPTSrc=%x PdeSrc=%x physpte=%RGp\n",
    47574592                                                GCPtr + off, (uint64_t)PteSrc.u, SHW_PTE_LOG64(PteDst), pPTSrc, iPT + offPTSrc, PdeSrc.au32[0],
     
    47624597
    47634598                            uint64_t fIgnoreFlags = GST_PTE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_G | X86_PTE_D | X86_PTE_PWT | X86_PTE_PCD | X86_PTE_PAT;
    4764 if 1 /** @todo sync accessed bit properly... */
     4599 if 1 /** @todo sync accessed bit properly... */
    47654600                            fIgnoreFlags |= X86_PTE_A;
    4766 endif
     4601 endif
    47674602
    47684603                            /* match the physical addresses */
     
    47704605                            GCPhysGst = GST_GET_PTE_GCPHYS(PteSrc);
    47714606
    4772 ifdef IN_RING3
     4607 ifdef IN_RING3
    47734608                            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
    47744609                            if (RT_FAILURE(rc))
    47754610                            {
    4776 #   if 0
     4611#    if 0
    47774612                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM)) /** @todo this is wrong. */
    47784613                                {
     
    47824617                                    continue;
    47834618                                }
    4784 #   endif
     4619#    endif
    47854620                            }
    47864621                            else if (HCPhysShw != (HCPhys & SHW_PTE_PG_MASK))
     
    47914626                                continue;
    47924627                            }
    4793 endif
     4628 endif
    47944629
    47954630                            pPhysPage = pgmPhysGetPage(pVM, GCPhysGst);
    47964631                            if (!pPhysPage)
    47974632                            {
    4798 if 0
     4633 if 0
    47994634                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM))  /** @todo this is wrong. */
    48004635                                {
     
    48044639                                    continue;
    48054640                                }
    4806 endif
     4641 endif
    48074642                                if (SHW_PTE_IS_RW(PteDst))
    48084643                                {
     
    48384673                                {
    48394674                                    if (   SHW_PTE_IS_P(PteDst)
    4840 if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
     4675 if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
    48414676                                        && !PGM_PAGE_IS_MMIO(pPhysPage)
    4842 endif
     4677 endif
    48434678                                       )
    48444679                                    {
     
    48754710                                        cErrors++;
    48764711                                    }
    4877 if 0 /** @todo sync access bit properly... */
     4712 if 0 /** @todo sync access bit properly... */
    48784713                                    if (PteDst.n.u1Accessed != PteSrc.n.u1Accessed)
    48794714                                    {
     
    48834718                                    }
    48844719                                    fIgnoreFlags |= X86_PTE_RW;
    4885 else
     4720 else
    48864721                                    fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
    4887 endif
     4722 endif
    48884723                                }
    48894724                                else if (SHW_PTE_IS_TRACK_DIRTY(PteDst))
     
    49054740                                    fIgnoreFlags |= X86_PTE_P;
    49064741                                }
    4907 ifdef DEBUG_sandervl
     4742 ifdef DEBUG_sandervl
    49084743                                fIgnoreFlags |= X86_PTE_D | X86_PTE_A;
    4909 endif
     4744 endif
    49104745                            }
    49114746
     
    49444779                                continue;
    49454780                            }
    4946 if 0 /** @todo sync access bit properly... */
     4781 if 0 /** @todo sync access bit properly... */
    49474782                            if (PdeDst.n.u1Accessed != PdeSrc.b.u1Accessed)
    49484783                            {
     
    49524787                            }
    49534788                            fIgnoreFlags |= X86_PTE_RW;
    4954 else
     4789 else
    49554790                            fIgnoreFlags |= X86_PTE_RW | X86_PTE_A;
    4956 endif
     4791 endif
    49574792                        }
    49584793                        else if (PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY)
     
    50064841                            HCPhysShw = SHW_PTE_GET_HCPHYS(PteDst);
    50074842
    5008 ifdef IN_RING3
     4843 ifdef IN_RING3
    50094844                            rc = PGMPhysGCPhys2HCPhys(pVM, GCPhysGst, &HCPhys);
    50104845                            if (RT_FAILURE(rc))
    50114846                            {
    5012 #   if 0
     4847#    if 0
    50134848                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM))  /** @todo this is wrong. */
    50144849                                {
     
    50174852                                    cErrors++;
    50184853                                }
    5019 #   endif
     4854#    endif
    50204855                            }
    50214856                            else if (HCPhysShw != (HCPhys & X86_PTE_PAE_PG_MASK))
     
    50264861                                continue;
    50274862                            }
    5028 endif
     4863 endif
    50294864                            pPhysPage = pgmPhysGetPage(pVM, GCPhysGst);
    50304865                            if (!pPhysPage)
    50314866                            {
    5032 if 0 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
     4867 if 0 /** @todo make MMR3PageDummyHCPhys an 'All' function! */
    50334868                                if (HCPhysShw != MMR3PageDummyHCPhys(pVM))  /** @todo this is wrong. */
    50344869                                {
     
    50384873                                    continue;
    50394874                                }
    5040 endif
     4875 endif
    50414876                                if (SHW_PTE_IS_RW(PteDst))
    50424877                                {
     
    50774912                                    if (   SHW_PTE_IS_P(PteDst)
    50784913                                        && !PGM_PAGE_IS_HNDL_PHYS_NOT_IN_HM(pPhysPage)
    5079 if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
     4914 if PGM_SHW_TYPE == PGM_TYPE_EPT || PGM_SHW_TYPE == PGM_TYPE_PAE || PGM_SHW_TYPE == PGM_TYPE_AMD64
    50804915                                        && !PGM_PAGE_IS_MMIO(pPhysPage)
    5081 endif
     4916 endif
    50824917                                        )
    50834918                                    {
     
    51124947    } /* for each PML4E */
    51134948
     4949#  endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
    51144950#  ifdef DEBUG
    51154951    if (cErrors)
     
    52325068     */
    52335069# if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
    5234            || PGM_SHW_TYPE == PGM_TYPE_PAE    \
     5070           || PGM_SHW_TYPE == PGM_TYPE_PAE \
    52355071           || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
    5236        && (   PGM_GST_TYPE != PGM_TYPE_REAL   \
    5237            && PGM_GST_TYPE != PGM_TYPE_PROT))
     5072       && (   PGM_GST_TYPE != PGM_TYPE_REAL \
     5073           && PGM_GST_TYPE != PGM_TYPE_PROT) \
     5074       && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE) )
    52385075
    52395076    Assert(!pVM->pgm.s.fNestedPaging);
     
    53335170     * Update shadow paging info.
    53345171     */
    5335 #if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
    5336           || PGM_SHW_TYPE == PGM_TYPE_PAE \
    5337           || PGM_SHW_TYPE == PGM_TYPE_AMD64))
     5172#if (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
     5173         || PGM_SHW_TYPE == PGM_TYPE_PAE \
     5174         || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
     5175     && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE) )
    53385176# if PGM_GST_TYPE != PGM_TYPE_REAL
    53395177    Assert(!pVM->pgm.s.fNestedPaging);
  • trunk/src/VBox/VMM/VMMAll/PGMAllHandler.cpp

    r106061 r107171  
    973973    {
    974974        uint8_t u2State = PGM_PAGE_GET_NEM_STATE(pPage);
    975         NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev, pVM->pgm.s.HCPhysZeroPg,
     975        NEMHCNotifyPhysPageChanged(pVM, GCPhysPage, HCPhysPrev,
     976# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
     977                                   pVM->pgm.s.HCPhysZeroPg,
     978# else
     979                                   0,
     980# endif
    976981                                   PGM_RAMRANGE_CALC_PAGE_R3PTR(pRam, GCPhysPage),
    977982                                   NEM_PAGE_PROT_NONE, PGMPAGETYPE_MMIO, &u2State);
     
    17391744                                               PPDMDEVINS pDevIns, PGMMMIO2HANDLE hMmio2, RTGCPHYS offMmio2PageRemap)
    17401745{
    1741 #ifdef VBOX_WITH_PGM_NEM_MODE
     1746#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
     1747    RT_NOREF(pVM, GCPhys, GCPhysPage, pDevIns, hMmio2, offMmio2PageRemap);
     1748    AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
     1749#else
     1750# ifdef VBOX_WITH_PGM_NEM_MODE
    17421751    AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
    1743 #endif
     1752# endif
    17441753    int rc = PGM_LOCK(pVM);
    17451754    AssertRCReturn(rc, rc);
     
    18391848             *        and important when this kind of aliasing is used, so it may pay of... */
    18401849
    1841 #ifdef VBOX_WITH_NATIVE_NEM
     1850# ifdef VBOX_WITH_NATIVE_NEM
    18421851            /* Tell NEM about the backing and protection change. */
    18431852            if (VM_IS_NEM_ENABLED(pVM))
     
    18501859                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    18511860            }
    1852 #endif
     1861# endif
    18531862            LogFlow(("PGMHandlerPhysicalPageAliasMmio2: => %R[pgmpage]\n", pPage));
    18541863            PGM_UNLOCK(pVM);
     
    18681877    }
    18691878    return rc;
     1879#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
    18701880}
    18711881
     
    19041914VMMDECL(int)  PGMHandlerPhysicalPageAliasHC(PVMCC pVM, RTGCPHYS GCPhys, RTGCPHYS GCPhysPage, RTHCPHYS HCPhysPageRemap)
    19051915{
     1916#ifdef VBOX_WITH_ONLY_PGM_NEM_MODE
     1917    RT_NOREF(pVM, GCPhys, GCPhysPage, HCPhysPageRemap);
     1918    AssertFailedReturn(VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
     1919#else
    19061920///    Assert(!IOMIsLockOwner(pVM)); /* We mustn't own any other locks when calling this */
    1907 #ifdef VBOX_WITH_PGM_NEM_MODE
     1921# ifdef VBOX_WITH_PGM_NEM_MODE
    19081922    AssertReturn(!VM_IS_NEM_ENABLED(pVM) || !pVM->pgm.s.fNemMode, VERR_PGM_NOT_SUPPORTED_FOR_NEM_MODE);
    1909 #endif
     1923# endif
    19101924    int rc = PGM_LOCK(pVM);
    19111925    AssertRCReturn(rc, rc);
     
    19321946             */
    19331947            PPGMPAGE     pPage = NULL;
    1934 #ifdef VBOX_WITH_NATIVE_NEM
     1948# ifdef VBOX_WITH_NATIVE_NEM
    19351949            PPGMRAMRANGE pRam  = NULL;
    19361950            rc = pgmPhysGetPageAndRangeEx(pVM, GCPhysPage, &pPage, &pRam);
    1937 #else
     1951# else
    19381952            rc = pgmPhysGetPageEx(pVM, GCPhysPage, &pPage);
    1939 #endif
     1953# endif
    19401954            AssertReturnStmt(RT_SUCCESS_NP(rc), PGM_UNLOCK(pVM), rc);
    19411955            if (PGM_PAGE_GET_TYPE(pPage) != PGMPAGETYPE_MMIO)
     
    19721986            pgmPhysInvalidatePageMapTLBEntry(pVM, GCPhysPage);
    19731987
    1974 #ifdef VBOX_WITH_NATIVE_NEM
     1988# ifdef VBOX_WITH_NATIVE_NEM
    19751989            /* Tell NEM about the backing and protection change. */
    19761990            if (VM_IS_NEM_ENABLED(pVM))
     
    19831997                PGM_PAGE_SET_NEM_STATE(pPage, u2State);
    19841998            }
    1985 #endif
     1999# endif
    19862000            LogFlow(("PGMHandlerPhysicalPageAliasHC: => %R[pgmpage]\n", pPage));
    19872001            PGM_UNLOCK(pVM);
     
    20002014    }
    20012015    return rc;
     2016#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
    20022017}
    20032018
  • trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp

    r106061 r107171  
    27292729
    27302730#ifdef VBOX_WITH_PGM_NEM_MODE
     2731# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    27312732    if (pVM->pgm.s.fNemMode)
     2733# endif
    27322734    {
    27332735# ifdef IN_RING3
     
    27482750    }
    27492751#endif /* VBOX_WITH_PGM_NEM_MODE */
     2752#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    27502753
    27512754    const uint32_t idChunk = PGM_PAGE_GET_CHUNKID(pPage);
     
    28262829    return VINF_SUCCESS;
    28272830# endif /* !IN_RING0 */
     2831#endif /* !VBOX_WITH_ONLY_PGM_NEM_MODE */
    28282832}
    28292833
     
    29782982        if (RT_FAILURE(rc))
    29792983            return rc;
    2980 # ifndef IN_RING0
     2984#ifndef IN_RING0
    29812985        pTlbe->pMap = pMap;
    2982 # endif
     2986#endif
    29832987        pTlbe->pv = pv;
    29842988        Assert(!((uintptr_t)pTlbe->pv & GUEST_PAGE_OFFSET_MASK));
     
    29862990    else
    29872991    {
     2992#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    29882993        AssertMsg(PGM_PAGE_GET_HCPHYS(pPage) == pVM->pgm.s.HCPhysZeroPg, ("%RGp/%R[pgmpage]\n", GCPhys, pPage));
    2989 # ifndef IN_RING0
     2994#endif
     2995#ifndef IN_RING0
    29902996        pTlbe->pMap = NULL;
    2991 # endif
     2997#endif
    29922998        pTlbe->pv = pVM->pgm.s.abZeroPg;
    29932999    }
    2994 # ifdef PGM_WITH_PHYS_TLB
     3000#ifdef PGM_WITH_PHYS_TLB
    29953001    if (    PGM_PAGE_GET_TYPE(pPage) < PGMPAGETYPE_ROM_SHADOW
    29963002        ||  PGM_PAGE_GET_TYPE(pPage) > PGMPAGETYPE_ROM)
     
    29983004    else
    29993005        pTlbe->GCPhys = NIL_RTGCPHYS; /* ROM: Problematic because of the two pages. :-/ */
    3000 # else
     3006#else
    30013007    pTlbe->GCPhys = NIL_RTGCPHYS;
    3002 # endif
     3008#endif
    30033009    pTlbe->pPage = pPage;
    30043010    return VINF_SUCCESS;
     
    37433749        {
    37443750#ifdef IN_RING3
     3751# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    37453752            PPGMPAGEMAPTLBE pTlbe;
    37463753            rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, (PPGMPAGE)pPage, GCPhys, &pTlbe);
     
    37483755            pb = (uint8_t *)pTlbe->pv;
    37493756            RT_NOREF(pVM);
     3757# endif
    37503758#else /** @todo a safe lockless page TLB in ring-0 needs the to ensure it gets the right invalidations. later. */
    37513759            PGM_LOCK(pVM);
     
    51355143    {
    51365144#ifdef IN_RING3
     5145# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    51375146        PPGMPAGEMAPTLBE pTlbe;
    51385147        int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
     
    51405149        *ppb = (uint8_t *)pTlbe->pv;
    51415150        RT_NOREF(pVM);
     5151# endif
    51425152#else /** @todo a safe lockless page TLB in ring-0 needs the to ensure it gets the right invalidations. later. */
    51435153        PGM_LOCK(pVM);
     
    51515161    }
    51525162    Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RO\n", GCPhys, *ppb, *pfTlb, pPageCopy));
    5153     RT_NOREF(pRam);
     5163    RT_NOREF(pRam, pVM, pVCpu);
    51545164    return VINF_SUCCESS;
    51555165}
     
    51725182    {
    51735183#ifdef IN_RING3
     5184# ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
    51745185        PPGMPAGEMAPTLBE pTlbe;
    51755186        int rc = pgmPhysPageQueryLocklessTlbeWithPage(pVCpu, pPage, GCPhys, &pTlbe);
     
    51775188        *ppb = (uint8_t *)pTlbe->pv;
    51785189        RT_NOREF(pVM);
     5190# endif
    51795191#else /** @todo a safe lockless page TLB in ring-0 needs the to ensure it gets the right invalidations. later. */
    51805192        PGM_LOCK(pVM);
     
    51885200    }
    51895201    Log6(("PGMPhysIemGCPhys2PtrNoLock: GCPhys=%RGp *ppb=%p *pfTlb=%#RX64 PageCopy=%R[pgmpage] RW\n", GCPhys, *ppb, *pfTlb, pPageCopy));
    5190     RT_NOREF(pRam);
     5202    RT_NOREF(pRam, pVM, pVCpu);
    51915203    return VINF_SUCCESS;
    51925204}
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r106061 r107171  
    230230PGM_SHW_DECL(int, Enter)(PVMCPUCC pVCpu)
    231231{
    232 #if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
     232#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    233233
    234234# ifdef VBOX_WITH_NESTED_HWVIRT_VMX_EPT
     
    284284PGM_SHW_DECL(int, Exit)(PVMCPUCC pVCpu)
    285285{
    286 #if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE)
     286#if PGM_TYPE_IS_NESTED_OR_EPT(PGM_SHW_TYPE) && !defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    287287    PVMCC pVM = pVCpu->CTX_SUFF(pVM);
    288288    if (pVCpu->pgm.s.CTX_SUFF(pShwPageCR3))
     
    395395PGM_SHW_DECL(int, GetPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, uint64_t *pfFlags, PRTHCPHYS pHCPhys)
    396396{
    397 #if PGM_SHW_TYPE == PGM_TYPE_NONE
     397#if PGM_SHW_TYPE == PGM_TYPE_NONE || defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    398398    RT_NOREF(pVCpu, GCPtr);
    399399    AssertFailed();
     
    607607PGM_SHW_DECL(int, ModifyPage)(PVMCPUCC pVCpu, RTGCUINTPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask, uint32_t fOpFlags)
    608608{
    609 #if PGM_SHW_TYPE == PGM_TYPE_NONE
     609#if PGM_SHW_TYPE == PGM_TYPE_NONE || defined(VBOX_WITH_ONLY_PGM_NEM_MODE)
    610610    RT_NOREF(pVCpu, GCPtr, cb, fFlags, fMask, fOpFlags);
    611611    AssertFailed();
Note: See TracChangeset for help on using the changeset viewer.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette