VirtualBox

Changeset 7676 in vbox for trunk/src


Ignore:
Timestamp:
Apr 1, 2008 9:18:10 AM (17 years ago)
Author:
vboxsync
Message:

Cleaned up.
AMD64 shadow paging is only valid with AMD64 guest paging. Other combinations removed.
Simplified paging #ifdefs.

Location:
trunk/src/VBox/VMM
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/PGM.cpp

    r7629 r7676  
    834834#define PGM_SHW_NAME_R0_STR(name)   PGM_SHW_NAME_R0_AMD64_STR(name)
    835835#include "PGMShw.h"
    836 
    837 /* Guest - real mode */
    838 #define PGM_GST_TYPE                PGM_TYPE_REAL
    839 #define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
    840 #define PGM_GST_NAME_GC_STR(name)   PGM_GST_NAME_GC_REAL_STR(name)
    841 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_REAL_STR(name)
    842 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_REAL(name)
    843 #define PGM_BTH_NAME_GC_STR(name)   PGM_BTH_NAME_GC_AMD64_REAL_STR(name)
    844 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_AMD64_REAL_STR(name)
    845 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    846 #include "PGMBth.h"
    847 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    848 #undef PGM_BTH_NAME
    849 #undef PGM_BTH_NAME_GC_STR
    850 #undef PGM_BTH_NAME_R0_STR
    851 #undef PGM_GST_TYPE
    852 #undef PGM_GST_NAME
    853 #undef PGM_GST_NAME_GC_STR
    854 #undef PGM_GST_NAME_R0_STR
    855 
    856 /* Guest - protected mode */
    857 #define PGM_GST_TYPE                PGM_TYPE_PROT
    858 #define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
    859 #define PGM_GST_NAME_GC_STR(name)   PGM_GST_NAME_GC_PROT_STR(name)
    860 #define PGM_GST_NAME_R0_STR(name)   PGM_GST_NAME_R0_PROT_STR(name)
    861 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_PROT(name)
    862 #define PGM_BTH_NAME_GC_STR(name)   PGM_BTH_NAME_GC_AMD64_PROT_STR(name)
    863 #define PGM_BTH_NAME_R0_STR(name)   PGM_BTH_NAME_R0_AMD64_PROT_STR(name)
    864 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    865 #include "PGMBth.h"
    866 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    867 #undef PGM_BTH_NAME
    868 #undef PGM_BTH_NAME_GC_STR
    869 #undef PGM_BTH_NAME_R0_STR
    870 #undef PGM_GST_TYPE
    871 #undef PGM_GST_NAME
    872 #undef PGM_GST_NAME_GC_STR
    873 #undef PGM_GST_NAME_R0_STR
    874836
    875837/* Guest - AMD64 mode */
     
    25032465    rc = PGM_SHW_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    25042466    rc = PGM_GST_NAME_REAL(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    2505     rc = PGM_BTH_NAME_AMD64_REAL(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    25062467
    25072468    pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_PROT)];
     
    25102471    rc = PGM_SHW_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    25112472    rc = PGM_GST_NAME_PROT(InitData)(       pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    2512     rc = PGM_BTH_NAME_AMD64_PROT(InitData)( pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    25132473
    25142474    pModeData = &pVM->pgm.s.paModeData[pgmModeDataIndex(PGM_TYPE_AMD64, PGM_TYPE_AMD64)];
     
    25172477    rc = PGM_SHW_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    25182478    rc = PGM_GST_NAME_AMD64(InitData)(      pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    2519     rc = PGM_BTH_NAME_AMD64_AMD64(InitData)(pVM, pModeData, fResolveGCAndR0); AssertRCReturn(rc, rc);
    25202479
    25212480    return VINF_SUCCESS;
     
    29282887                case PGMMODE_AMD64:
    29292888                case PGMMODE_AMD64_NX:
    2930                     rc2 = PGM_BTH_NAME_AMD64_REAL(Enter)(pVM, NIL_RTGCPHYS);
     2889                    rc2 = PGM_BTH_NAME_PAE_REAL(Enter)(pVM, NIL_RTGCPHYS);
    29312890                    break;
    29322891                default: AssertFailed(); break;
     
    29472906                case PGMMODE_AMD64:
    29482907                case PGMMODE_AMD64_NX:
    2949                     rc2 = PGM_BTH_NAME_AMD64_PROT(Enter)(pVM, NIL_RTGCPHYS);
     2908                    rc2 = PGM_BTH_NAME_PAE_PROT(Enter)(pVM, NIL_RTGCPHYS);
    29502909                    break;
    29512910                default: AssertFailed(); break;
     
    32063165
    32073166    int rc = VINF_SUCCESS;
    3208     const unsigned c = fLongMode ? ELEMENTS(pPDPTR->a) : 4;
     3167    const unsigned c = fLongMode ? ELEMENTS(pPDPTR->a) : X86_PG_PAE_PDPTE_ENTRIES;
    32093168    for (unsigned i = 0; i < c; i++)
    32103169    {
  • trunk/src/VBox/VMM/PGMInternal.h

    r7642 r7676  
    17181718#define PGM_BTH_NAME_PAE_32BIT(name)    PGM_CTX(pgm,BthPAE32Bit##name)
    17191719#define PGM_BTH_NAME_PAE_PAE(name)      PGM_CTX(pgm,BthPAEPAE##name)
    1720 #define PGM_BTH_NAME_AMD64_REAL(name)   PGM_CTX(pgm,BthAMD64Real##name)
    1721 #define PGM_BTH_NAME_AMD64_PROT(name)   PGM_CTX(pgm,BthAMD64Prot##name)
    17221720#define PGM_BTH_NAME_AMD64_AMD64(name)  PGM_CTX(pgm,BthAMD64AMD64##name)
    17231721#define PGM_BTH_NAME_GC_32BIT_REAL_STR(name)   "pgmGCBth32BitReal" #name
     
    17281726#define PGM_BTH_NAME_GC_PAE_32BIT_STR(name)    "pgmGCBthPAE32Bit" #name
    17291727#define PGM_BTH_NAME_GC_PAE_PAE_STR(name)      "pgmGCBthPAEPAE" #name
    1730 #define PGM_BTH_NAME_GC_AMD64_REAL_STR(name)   "pgmGCBthAMD64Real" #name
    1731 #define PGM_BTH_NAME_GC_AMD64_PROT_STR(name)   "pgmGCBthAMD64Prot" #name
    17321728#define PGM_BTH_NAME_GC_AMD64_AMD64_STR(name)  "pgmGCBthAMD64AMD64" #name
    17331729#define PGM_BTH_NAME_R0_32BIT_REAL_STR(name)   "pgmR0Bth32BitReal" #name
     
    17381734#define PGM_BTH_NAME_R0_PAE_32BIT_STR(name)    "pgmR0BthPAE32Bit" #name
    17391735#define PGM_BTH_NAME_R0_PAE_PAE_STR(name)      "pgmR0BthPAEPAE" #name
    1740 #define PGM_BTH_NAME_R0_AMD64_REAL_STR(name)   "pgmR0BthAMD64Real" #name
    1741 #define PGM_BTH_NAME_R0_AMD64_PROT_STR(name)   "pgmR0BthAMD64Prot" #name
    17421736#define PGM_BTH_NAME_R0_AMD64_AMD64_STR(name)  "pgmR0BthAMD64AMD64" #name
    17431737#define PGM_BTH_DECL(type, name)        PGM_CTX_DECL(type) PGM_BTH_NAME(name)
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r7629 r7676  
    184184#include "PGMAllShw.h"
    185185
    186 /* Guest - real mode */
    187 #define PGM_GST_TYPE                PGM_TYPE_REAL
    188 #define PGM_GST_NAME(name)          PGM_GST_NAME_REAL(name)
    189 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_REAL(name)
    190 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    191 #include "PGMAllBth.h"
    192 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    193 #undef PGM_BTH_NAME
    194 #undef PGM_GST_NAME
    195 #undef PGM_GST_TYPE
    196 
    197 /* Guest - protected mode */
    198 #define PGM_GST_TYPE                PGM_TYPE_PROT
    199 #define PGM_GST_NAME(name)          PGM_GST_NAME_PROT(name)
    200 #define PGM_BTH_NAME(name)          PGM_BTH_NAME_AMD64_PROT(name)
    201 #define BTH_PGMPOOLKIND_PT_FOR_PT   PGMPOOLKIND_PAE_PT_FOR_PHYS
    202 #include "PGMAllBth.h"
    203 #undef BTH_PGMPOOLKIND_PT_FOR_PT
    204 #undef PGM_BTH_NAME
    205 #undef PGM_GST_TYPE
    206 #undef PGM_GST_NAME
    207 
    208186/* Guest - AMD64 mode */
    209187#define PGM_GST_TYPE                PGM_TYPE_AMD64
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r7668 r7676  
    3939
    4040
     41/* Filter out some illegal combinations of guest and shadow paging, so we can remove redundant checks inside functions. */
     42#if      PGM_GST_TYPE == PGM_TYPE_PAE && PGM_SHW_TYPE != PGM_TYPE_PAE
     43#error "Invalid combination; PAE guest implies PAE shadow"
     44#endif
     45
     46#if     (PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) \
     47    && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE)
     48#error "Invalid combination; real or protected mode without paging implies 32 bits or PAE shadow paging."
     49#endif
     50
     51#if     (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE) \
     52    && !(PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE)
     53#error "Invalid combination; 32 bits guest paging or PAE implies 32 bits or PAE shadow paging."
     54#endif
     55
      56#if    (PGM_GST_TYPE == PGM_TYPE_AMD64 && PGM_SHW_TYPE != PGM_TYPE_AMD64) \
     57    || (PGM_SHW_TYPE == PGM_TYPE_AMD64 && PGM_GST_TYPE != PGM_TYPE_AMD64)
     58#error "Invalid combination; AMD64 guest implies AMD64 shadow and vice versa"
     59#endif
     60
    4161/**
    4262 * #PF Handler for raw-mode guest execution.
     
    5171{
    5272#if (PGM_GST_TYPE == PGM_TYPE_32BIT ||  PGM_GST_TYPE == PGM_TYPE_REAL ||  PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) && PGM_SHW_TYPE != PGM_TYPE_AMD64
    53 
    54 # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
    55 #  error "32-bit guest mode is only implemented for 32-bit and PAE shadow modes."
    56 # endif
    5773
    5874# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE != PGM_TYPE_PAE
     
    759775
    760776    LogFlow(("InvalidatePage %x\n", GCPtrPage));
    761 # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
    762777    /*
    763778     * Get the shadow PD entry and skip out if this PD isn't present.
     
    870885            if (pShwPage->GCPhys == GCPhys)
    871886            {
    872 #if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
     887#  if 0 /* likely cause of a major performance regression; must be SyncPageWorkerTrackDeref then */
    873888                const unsigned iPTEDst = (GCPtrPage >> SHW_PT_SHIFT) & SHW_PT_MASK;
    874889                PSHWPT pPT = (PSHWPT)PGMPOOL_PAGE_2_PTR(pVM, pShwPage);
    875890                if (pPT->a[iPTEDst].n.u1Present)
    876891                {
    877 # ifdef PGMPOOL_WITH_USER_TRACKING
     892#   ifdef PGMPOOL_WITH_USER_TRACKING
    878893                    /* This is very unlikely with caching/monitoring enabled. */
    879894                    PGM_BTH_NAME(SyncPageWorkerTrackDeref)(pVM, pShwPage, pPT->a[iPTEDst].u & SHW_PTE_PG_MASK);
    880 # endif
     895#   endif
    881896                    pPT->a[iPTEDst].u = 0;
    882897                }
    883 #else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
     898#  else /* Syncing it here isn't 100% safe and it's probably not worth spending time syncing it. */
    884899                rc = PGM_BTH_NAME(SyncPage)(pVM, PdeSrc, GCPtrPage, 1, 0);
    885900                if (VBOX_SUCCESS(rc))
    886901                    rc = VINF_SUCCESS;
    887 #endif
     902#  endif
    888903                STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,InvalidatePage4KBPages));
    889904                PGM_INVL_PG(GCPtrPage);
     
    910925            PPGMPOOLPAGE    pShwPage = pgmPoolGetPageByHCPhys(pVM, PdeDst.u & SHW_PDE_PG_MASK);
    911926            RTGCPHYS        GCPhys   = PdeSrc.u & GST_PDE_BIG_PG_MASK;
    912 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     927#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    913928            /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
    914929            GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
     
    968983    return rc;
    969984
    970 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    971 #  error "Guest 32-bit mode and shadow AMD64 mode doesn't add up!"
    972 # endif
    973     return VINF_SUCCESS;
    974 
    975985#elif PGM_GST_TYPE == PGM_TYPE_AMD64
    976 # if PGM_SHW_TYPE == PGM_TYPE_AMD64
    977986//# error not implemented
    978987    return VERR_INTERNAL_ERROR;
    979988
    980 # else  /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */
    981 #  error "Guest AMD64 mode, but not the shadow mode - that can't be right!"
    982 # endif /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */
    983 
    984989#else /* guest real and protected mode */
    985     /* There's no such thing when paging is disabled. */
     990    /* There's no such thing as InvalidatePage when paging is disabled, so just ignore. */
    986991    return VINF_SUCCESS;
    987992#endif
     
    12371242#if    PGM_GST_TYPE == PGM_TYPE_32BIT \
    12381243    || PGM_GST_TYPE == PGM_TYPE_PAE
    1239 
    1240 # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
    1241 #  error "Invalid shadow mode for 32-bit guest mode!"
    1242 # endif
    12431244
    12441245    /*
     
    18421843    || PGM_GST_TYPE == PGM_TYPE_PAE
    18431844
    1844 # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
    1845 #  error "Invalid shadow mode for 32-bit guest mode!"
    1846 # endif
    1847 
    18481845    /*
    18491846     * Validate input a little bit.
     
    19081905            GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
    19091906# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     1907            /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
    19101908            GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
    19111909# endif
     
    19161914            GCPhys = PdeSrc.u & GST_PDE_BIG_PG_MASK;
    19171915# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    1918             GCPhys |= GCPtrPage & RT_BIT(X86_PAGE_2M_SHIFT);
     1916            /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
     1917            GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
    19191918# endif
    19201919            rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
     
    19341933                PdeDst.u = pShwPage->Core.Key
    19351934                         | (PdeSrc.u & ~(X86_PDE_PAE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
    1936 #  ifdef PGM_SYNC_DIRTY_BIT /* (see explanation and assumtions further down.) */
     1935# ifdef PGM_SYNC_DIRTY_BIT /* (see explanation and assumptions further down.) */
    19371936                if (!PdeSrc.b.u1Dirty && PdeSrc.b.u1Write)
    19381937                {
     
    19411940                    PdeDst.b.u1Write = 0;
    19421941                }
    1943 #  endif
     1942# endif
    19441943            }
    19451944            *pPdeDst = PdeDst;
     
    20052004                const unsigned  iPTDstEnd = ELEMENTS(pPTDst->a);
    20062005# endif /* !PGM_SYNC_N_PAGES */
    2007 #  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     2006# if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    20082007                /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
    20092008                const unsigned  offPTSrc  = ((GCPtrPage >> SHW_PD_SHIFT) & 1) * 512;
    2010 #  else
     2009# else
    20112010                const unsigned  offPTSrc  = 0;
    2012 #  endif
     2011# endif
    20132012                for (; iPTDst < iPTDstEnd; iPTDst++)
    20142013                {
     
    20182017                    if (PteSrc.n.u1Present) /* we've already cleared it above */
    20192018                    {
    2020 #ifndef IN_RING0
     2019# ifndef IN_RING0
    20212020                        /*
    20222021                         * Assuming kernel code will be marked as supervisor - and not as user level
     
    20302029                                 &&  PGM_PAGE_HAS_ACTIVE_HANDLERS(pPage))
    20312030                           )
    2032 #endif
     2031# endif
    20332032                            PGM_BTH_NAME(SyncPageWorker)(pVM, &pPTDst->a[iPTDst], PdeSrc, PteSrc, pShwPage, iPTDst);
    20342033                        Log2(("SyncPT:   4K+ %VGv PteSrc:{P=%d RW=%d U=%d raw=%08llx}%s dst.raw=%08llx iPTSrc=%x PdeSrc.u=%x physpte=%VGp\n",
     
    20882087             */
    20892088            /* Get address and flags from the source PDE. */
    2090             SHWPTE      PteDstBase;
     2089            SHWPTE PteDstBase;
    20912090            PteDstBase.u = PdeSrc.u & ~(X86_PTE_PAE_PG_MASK | X86_PTE_AVL_MASK | X86_PTE_PAT | X86_PTE_PCD | X86_PTE_PWT);
    20922091
     
    20962095                  GCPtrPage, PdeSrc.b.u1Present, PdeSrc.b.u1Write, PdeSrc.b.u1User, (uint64_t)PdeSrc.u, GCPtr,
    20972096                  GCPhys, PdeDst.u & PGM_PDFLAGS_TRACK_DIRTY ? " Track-Dirty" : ""));
    2098             PPGMRAMRANGE        pRam   = CTXALLSUFF(pVM->pgm.s.pRamRanges);
    2099             unsigned            iPTDst = 0;
     2097            PPGMRAMRANGE      pRam   = CTXALLSUFF(pVM->pgm.s.pRamRanges);
     2098            unsigned          iPTDst = 0;
    21002099            while (iPTDst < ELEMENTS(pPTDst->a))
    21012100            {
     
    22492248
    22502249#else /* PGM_GST_TYPE != PGM_TYPE_32BIT */
    2251 
    22522250    AssertReleaseMsgFailed(("Shw=%d Gst=%d is not implemented!\n", PGM_GST_TYPE, PGM_SHW_TYPE));
    22532251    STAM_PROFILE_STOP(&pVM->pgm.s.CTXMID(Stat,SyncPT), a);
     
    22702268PGM_BTH_DECL(int, PrefetchPage)(PVM pVM, RTGCUINTPTR GCPtrPage)
    22712269{
    2272 #if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT) && PGM_SHW_TYPE != PGM_TYPE_AMD64
    2273 
    2274 # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
    2275 #  error "Invalid shadow mode for 32-bit guest mode!"
    2276 # endif
    2277 
     2270#if (PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_REAL || PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) && PGM_SHW_TYPE != PGM_TYPE_AMD64
    22782271    /*
    22792272     * Check that all Guest levels thru the PDE are present, getting the
     
    22862279    PGSTPD          pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
    22872280#  else /* PAE */
    2288     unsigned        iPDSrc
     2281    unsigned        iPDSrc;
    22892282    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc);
    22902283#  endif
     2284    const GSTPDE    PdeSrc = pPDSrc->a[iPDSrc];
    22912285# else
    22922286    PGSTPD          pPDSrc = NULL;
    22932287    const unsigned  iPDSrc = 0;
    2294 # endif
    2295 
    2296 # if PGM_WITH_PAGING(PGM_GST_TYPE)
    2297     const GSTPDE PdeSrc = pPDSrc->a[iPDSrc];
    2298 # else
    2299     GSTPDE PdeSrc;
     2288    GSTPDE          PdeSrc;
     2289
    23002290    PdeSrc.au32[0]      = 0; /* faked so we don't have to #ifdef everything */
    23012291    PdeSrc.n.u1Present  = 1;
     
    23592349#if (PGM_GST_TYPE == PGM_TYPE_32BIT ||  PGM_GST_TYPE == PGM_TYPE_REAL ||  PGM_GST_TYPE == PGM_TYPE_PROT || PGM_GST_TYPE == PGM_TYPE_PAE) && PGM_SHW_TYPE != PGM_TYPE_AMD64
    23602350
    2361 # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
    2362 #  error "Invalid shadow mode for 32-bit guest mode!"
    2363 # endif
    2364 
    2365 #ifndef IN_RING0
     2351#  ifndef IN_RING0
    23662352    if (!(fPage & X86_PTE_US))
    23672353    {
     
    23732359        CSAMMarkPage(pVM, (RTGCPTR)GCPtrPage, true);
    23742360    }
    2375 #endif
     2361#  endif
    23762362    /*
    23772363     * Get guest PD and index.
     
    24552441
    24562442
    2457 #if PGM_GST_TYPE == PGM_TYPE_32BIT
     2443#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    24582444# if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
    24592445/**
     
    25102496PGM_BTH_DECL(int, SyncCR3)(PVM pVM, uint32_t cr0, uint32_t cr3, uint32_t cr4, bool fGlobal)
    25112497{
    2512 #if PGM_GST_TYPE == PGM_TYPE_32BIT
    2513 # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
    25142498    if (VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
    25152499        fGlobal = true; /* Change this CR3 reload to be a global one. */
    2516 # endif
    2517 #endif
    25182500
    25192501    /*
     
    25542536    MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTXMID(Stat,SyncCR3Global) : &pVM->pgm.s.CTXMID(Stat,SyncCR3NotGlobal));
    25552537
    2556 #if PGM_GST_TYPE == PGM_TYPE_32BIT
    2557 # if PGM_SHW_TYPE == PGM_TYPE_32BIT || PGM_SHW_TYPE == PGM_TYPE_PAE
     2538#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    25582539    /*
    25592540     * Get page directory addresses.
     
    25652546#  endif
    25662547
    2567 # if PGM_GST_TYPE == PGM_TYPE_32BIT
     2548#  if PGM_GST_TYPE == PGM_TYPE_32BIT
    25682549    PGSTPD          pPDSrc = CTXSUFF(pVM->pgm.s.pGuestPD);
    2569 # else /* PAE */
    2570     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, 0);
    2571 # endif
    2572 
    25732550    Assert(pPDSrc);
    2574 #ifndef IN_GC
     2551#   ifndef IN_GC
    25752552    Assert(MMPhysGCPhys2HCVirt(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == pPDSrc);
    2576 #endif
     2553#   endif
     2554#  endif
    25772555
    25782556    /*
     
    25952573        iPdNoMapping  = ~0U;
    25962574    }
    2597 
    2598     for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
     2575#  if PGM_GST_TYPE == PGM_TYPE_PAE
     2576    for (unsigned iPDPTRE = 0; iPDPTRE < X86_PG_PAE_PDPTE_ENTRIES; iPDPTRE++)
     2577#  elif PGM_GST_TYPE == PGM_TYPE_AMD64
     2578    for (unsigned iPDPTRE = 0; iPDPTRE < X86_PG_AMD64_PDPTE_ENTRIES; iPDPTRE++)
     2579#  endif
    25992580    {
     2581#  if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64
     2582        unsigned        iPDSrc;
     2583#   if PGM_SHW_TYPE == PGM_TYPE_PAE
     2584        PX86PDPAE       pPDPAE    = pVM->pgm.s.CTXMID(ap,PaePDs)[iPDPTRE * X86_PG_PAE_ENTRIES];
     2585#   else
     2586        AssertFailed(); /* @todo */
     2587        PX86PDPE        pPDPAE    = pVM->pgm.s.CTXMID(ap,PaePDs)[iPDPTRE * X86_PG_AMD64_ENTRIES];
     2588#   endif
     2589        PX86PDEPAE      pPDEDst   = &pPDPAE->a[0];
     2590        PGSTPD          pPDSrc    = pgmGstGetPaePDPtr(&pVM->pgm.s, iPDPTRE << X86_PDPTR_SHIFT, &iPDSrc);
     2591
     2592        if (pPDSrc == NULL)
     2593        {
     2594            /* PDPTR not present */
     2595            pVM->pgm.s.CTXMID(p,PaePDPTR)->a[iPDPTRE].n.u1Present = 0;
     2596            continue;
     2597        }
     2598#  endif /* if PGM_GST_TYPE == PGM_TYPE_PAE || PGM_GST_TYPE == PGM_TYPE_AMD64 */
     2599        for (unsigned iPD = 0; iPD < ELEMENTS(pPDSrc->a); iPD++)
     2600        {
    26002601#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    2601         Assert(&pVM->pgm.s.CTXMID(p,32BitPD)->a[iPD] == pPDEDst);
     2602            Assert(&pVM->pgm.s.CTXMID(p,32BitPD)->a[iPD] == pPDEDst);
    26022603#  else
    2603         Assert(&pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512] == pPDEDst);
    2604 #  endif
    2605         register GSTPDE PdeSrc = pPDSrc->a[iPD];
    2606         if (    PdeSrc.n.u1Present
    2607             &&  (PdeSrc.n.u1User || fRawR0Enabled))
    2608         {
     2604            Assert(&pVM->pgm.s.CTXMID(ap,PaePDs)[iPD * 2 / 512]->a[iPD * 2 % 512] == pPDEDst);
     2605#  endif
     2606            register GSTPDE PdeSrc = pPDSrc->a[iPD];
     2607            if (    PdeSrc.n.u1Present
     2608                &&  (PdeSrc.n.u1User || fRawR0Enabled))
     2609            {
    26092610#  if PGM_GST_TYPE == PGM_TYPE_32BIT
    2610             /*
    2611              * Check for conflicts with GC mappings.
    2612              */
    2613             if (iPD == iPdNoMapping)
    2614             {
     2611                /*
     2612                 * Check for conflicts with GC mappings.
     2613                 */
     2614                if (iPD == iPdNoMapping)
     2615                {
     2616                    if (pVM->pgm.s.fMappingsFixed)
     2617                    {
     2618                        /* It's fixed, just skip the mapping. */
     2619                        const unsigned cPTs = pMapping->cPTs;
     2620                        iPD += cPTs - 1;
     2621                        pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
     2622                        pMapping = pMapping->CTXALLSUFF(pNext);
     2623                        iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
     2624                        continue;
     2625                    }
     2626#   ifdef IN_RING3
     2627                    int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
     2628                    if (VBOX_FAILURE(rc))
     2629                        return rc;
     2630
     2631                    /*
     2632                     * Update iPdNoMapping and pMapping.
     2633                     */
     2634                    pMapping = pVM->pgm.s.pMappingsR3;
     2635                    while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
     2636                        pMapping = pMapping->pNextR3;
     2637                    iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
     2638#   else
     2639                    LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
     2640                    return VINF_PGM_SYNC_CR3;
     2641#   endif
     2642                }
     2643#  else /* PGM_GST_TYPE == PGM_TYPE_32BIT */
     2644                /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
     2645                Assert(iPD != iPdNoMapping);
     2646#  endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
     2647                /*
     2648                 * Sync page directory entry.
     2649                 *
     2650                 * The current approach is to allocated the page table but to set
     2651                 * the entry to not-present and postpone the page table synching till
     2652                 * it's actually used.
     2653                 */
     2654#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     2655                for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
     2656#  else
     2657                const unsigned iPdShw = iPD; NOREF(iPdShw);
     2658#  endif
     2659                {
     2660                    SHWPDE PdeDst = *pPDEDst;
     2661                    if (PdeDst.n.u1Present)
     2662                    {
     2663                        PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
     2664                        RTGCPHYS     GCPhys;
     2665                        if (    !PdeSrc.b.u1Size
     2666                            ||  !(cr4 & X86_CR4_PSE))
     2667                        {
     2668                            GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
     2669#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     2670                            /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
     2671                            GCPhys |= i * (PAGE_SIZE / 2);
     2672#  endif
     2673                        }
     2674                        else
     2675                        {
     2676                            GCPhys = PdeSrc.u & GST_PDE_BIG_PG_MASK;
     2677#  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
     2678                            /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
     2679                            GCPhys |= i * X86_PAGE_2M_SIZE;
     2680#  endif
     2681                        }
     2682
     2683                        if (    pShwPage->GCPhys == GCPhys
     2684                            &&  pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
     2685                            &&  (   pShwPage->fCached
     2686                                || (   !fGlobal
     2687                                    && (   false
     2688#  ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
     2689                                        || (   (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
     2690                                            && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
     2691                                        || (  !pShwPage->fSeenNonGlobal
     2692                                            && (cr4 & X86_CR4_PGE))
     2693#  endif
     2694                                        )
     2695                                    )
     2696                                )
     2697                            &&  (   (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
     2698                                || (   (cr4 & X86_CR4_PSE)
     2699                                    &&     ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
     2700                                        ==  ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
     2701                                )
     2702                        )
     2703                        {
     2704#  ifdef VBOX_WITH_STATISTICS
     2705                            if (   !fGlobal
     2706                                && (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
     2707                                && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
     2708                                MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPD));
     2709                            else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
     2710                                MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPT));
     2711                            else
     2712                                MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstCacheHit));
     2713#  endif /* VBOX_WITH_STATISTICS */
     2714    /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
     2715    * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
     2716    //#  ifdef PGMPOOL_WITH_CACHE
     2717    //                        pgmPoolCacheUsed(pPool, pShwPage);
     2718    //#  endif
     2719                        }
     2720                        else
     2721                        {
     2722                            pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
     2723                            pPDEDst->u = 0;
     2724                            MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreed));
     2725                        }
     2726                    }
     2727                    else
     2728                        MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstNotPresent));
     2729                    pPDEDst++;
     2730                }
     2731            }
     2732            else if (iPD != iPdNoMapping)
     2733            {
     2734                /*
     2735                 * Check if there is any page directory to mark not present here.
     2736                 */
     2737#  if PGM_SHW_TYPE == PGM_TYPE_32BIT
     2738                const unsigned iPdShw = iPD; NOREF(iPdShw);
     2739#  else
     2740                for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
     2741#  endif
     2742                {
     2743                    if (pPDEDst->n.u1Present)
     2744                    {
     2745                        pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdShw);
     2746                        pPDEDst->u = 0;
     2747                        MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreedSrcNP));
     2748                    }
     2749                    pPDEDst++;
     2750                }
     2751            }
     2752            else
     2753            {
     2754#  if PGM_GST_TYPE == PGM_TYPE_32BIT
     2755                Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
     2756                const unsigned cPTs = pMapping->cPTs;
    26152757                if (pVM->pgm.s.fMappingsFixed)
    26162758                {
    26172759                    /* It's fixed, just skip the mapping. */
    2618                     const unsigned cPTs = pMapping->cPTs;
    2619                     iPD += cPTs - 1;
    2620                     pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
    26212760                    pMapping = pMapping->CTXALLSUFF(pNext);
    26222761                    iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
    2623                     continue;
    2624                 }
    2625 
     2762                }
     2763                else
     2764                {
     2765                    /*
     2766                     * Check for conflicts for subsequent pagetables
     2767                     * and advance to the next mapping.
     2768                     */
     2769                    iPdNoMapping = ~0U;
     2770                    unsigned iPT = cPTs;
     2771                    while (iPT-- > 1)
     2772                    {
     2773                        if (    pPDSrc->a[iPD + iPT].n.u1Present
     2774                            &&  (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
     2775                        {
    26262776#   ifdef IN_RING3
    2627                 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
    2628                 if (VBOX_FAILURE(rc))
    2629                     return rc;
    2630 
    2631                 /*
    2632                  * Update iPdNoMapping and pMapping.
    2633                  */
    2634                 pMapping = pVM->pgm.s.pMappingsR3;
    2635                 while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
    2636                     pMapping = pMapping->pNextR3;
    2637                 iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
     2777                            int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
     2778                            if (VBOX_FAILURE(rc))
     2779                                return rc;
     2780
     2781                            /*
     2782                             * Update iPdNoMapping and pMapping.
     2783                             */
     2784                            pMapping = pVM->pgm.s.CTXALLSUFF(pMappings);
     2785                            while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
     2786                                pMapping = pMapping->CTXALLSUFF(pNext);
     2787                            iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
     2788                            break;
    26382789#   else
    2639                 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
    2640                 return VINF_PGM_SYNC_CR3;
     2790                            LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
     2791                            return VINF_PGM_SYNC_CR3;
    26412792#   endif
    2642             }
    2643 #  else /* PGM_GST_TYPE == PGM_TYPE_32BIT */
    2644             /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
    2645             Assert(iPD != iPdNoMapping);
    2646 #  endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
    2647             /*
    2648              * Sync page directory entry.
    2649              *
    2650              * The current approach is to allocated the page table but to set
    2651              * the entry to not-present and postpone the page table synching till
    2652              * it's actually used.
    2653              */
    2654 #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    2655             const unsigned iPdShw = iPD; NOREF(iPdShw);
    2656 #  else
    2657             for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
    2658 #  endif
    2659             {
    2660                 SHWPDE PdeDst = *pPDEDst;
    2661                 if (PdeDst.n.u1Present)
    2662                 {
    2663                     PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
    2664                     RTGCPHYS     GCPhys;
    2665                     if (    !PdeSrc.b.u1Size
    2666                         ||  !(cr4 & X86_CR4_PSE))
    2667                     {
    2668                         GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
    2669 #  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    2670                         GCPhys |= i * (PAGE_SIZE / 2);
    2671 #  endif
    2672                     }
    2673                     else
    2674                     {
    2675                         GCPhys = PdeSrc.u & GST_PDE_BIG_PG_MASK;
    2676 #  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    2677                         GCPhys |= i * X86_PAGE_2M_SIZE;
    2678 #  endif
    2679                     }
    2680 
    2681                     if (    pShwPage->GCPhys == GCPhys
    2682                         &&  pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
    2683                         &&  (   pShwPage->fCached
    2684                              || (   !fGlobal
    2685                                  && (   false
    2686 #  ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
    2687                                      || (   (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
    2688                                          && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
    2689                                      || (  !pShwPage->fSeenNonGlobal
    2690                                          && (cr4 & X86_CR4_PGE))
    2691 #  endif
    2692                                      )
    2693                                 )
    2694                             )
    2695                         &&  (   (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
    2696                              || (   (cr4 & X86_CR4_PSE)
    2697                                  &&     ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
    2698                                     ==  ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
    2699                             )
    2700                        )
    2701                     {
    2702 #  ifdef VBOX_WITH_STATISTICS
    2703                         if (   !fGlobal
    2704                             && (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
    2705                             && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
    2706                             MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPD));
    2707                         else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
    2708                             MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstSkippedGlobalPT));
    2709                         else
    2710                             MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstCacheHit));
    2711 #  endif /* VBOX_WITH_STATISTICS */
    2712 /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
    2713  * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
    2714 //#  ifdef PGMPOOL_WITH_CACHE
    2715 //                        pgmPoolCacheUsed(pPool, pShwPage);
    2716 //#  endif
    2717                     }
    2718                     else
    2719                     {
    2720                         pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
    2721                         pPDEDst->u = 0;
    2722                         MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreed));
    2723                     }
    2724                 }
    2725                 else
    2726                     MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstNotPresent));
    2727                 pPDEDst++;
    2728             }
    2729         }
    2730         else if (iPD != iPdNoMapping)
    2731         {
    2732             /*
    2733              * Check if there is any page directory to mark not present here.
    2734              */
    2735 #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    2736             const unsigned iPdShw = iPD; NOREF(iPdShw);
    2737 #  else
    2738             for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
    2739 #  endif
    2740             {
    2741                 if (pPDEDst->n.u1Present)
    2742                 {
    2743                     pgmPoolFreeByPage(pPool, pgmPoolGetPage(pPool, pPDEDst->u & SHW_PDE_PG_MASK), SHW_POOL_ROOT_IDX, iPdShw);
    2744                     pPDEDst->u = 0;
    2745                     MY_STAM_COUNTER_INC(&pVM->pgm.s.CTXMID(Stat,SyncCR3DstFreedSrcNP));
    2746                 }
    2747                 pPDEDst++;
    2748             }
    2749         }
    2750         else
    2751         {
    2752 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
    2753             Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    2754             const unsigned cPTs = pMapping->cPTs;
    2755             if (pVM->pgm.s.fMappingsFixed)
    2756             {
    2757                 /* It's fixed, just skip the mapping. */
    2758                 pMapping = pMapping->CTXALLSUFF(pNext);
    2759                 iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
    2760             }
    2761             else
    2762             {
    2763                 /*
    2764                  * Check for conflicts for subsequent pagetables
    2765                  * and advance to the next mapping.
    2766                  */
    2767                 iPdNoMapping = ~0U;
    2768                 unsigned iPT = cPTs;
    2769                 while (iPT-- > 1)
    2770                 {
    2771                     if (    pPDSrc->a[iPD + iPT].n.u1Present
    2772                         &&  (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
    2773                     {
    2774 #   ifdef IN_RING3
    2775                         int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD);
    2776                         if (VBOX_FAILURE(rc))
    2777                             return rc;
    2778 
    2779                         /*
    2780                          * Update iPdNoMapping and pMapping.
    2781                          */
    2782                         pMapping = pVM->pgm.s.CTXALLSUFF(pMappings);
    2783                         while (pMapping && pMapping->GCPtr < (iPD << X86_PD_SHIFT))
    2784                             pMapping = pMapping->CTXALLSUFF(pNext);
    2785                         iPdNoMapping = pMapping ? pMapping->GCPtr >> X86_PD_SHIFT : ~0U;
    2786                         break;
    2787 #   else
    2788                         LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
    2789                         return VINF_PGM_SYNC_CR3;
    2790 #   endif
    2791                     }
    2792                 }
    2793                 if (iPdNoMapping == ~0U && pMapping)
    2794                 {
    2795                     pMapping = pMapping->CTXALLSUFF(pNext);
    2796                     if (pMapping)
    2797                         iPdNoMapping = pMapping->GCPtr >> X86_PD_SHIFT;
    2798                 }
     2793                        }
     2794                    }
     2795                    if (iPdNoMapping == ~0U && pMapping)
     2796                    {
     2797                        pMapping = pMapping->CTXALLSUFF(pNext);
     2798                        if (pMapping)
     2799                            iPdNoMapping = pMapping->GCPtr >> X86_PD_SHIFT;
     2800                    }
     2801                }
     2802
     2803                /* advance. */
     2804                iPD += cPTs - 1;
     2805                pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
    27992806#  else /* PGM_GST_TYPE == PGM_TYPE_32BIT */
    28002807                /* PAE and AMD64 modes are hardware accelerated only, so there are no mappings. */
     
    28032810            }
    28042811
    2805             /* advance. */
    2806             iPD += cPTs - 1;
    2807             pPDEDst += cPTs + (PGM_SHW_TYPE != PGM_TYPE_32BIT) * cPTs;
    2808         }
    2809 
    2810     } /* for iPD */
    2811 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    2812 #  error "Guest 32-bit mode and shadow AMD64 mode doesn't add up!"
    2813 # endif
     2812        } /* for iPD */
     2813    } /* for each PDPTE (PAE) */
    28142814
    28152815    return VINF_SUCCESS;
    2816 
    2817 #elif PGM_GST_TYPE == PGM_TYPE_PAE
    2818 # if PGM_SHW_TYPE == PGM_TYPE_PAE
    2819 //# error not implemented
    2820     return VERR_INTERNAL_ERROR;
    2821 
    2822 # else  /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */
    2823 #  error "Guest PAE mode, but not the shadow mode ; 32bit - maybe, but amd64 no."
    2824 # endif /* PGM_SHW_TYPE != PGM_TYPE_AMD64 */
    28252816
    28262817#elif PGM_GST_TYPE == PGM_TYPE_AMD64
     
    28342825
    28352826#else /* guest real and protected mode */
    2836 
    28372827    return VINF_SUCCESS;
    28382828#endif
     
    28822872
    28832873#if PGM_GST_TYPE == PGM_TYPE_32BIT
    2884 
    2885 # if PGM_SHW_TYPE != PGM_TYPE_32BIT && PGM_SHW_TYPE != PGM_TYPE_PAE
    2886 #  error "Invalid shadow mode for 32-bit guest paging."
    2887 # endif
    2888 
    28892874    PPGM        pPGM = &pVM->pgm.s;
    28902875    RTHCPHYS    HCPhysShw;              /* page address derived from the shadow page tables. */
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r7666 r7676  
    7878# define GST_PD_SHIFT               X86_PD_PAE_SHIFT
    7979# define GST_PD_MASK                X86_PD_PAE_MASK
    80 # define GST_TOTAL_PD_ENTRIES       (X86_PG_PAE_ENTRIES*4)
     80# if PGM_GST_TYPE == PGM_TYPE_PAE
     81#  define GST_TOTAL_PD_ENTRIES       (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPTE_ENTRIES)
     82# else
     83#  define GST_TOTAL_PD_ENTRIES       (X86_PG_AMD64_ENTRIES * X86_PG_AMD64_PDPTE_ENTRIES)
     84# endif
    8185# define GST_PTE_PG_MASK            X86_PTE_PAE_PG_MASK
    8286# define GST_PT_SHIFT               X86_PT_PAE_SHIFT
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r6829 r7676  
    29892989
    29902990            case PGMPOOLKIND_ROOT_PAE_PD:
    2991                 for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES * 4; iPage++)
     2991                for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPTE_ENTRIES; iPage++)
    29922992                    if ((u.pau64[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
    29932993                        u.pau64[iPage] = 0;
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r5999 r7676  
    7777# define SHW_PDPTR_SHIFT        X86_PDPTR_SHIFT
    7878# define SHW_PDPTR_MASK         X86_PDPTR_MASK_32
    79 # define SHW_TOTAL_PD_ENTRIES   (X86_PG_PAE_ENTRIES*4)
     79# define SHW_TOTAL_PD_ENTRIES   (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPTE_ENTRIES)
    8080# define SHW_POOL_ROOT_IDX      PGMPOOL_IDX_PAE_PD
    8181#endif
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette