VirtualBox

Changeset 17586 in vbox for trunk


Ignore:
Timestamp:
Mar 9, 2009 3:28:25 PM (16 years ago)
Author:
vboxsync
Message:

Removed all dead non-VBOX_WITH_PGMPOOL_PAGING_ONLY code.

Location:
trunk/src/VBox/VMM
Files:
15 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/VMM/PGM.cpp

    r17556 r17586  
    11771177    pVM->pgm.s.enmHostMode      = SUPPAGINGMODE_INVALID;
    11781178    pVM->pgm.s.GCPhysCR3        = NIL_RTGCPHYS;
    1179 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1180     pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
    1181 #endif
    11821179    pVM->pgm.s.fA20Enabled      = true;
    11831180    pVM->pgm.s.GCPhys4MBPSEMask = RT_BIT_64(32) - 1; /* default; checked later */
     
    12981295        rc = pgmR3PoolInit(pVM);
    12991296    }
    1300 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    13011297    if (RT_SUCCESS(rc))
    13021298        rc = PGMR3ChangeMode(pVM, PGMMODE_REAL);
    1303 #endif
     1299
    13041300    if (RT_SUCCESS(rc))
    13051301    {
     
    14731469
    14741470    /*
    1475      * Allocate pages for the three possible guest contexts (AMD64, PAE and plain 32-Bit).
    1476      * We allocate pages for all three possibilities in order to simplify mappings and
    1477      * avoid resource failure during mode switches. So, we need to cover all levels
    1478      * of the first 4GB down to PD level.
    1479      * As with the intermediate context, AMD64 uses the PAE PDPT and PDs.
    1480      */
    1481 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1482     pVM->pgm.s.pShw32BitPdR3    = (PX86PD)MMR3PageAllocLow(pVM);
    1483 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    1484     pVM->pgm.s.pShw32BitPdR0    = (uintptr_t)pVM->pgm.s.pShw32BitPdR3;
    1485 # endif
    1486     pVM->pgm.s.apShwPaePDsR3[0] = (PX86PDPAE)MMR3PageAlloc(pVM);
    1487     pVM->pgm.s.apShwPaePDsR3[1] = (PX86PDPAE)MMR3PageAlloc(pVM);
    1488     AssertRelease((uintptr_t)pVM->pgm.s.apShwPaePDsR3[0] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apShwPaePDsR3[1]);
    1489     pVM->pgm.s.apShwPaePDsR3[2] = (PX86PDPAE)MMR3PageAlloc(pVM);
    1490     AssertRelease((uintptr_t)pVM->pgm.s.apShwPaePDsR3[1] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apShwPaePDsR3[2]);
    1491     pVM->pgm.s.apShwPaePDsR3[3] = (PX86PDPAE)MMR3PageAlloc(pVM);
    1492     AssertRelease((uintptr_t)pVM->pgm.s.apShwPaePDsR3[2] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apShwPaePDsR3[3]);
    1493 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    1494     pVM->pgm.s.apShwPaePDsR0[0] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[0];
    1495     pVM->pgm.s.apShwPaePDsR0[1] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[1];
    1496     pVM->pgm.s.apShwPaePDsR0[2] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[2];
    1497     pVM->pgm.s.apShwPaePDsR0[3] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[3];
    1498 # endif
    1499     pVM->pgm.s.pShwPaePdptR3 = (PX86PDPT)MMR3PageAllocLow(pVM);
    1500 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    1501     pVM->pgm.s.pShwPaePdptR0 = (uintptr_t)pVM->pgm.s.pShwPaePdptR3;
    1502 # endif
    1503     pVM->pgm.s.pShwNestedRootR3 = MMR3PageAllocLow(pVM);
    1504 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    1505     pVM->pgm.s.pShwNestedRootR0 = (uintptr_t)pVM->pgm.s.pShwNestedRootR3;
    1506 # endif
    1507 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    1508 
    1509 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1510     if (    !pVM->pgm.s.pShw32BitPdR3
    1511         ||  !pVM->pgm.s.apShwPaePDsR3[0]
    1512         ||  !pVM->pgm.s.apShwPaePDsR3[1]
    1513         ||  !pVM->pgm.s.apShwPaePDsR3[2]
    1514         ||  !pVM->pgm.s.apShwPaePDsR3[3]
    1515         ||  !pVM->pgm.s.pShwPaePdptR3
    1516         ||  !pVM->pgm.s.pShwNestedRootR3)
    1517     {
    1518         AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
    1519         return VERR_NO_PAGE_MEMORY;
    1520     }
    1521 #endif
    1522 
    1523     /* get physical addresses. */
    1524 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1525     pVM->pgm.s.HCPhysShw32BitPD = MMPage2Phys(pVM, pVM->pgm.s.pShw32BitPdR3);
    1526     Assert(MMPagePhys2Page(pVM, pVM->pgm.s.HCPhysShw32BitPD) == pVM->pgm.s.pShw32BitPdR3);
    1527     pVM->pgm.s.aHCPhysPaePDs[0] = MMPage2Phys(pVM, pVM->pgm.s.apShwPaePDsR3[0]);
    1528     pVM->pgm.s.aHCPhysPaePDs[1] = MMPage2Phys(pVM, pVM->pgm.s.apShwPaePDsR3[1]);
    1529     pVM->pgm.s.aHCPhysPaePDs[2] = MMPage2Phys(pVM, pVM->pgm.s.apShwPaePDsR3[2]);
    1530     pVM->pgm.s.aHCPhysPaePDs[3] = MMPage2Phys(pVM, pVM->pgm.s.apShwPaePDsR3[3]);
    1531     pVM->pgm.s.HCPhysShwPaePdpt = MMPage2Phys(pVM, pVM->pgm.s.pShwPaePdptR3);
    1532     pVM->pgm.s.HCPhysShwNestedRoot = MMPage2Phys(pVM, pVM->pgm.s.pShwNestedRootR3);
    1533 #endif
    1534 
    1535     /*
    1536      * Initialize the pages, setting up the PML4 and PDPT for action below 4GB.
    1537      */
    1538 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1539     ASMMemZero32(pVM->pgm.s.pShw32BitPdR3, PAGE_SIZE);
    1540     ASMMemZero32(pVM->pgm.s.pShwPaePdptR3, PAGE_SIZE);
    1541     ASMMemZero32(pVM->pgm.s.pShwNestedRootR3, PAGE_SIZE);
    1542 
    1543     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apShwPaePDsR3); i++)
    1544     {
    1545         ASMMemZero32(pVM->pgm.s.apShwPaePDsR3[i], PAGE_SIZE);
    1546         pVM->pgm.s.pShwPaePdptR3->a[i].u = X86_PDPE_P | PGM_PLXFLAGS_PERMANENT | pVM->pgm.s.aHCPhysPaePDs[i];
    1547         /* The flags will be corrected when entering and leaving long mode. */
    1548     }
    1549 #endif
    1550 
    1551     /*
    15521471     * Initialize paging workers and mode from current host mode
    15531472     * and the guest running in real mode.
     
    15821501    }
    15831502    rc = pgmR3ModeDataInit(pVM, false /* don't resolve GC and R0 syms yet */);
    1584 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1585     if (RT_SUCCESS(rc))
    1586         rc = PGMR3ChangeMode(pVM, PGMMODE_REAL);
    1587 #endif
    15881503    if (RT_SUCCESS(rc))
    15891504    {
    15901505        LogFlow(("pgmR3InitPaging: returns successfully\n"));
    15911506#if HC_ARCH_BITS == 64
    1592 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1593         LogRel(("Debug: HCPhysShw32BitPD=%RHp aHCPhysPaePDs={%RHp,%RHp,%RHp,%RHp} HCPhysShwPaePdpt=%RHp\n",
    1594                 pVM->pgm.s.HCPhysShw32BitPD,
    1595                 pVM->pgm.s.aHCPhysPaePDs[0], pVM->pgm.s.aHCPhysPaePDs[1], pVM->pgm.s.aHCPhysPaePDs[2], pVM->pgm.s.aHCPhysPaePDs[3],
    1596                 pVM->pgm.s.HCPhysShwPaePdpt));
    1597 # endif
    15981507        LogRel(("Debug: HCPhysInterPD=%RHp HCPhysInterPaePDPT=%RHp HCPhysInterPaePML4=%RHp\n",
    15991508                pVM->pgm.s.HCPhysInterPD, pVM->pgm.s.HCPhysInterPaePDPT, pVM->pgm.s.HCPhysInterPaePML4));
     
    18911800    int     rc;
    18921801
    1893 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1894     /*
    1895      * Reserve space for mapping the paging pages into guest context.
    1896      */
    1897     rc = MMR3HyperReserve(pVM, PAGE_SIZE * (2 + RT_ELEMENTS(pVM->pgm.s.apShwPaePDsR3) + 1 + 2 + 2), "Paging", &GCPtr);
    1898     AssertRCReturn(rc, rc);
    1899     pVM->pgm.s.pShw32BitPdRC = GCPtr;
    1900     MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
    1901 #endif
    1902 
    19031802    /*
    19041803     * Reserve space for the dynamic mappings.
     
    19331832{
    19341833    int rc;
    1935 
    1936 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1937     /*
    1938      * Map the paging pages into the guest context.
    1939      */
    1940     RTGCPTR GCPtr = pVM->pgm.s.pShw32BitPdRC;
    1941     AssertReleaseReturn(GCPtr, VERR_INTERNAL_ERROR);
    1942 
    1943     rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysShw32BitPD, PAGE_SIZE, 0);
    1944     AssertRCReturn(rc, rc);
    1945     pVM->pgm.s.pShw32BitPdRC = GCPtr;
    1946     GCPtr += PAGE_SIZE;
    1947     GCPtr += PAGE_SIZE; /* reserved page */
    1948 
    1949     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apShwPaePDsR3); i++)
    1950     {
    1951         rc = PGMMap(pVM, GCPtr, pVM->pgm.s.aHCPhysPaePDs[i], PAGE_SIZE, 0);
    1952         AssertRCReturn(rc, rc);
    1953         pVM->pgm.s.apShwPaePDsRC[i] = GCPtr;
    1954         GCPtr += PAGE_SIZE;
    1955     }
    1956     /* A bit of paranoia is justified. */
    1957     AssertRelease(pVM->pgm.s.apShwPaePDsRC[0] + PAGE_SIZE == pVM->pgm.s.apShwPaePDsRC[1]);
    1958     AssertRelease(pVM->pgm.s.apShwPaePDsRC[1] + PAGE_SIZE == pVM->pgm.s.apShwPaePDsRC[2]);
    1959     AssertRelease(pVM->pgm.s.apShwPaePDsRC[2] + PAGE_SIZE == pVM->pgm.s.apShwPaePDsRC[3]);
    1960     GCPtr += PAGE_SIZE; /* reserved page */
    1961 
    1962     rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysShwPaePdpt, PAGE_SIZE, 0);
    1963     AssertRCReturn(rc, rc);
    1964     pVM->pgm.s.pShwPaePdptRC = GCPtr;
    1965     GCPtr += PAGE_SIZE;
    1966     GCPtr += PAGE_SIZE; /* reserved page */
    1967 #endif
    19681834
    19691835    /*
     
    20271893    pVM->pgm.s.GCPtrCR3Mapping += offDelta;
    20281894    /** @todo move this into shadow and guest specific relocation functions. */
    2029 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2030     AssertMsg(pVM->pgm.s.pShw32BitPdR3, ("Init order, no relocation before paging is initialized!\n"));
    2031     pVM->pgm.s.pShw32BitPdRC += offDelta;
    2032 #endif
    20331895    pVM->pgm.s.pGst32BitPdRC += offDelta;
    20341896    for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apGstPaePDsRC); i++)
    20351897    {
    2036 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2037         AssertCompile(RT_ELEMENTS(pVM->pgm.s.apShwPaePDsRC) == RT_ELEMENTS(pVM->pgm.s.apGstPaePDsRC));
    2038         pVM->pgm.s.apShwPaePDsRC[i] += offDelta;
    2039 #endif
    20401898        pVM->pgm.s.apGstPaePDsRC[i] += offDelta;
    20411899    }
    20421900    pVM->pgm.s.pGstPaePdptRC += offDelta;
    2043 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2044     pVM->pgm.s.pShwPaePdptRC += offDelta;
    2045 #endif
    2046 
    2047 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     1901
    20481902    pVM->pgm.s.pShwPageCR3RC += offDelta;
    2049 #endif
    20501903
    20511904    pgmR3ModeDataInit(pVM, true /* resolve GC/R0 symbols */);
     
    31162969    pVM->pgm.s.pfnR3GstModifyPage           = pModeData->pfnR3GstModifyPage;
    31172970    pVM->pgm.s.pfnR3GstGetPDE               = pModeData->pfnR3GstGetPDE;
    3118 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    3119     pVM->pgm.s.pfnR3GstMonitorCR3           = pModeData->pfnR3GstMonitorCR3;
    3120     pVM->pgm.s.pfnR3GstUnmonitorCR3         = pModeData->pfnR3GstUnmonitorCR3;
    3121 #endif
    3122 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    3123     pVM->pgm.s.pfnR3GstWriteHandlerCR3      = pModeData->pfnR3GstWriteHandlerCR3;
    3124     pVM->pgm.s.pszR3GstWriteHandlerCR3      = pModeData->pszR3GstWriteHandlerCR3;
    3125     pVM->pgm.s.pfnR3GstPAEWriteHandlerCR3   = pModeData->pfnR3GstPAEWriteHandlerCR3;
    3126     pVM->pgm.s.pszR3GstPAEWriteHandlerCR3   = pModeData->pszR3GstPAEWriteHandlerCR3;
    3127 #endif
    31282971    pVM->pgm.s.pfnRCGstGetPage              = pModeData->pfnRCGstGetPage;
    31292972    pVM->pgm.s.pfnRCGstModifyPage           = pModeData->pfnRCGstModifyPage;
    31302973    pVM->pgm.s.pfnRCGstGetPDE               = pModeData->pfnRCGstGetPDE;
    3131 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    3132     pVM->pgm.s.pfnRCGstMonitorCR3           = pModeData->pfnRCGstMonitorCR3;
    3133     pVM->pgm.s.pfnRCGstUnmonitorCR3         = pModeData->pfnRCGstUnmonitorCR3;
    3134 #endif
    3135 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    3136     pVM->pgm.s.pfnRCGstWriteHandlerCR3      = pModeData->pfnRCGstWriteHandlerCR3;
    3137     pVM->pgm.s.pfnRCGstPAEWriteHandlerCR3   = pModeData->pfnRCGstPAEWriteHandlerCR3;
    3138 #endif
    31392974    pVM->pgm.s.pfnR0GstGetPage              = pModeData->pfnR0GstGetPage;
    31402975    pVM->pgm.s.pfnR0GstModifyPage           = pModeData->pfnR0GstModifyPage;
    31412976    pVM->pgm.s.pfnR0GstGetPDE               = pModeData->pfnR0GstGetPDE;
    3142 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    3143     pVM->pgm.s.pfnR0GstMonitorCR3           = pModeData->pfnR0GstMonitorCR3;
    3144     pVM->pgm.s.pfnR0GstUnmonitorCR3         = pModeData->pfnR0GstUnmonitorCR3;
    3145 #endif
    3146 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    3147     pVM->pgm.s.pfnR0GstWriteHandlerCR3      = pModeData->pfnR0GstWriteHandlerCR3;
    3148     pVM->pgm.s.pfnR0GstPAEWriteHandlerCR3   = pModeData->pfnR0GstPAEWriteHandlerCR3;
    3149 #endif
    31502977
    31512978    /* both */
     
    34863313        }
    34873314    }
    3488 
    3489 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    3490     /** @todo This is a bug!
    3491      *
    3492      * We must flush the PGM pool cache if the guest mode changes; we don't always
    3493      * switch shadow paging mode (e.g. protected->32-bit) and shouldn't reuse
    3494      * the shadow page tables.
    3495      *
    3496      * That only applies when switching between paging and non-paging modes.
    3497      */
    3498    /** @todo A20 setting */
    3499     if (   pVM->pgm.s.CTX_SUFF(pPool)
    3500         && !HWACCMIsNestedPagingActive(pVM)
    3501         && PGMMODE_WITH_PAGING(pVM->pgm.s.enmGuestMode) != PGMMODE_WITH_PAGING(enmGuestMode))
    3502     {
    3503         Log(("PGMR3ChangeMode: changing guest paging mode -> flush pgm pool cache!\n"));
    3504         pgmPoolFlushAll(pVM);
    3505     }
    3506 #endif
    35073315
    35083316    /*
  • trunk/src/VBox/VMM/PGMBth.h

    r17483 r17586  
    130130PGM_BTH_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3)
    131131{
    132 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    133132    /* Here we deal with allocation of the root shadow page table for real and protected mode during mode switches;
    134133     * Other modes rely on MapCR3/UnmapCR3 to setup the shadow root page tables.
    135134     */
    136 # if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
    137            || PGM_SHW_TYPE == PGM_TYPE_PAE    \
    138            || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
    139        && (   PGM_GST_TYPE == PGM_TYPE_REAL   \
    140            || PGM_GST_TYPE == PGM_TYPE_PROT))
     135#if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
     136          || PGM_SHW_TYPE == PGM_TYPE_PAE    \
     137          || PGM_SHW_TYPE == PGM_TYPE_AMD64) \
     138      && (   PGM_GST_TYPE == PGM_TYPE_REAL   \
     139          || PGM_GST_TYPE == PGM_TYPE_PROT))
    141140
    142141    Assert(!HWACCMIsNestedPagingActive(pVM));
     
    188187    /* Apply all hypervisor mappings to the new CR3. */
    189188    return pgmMapActivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    190 # endif
    191 #else
    192     /* nothing special to do here - InitData does the job. */
    193189#endif
    194190    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/PGMGst.h

    r17215 r17586  
    3131PGM_GST_DECL(int, Exit)(PVM pVM);
    3232
    33 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    34 static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
    35 static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
    36 #endif
    37 
    3833/* all */
    3934PGM_GST_DECL(int, GetPage)(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys);
    4035PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
    4136PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
    42 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    43 PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
    44 PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
    45 #endif
    4637__END_DECLS
    4738
     
    6657    pModeData->pfnR3GstGetPage            = PGM_GST_NAME(GetPage);
    6758    pModeData->pfnR3GstModifyPage         = PGM_GST_NAME(ModifyPage);
    68 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    69     pModeData->pfnR3GstMonitorCR3         = PGM_GST_NAME(MonitorCR3);
    70     pModeData->pfnR3GstUnmonitorCR3       = PGM_GST_NAME(UnmonitorCR3);
    71 #endif
    72 
    73 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    74 # if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    75     pModeData->pfnR3GstWriteHandlerCR3    = PGM_GST_NAME(WriteHandlerCR3);
    76     pModeData->pszR3GstWriteHandlerCR3    = "Guest CR3 Write access handler";
    77     pModeData->pfnR3GstPAEWriteHandlerCR3 = PGM_GST_NAME(WriteHandlerCR3);
    78     pModeData->pszR3GstPAEWriteHandlerCR3 = "Guest CR3 Write access handler (PAE)";
    79 # else
    80     pModeData->pfnR3GstWriteHandlerCR3    = NULL;
    81     pModeData->pszR3GstWriteHandlerCR3    = NULL;
    82     pModeData->pfnR3GstPAEWriteHandlerCR3 = NULL;
    83     pModeData->pszR3GstPAEWriteHandlerCR3 = NULL;
    84 # endif
    85 #endif
    8659
    8760    if (fResolveGCAndR0)
     
    9770        rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_GST_NAME_RC_STR(GetPDE),           &pModeData->pfnRCGstGetPDE);
    9871        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPDE), rc), rc);
    99 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    100         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_GST_NAME_RC_STR(MonitorCR3),       &pModeData->pfnRCGstMonitorCR3);
    101         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(MonitorCR3), rc), rc);
    102         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_GST_NAME_RC_STR(UnmonitorCR3),     &pModeData->pfnRCGstUnmonitorCR3);
    103         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(UnmonitorCR3), rc), rc);
    104 # endif
    105 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    106 #  if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    107         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_GST_NAME_RC_STR(WriteHandlerCR3),  &pModeData->pfnRCGstWriteHandlerCR3);
    108         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
    109         rc = PDMR3LdrGetSymbolRC(pVM, NULL,       PGM_GST_NAME_RC_STR(WriteHandlerCR3),  &pModeData->pfnRCGstPAEWriteHandlerCR3);
    110         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
    111 #  endif
    112 # endif
    11372#endif /* Not AMD64 shadow paging. */
    11473
     
    12079        rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_GST_NAME_R0_STR(GetPDE),           &pModeData->pfnR0GstGetPDE);
    12180        AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(GetPDE), rc), rc);
    122 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    123         rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_GST_NAME_R0_STR(MonitorCR3),       &pModeData->pfnR0GstMonitorCR3);
    124         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(MonitorCR3), rc), rc);
    125         rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_GST_NAME_R0_STR(UnmonitorCR3),     &pModeData->pfnR0GstUnmonitorCR3);
    126         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(UnmonitorCR3), rc), rc);
    127 #endif
    128 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    129 # if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    130         rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_GST_NAME_R0_STR(WriteHandlerCR3),  &pModeData->pfnR0GstWriteHandlerCR3);
    131         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
    132         rc = PDMR3LdrGetSymbolR0(pVM, NULL,       PGM_GST_NAME_R0_STR(WriteHandlerCR3),  &pModeData->pfnR0GstPAEWriteHandlerCR3);
    133         AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
    134 # endif
    135 #endif
    13681    }
    13782
     
    15297     * Map and monitor CR3
    15398     */
    154 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    15599    int rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
    156 #else
    157     int rc = PGM_BTH_NAME(MapCR3)(pVM, GCPhysCR3);
    158     if (RT_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
    159         rc = PGM_GST_NAME(MonitorCR3)(pVM, GCPhysCR3);
    160 #endif
    161100    return rc;
    162101}
     
    187126    int rc;
    188127
    189 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    190128    rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
    191 #else
    192     rc = PGM_GST_NAME(UnmonitorCR3)(pVM);
    193     if (RT_SUCCESS(rc))
    194         rc = PGM_BTH_NAME(UnmapCR3)(pVM);
    195 #endif
    196129    return rc;
    197130}
    198131
    199132
    200 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    201 
    202 #if PGM_GST_TYPE == PGM_TYPE_32BIT
    203 /**
    204  * Physical write access for the Guest CR3 in 32-bit mode.
    205  *
    206  * @returns VINF_SUCCESS if the handler has carried out the operation.
    207  * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
    208  * @param   pVM             VM Handle.
    209  * @param   GCPhys          The physical address the guest is writing to.
    210  * @param   pvPhys          The HC mapping of that address.
    211  * @param   pvBuf           What the guest is reading/writing.
    212  * @param   cbBuf           How much it's reading/writing.
    213  * @param   enmAccessType   The access type.
    214  * @param   pvUser          User argument.
    215  */
    216 static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
    217 {
    218     AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    219     Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    220     Log2(("pgmR3Gst32BitWriteHandlerCR3: ff=%#x GCPhys=%RGp pvPhys=%p cbBuf=%d pvBuf={%.*Rhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));
    221 
    222     /*
    223      * Do the write operation.
    224      */
    225     memcpy(pvPhys, pvBuf, cbBuf);
    226     if (    !pVM->pgm.s.fMappingsFixed
    227         &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    228     {
    229         /*
    230          * Check for conflicts.
    231          */
    232         const RTGCPTR   offPD = GCPhys & PAGE_OFFSET_MASK;
    233         const unsigned  iPD1  = offPD / sizeof(X86PDE);
    234         const unsigned  iPD2  = (unsigned)(offPD + cbBuf - 1) / sizeof(X86PDE);
    235         Assert(iPD1 - iPD2 <= 1);
    236         if (    (   pVM->pgm.s.pGst32BitPdR3->a[iPD1].n.u1Present
    237                  && pgmGetMapping(pVM, iPD1 << X86_PD_SHIFT) )
    238             ||  (   iPD1 != iPD2
    239                  && pVM->pgm.s.pGst32BitPdR3->a[iPD2].n.u1Present
    240                  && pgmGetMapping(pVM, iPD2 << X86_PD_SHIFT) )
    241            )
    242         {
    243             Log(("pgmR3Gst32BitWriteHandlerCR3: detected conflict. iPD1=%#x iPD2=%#x GCPhys=%RGp\n", iPD1, iPD2, GCPhys));
    244             STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWriteConflict);
    245             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    246         }
    247     }
    248 
    249     STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
    250     return VINF_SUCCESS;
    251 }
    252 #endif /* 32BIT */
    253 
    254 #if PGM_GST_TYPE == PGM_TYPE_PAE
    255 
    256 /**
    257  * Physical write access handler for the Guest CR3 in PAE mode.
    258  *
    259  * @returns VINF_SUCCESS if the handler has carried out the operation.
    260  * @returns VINF_PGM_HANDLER_DO_DEFAULT if the caller should carry out the access operation.
    261  * @param   pVM             VM Handle.
    262  * @param   GCPhys          The physical address the guest is writing to.
    263  * @param   pvPhys          The HC mapping of that address.
    264  * @param   pvBuf           What the guest is reading/writing.
    265  * @param   cbBuf           How much it's reading/writing.
    266  * @param   enmAccessType   The access type.
    267  * @param   pvUser          User argument.
    268  */
    269 static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser)
    270 {
    271     AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    272     Assert(enmAccessType == PGMACCESSTYPE_WRITE);
    273     Log2(("pgmR3GstPAEWriteHandlerCR3: ff=%#x GCPhys=%RGp pvPhys=%p cbBuf=%d pvBuf={%.*Rhxs}\n", pVM->fForcedActions, GCPhys, pvPhys, cbBuf, cbBuf, pvBuf));
    274 
    275     /*
    276      * Do the write operation.
    277      */
    278     memcpy(pvPhys, pvBuf, cbBuf);
    279     if (    !pVM->pgm.s.fMappingsFixed
    280         &&  !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
    281     {
    282         /*
    283          * Check if any of the PDs have changed.
    284          * We'll simply check all of them instead of figuring out which one/two to check.
    285          */
    286         for (unsigned i = 0; i < 4; i++)
    287         {
    288             if (    pVM->pgm.s.pGstPaePdptR3->a[i].n.u1Present
    289                 &&  (pVM->pgm.s.pGstPaePdptR3->a[i].u & X86_PDPE_PG_MASK) != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
    290             {
    291                 Log(("pgmR3GstPAEWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
    292                      i, pVM->pgm.s.pGstPaePdptR3->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
    293                 /*
    294                  * The PD has changed.
    295                  * We will schedule a monitoring update for the next TLB Flush,
    296                  * InvalidatePage or SyncCR3.
    297                  *
    298      * This isn't perfect, because a lazy page sync might be dealing with a half-
    299      * updated PDPE. However, we assume that the guest OS is disabling interrupts
    300                  * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
    301                  * executing.
    302                  */
    303                 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
    304             }
    305         }
    306     }
    307     /*
    308      * Flag an update of the monitor at the next crossroad so we don't monitor the
    309      * wrong pages for so long that they can be reused as code pages and freak out
    310      * the recompiler or something.
    311      */
    312     else
    313         pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
    314 
    315 
    316     STAM_COUNTER_INC(&pVM->pgm.s.StatR3GuestPDWrite);
    317     return VINF_SUCCESS;
    318 }
    319 
    320 #endif /* PAE */
    321 #endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
  • trunk/src/VBox/VMM/PGMInternal.h

    r17513 r17586  
    5353 * @{
    5454 */
    55 
    56 /*
    57  * Enable to use the PGM pool for all levels in the paging chain in all paging modes.
    58  */
    59 #define VBOX_WITH_PGMPOOL_PAGING_ONLY
    6055
    6156/**
     
    14601455/** The first normal index. */
    14611456#define PGMPOOL_IDX_FIRST_SPECIAL       1
    1462 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    14631457/** Page directory (32-bit root). */
    14641458#define PGMPOOL_IDX_PD                  1
     
    14711465/** The first normal index. */
    14721466#define PGMPOOL_IDX_FIRST               5
    1473 #else
    1474 /** Page directory (32-bit root). */
    1475 #define PGMPOOL_IDX_PD                  1
    1476 /** The extended PAE page directory (2048 entries, works as root currently). */
    1477 #define PGMPOOL_IDX_PAE_PD              2
    1478 /** PAE Page Directory Table 0. */
    1479 #define PGMPOOL_IDX_PAE_PD_0            3
    1480 /** PAE Page Directory Table 1. */
    1481 #define PGMPOOL_IDX_PAE_PD_1            4
    1482 /** PAE Page Directory Table 2. */
    1483 #define PGMPOOL_IDX_PAE_PD_2            5
    1484 /** PAE Page Directory Table 3. */
    1485 #define PGMPOOL_IDX_PAE_PD_3            6
    1486 /** Page Directory Pointer Table (PAE root, not currently used). */
    1487 #define PGMPOOL_IDX_PDPT                7
    1488 /** AMD64 CR3 level index.*/
    1489 #define PGMPOOL_IDX_AMD64_CR3           8
    1490 /** Nested paging root.*/
    1491 #define PGMPOOL_IDX_NESTED_ROOT         9
    1492 /** The first normal index. */
    1493 #define PGMPOOL_IDX_FIRST               10
    1494 #endif
    14951467/** The last valid index. (inclusive, 14 bits) */
    14961468#define PGMPOOL_IDX_LAST                0x3fff
     
    16061578    PGMPOOLKIND_EPT_PT_FOR_PHYS,
    16071579
    1608 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1609     /** Shw: Root 32-bit page directory. */
    1610     PGMPOOLKIND_ROOT_32BIT_PD,
    1611     /** Shw: Root PAE page directory */
    1612     PGMPOOLKIND_ROOT_PAE_PD,
    1613     /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
    1614     PGMPOOLKIND_ROOT_PDPT,
    1615 #endif
    16161580    /** Shw: Root Nested paging table. */
    16171581    PGMPOOLKIND_ROOT_NESTED,
     
    16861650     * It's a hack required because of REMR3NotifyHandlerPhysicalDeregister. */
    16871651    bool volatile       fReusedFlushPending;
    1688 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    16891652    /** Used to indicate that this page can't be flushed. Important for cr3 root pages or shadow pae pd pages). */
    16901653    bool                fLocked;
    1691 #else
    1692     /** Used to indicate that the guest is mapping the page is also used as a CR3.
    1693      * In these cases the access handler acts differently and will check
    1694      * for mapping conflicts like the normal CR3 handler.
    1695      * @todo When we change the CR3 shadowing to use pool pages, this flag can be
    1696      *       replaced by a list of pages which share access handler.
    1697      */
    1698     bool                fCR3Mix;
    1699 #endif
    17001654} PGMPOOLPAGE, *PPGMPOOLPAGE, **PPPGMPOOLPAGE;
    17011655
     
    21622116    DECLR3CALLBACKMEMBER(int,       pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    21632117    DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2164 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2165     DECLR3CALLBACKMEMBER(int,       pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    2166     DECLR3CALLBACKMEMBER(int,       pfnR3GstUnmonitorCR3,(PVM pVM));
    2167 #endif
    2168 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2169     R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstWriteHandlerCR3;
    2170     R3PTRTYPE(const char *)         pszR3GstWriteHandlerCR3;
    2171     R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstPAEWriteHandlerCR3;
    2172     R3PTRTYPE(const char *)         pszR3GstPAEWriteHandlerCR3;
    2173 #endif
    21742118    DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    21752119    DECLRCCALLBACKMEMBER(int,       pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    21762120    DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2177 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2178     DECLRCCALLBACKMEMBER(int,       pfnRCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    2179     DECLRCCALLBACKMEMBER(int,       pfnRCGstUnmonitorCR3,(PVM pVM));
    2180 #endif
    2181 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2182     RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstWriteHandlerCR3;
    2183     RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstPAEWriteHandlerCR3;
    2184 #endif
    21852121    DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    21862122    DECLR0CALLBACKMEMBER(int,       pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    21872123    DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2188 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2189     DECLR0CALLBACKMEMBER(int,       pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    2190     DECLR0CALLBACKMEMBER(int,       pfnR0GstUnmonitorCR3,(PVM pVM));
    2191 #endif
    2192 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2193     R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstWriteHandlerCR3;
    2194     R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstPAEWriteHandlerCR3;
    2195 #endif
    21962124    /** @} */
    21972125
     
    22932221    uint32_t                        u32Alignment;
    22942222#endif
    2295 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2296     /** The physical address of the currently monitored guest CR3 page.
    2297      * When this value is NIL_RTGCPHYS no page is being monitored. */
    2298     RTGCPHYS                        GCPhysGstCR3Monitored;
    2299 #endif
    23002223    /** @name 32-bit Guest Paging.
    23012224     * @{ */
     
    23502273    /** @} */
    23512274
    2352 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2353     /** @name Shadow paging
    2354      * @{ */
    2355     /** The root page table - R3 Ptr. */
    2356     R3PTRTYPE(void *)               pShwRootR3;
    2357 #  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2358     /** The root page table - R0 Ptr. */
    2359     R0PTRTYPE(void *)               pShwRootR0;
    2360 #  endif
    2361     /** The root page table - RC Ptr. */
    2362     RCPTRTYPE(void *)               pShwRootRC;
    2363 #  if HC_ARCH_BITS == 64
    2364     uint32_t                        u32Padding1; /**< alignment padding. */
    2365 #  endif
    2366     /** The Physical Address (HC) of the current active shadow CR3. */
    2367     RTHCPHYS                        HCPhysShwCR3;
    2368 # endif
    23692275    /** Pointer to the page of the current active CR3 - R3 Ptr. */
    23702276    R3PTRTYPE(PPGMPOOLPAGE)         pShwPageCR3R3;
     
    23812287# endif
    23822288    /** @} */
    2383 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2384     /** @name 32-bit Shadow Paging
    2385      * @{ */
    2386     /** The 32-Bit PD - R3 Ptr. */
    2387     R3PTRTYPE(PX86PD)               pShw32BitPdR3;
    2388     /** The 32-Bit PD - R0 Ptr. */
    2389     R0PTRTYPE(PX86PD)               pShw32BitPdR0;
    2390     /** The 32-Bit PD - RC Ptr. */
    2391     RCPTRTYPE(PX86PD)               pShw32BitPdRC;
    2392 # if HC_ARCH_BITS == 64
    2393     uint32_t                        u32Padding10; /**< alignment padding. */
    2394 # endif
    2395     /** The Physical Address (HC) of the 32-Bit PD. */
    2396     RTHCPHYS                        HCPhysShw32BitPD;
    2397     /** @} */
    2398 
    2399     /** @name PAE Shadow Paging
    2400      * @{ */
    2401     /** The four PDs for the low 4GB - R3 Ptr.
    2402      * Even though these are 4 pointers, what they point at is a single table.
    2403      * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
    2404     R3PTRTYPE(PX86PDPAE)            apShwPaePDsR3[4];
    2405 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2406     /** The four PDs for the low 4GB - R0 Ptr.
    2407      * Same kind of mapping as apHCPaePDs. */
    2408     R0PTRTYPE(PX86PDPAE)            apShwPaePDsR0[4];
    2409 # endif
    2410     /** The four PDs for the low 4GB - RC Ptr.
    2411      * Same kind of mapping as apHCPaePDs. */
    2412     RCPTRTYPE(PX86PDPAE)            apShwPaePDsRC[4];
    2413     /** The Physical Address (HC) of the four PDs for the low 4GB.
    2414      * These are *NOT* 4 contiguous pages. */
    2415     RTHCPHYS                        aHCPhysPaePDs[4];
    2416     /** The Physical Address (HC) of the PAE PDPT. */
    2417     RTHCPHYS                        HCPhysShwPaePdpt;
    2418     /** The PAE PDPT - R3 Ptr. */
    2419     R3PTRTYPE(PX86PDPT)             pShwPaePdptR3;
    2420     /** The PAE PDPT - R0 Ptr. */
    2421     R0PTRTYPE(PX86PDPT)             pShwPaePdptR0;
    2422     /** The PAE PDPT - RC Ptr. */
    2423     RCPTRTYPE(PX86PDPT)             pShwPaePdptRC;
    2424     /** @} */
    2425 # if HC_ARCH_BITS == 64
    2426     RTRCPTR                         alignment5; /**< structure size alignment. */
    2427 # endif
    2428     /** @name Nested Shadow Paging
    2429      * @{ */
    2430     /** Root table; format depends on the host paging mode (AMD-V) or EPT - R3 pointer. */
    2431     RTR3PTR                         pShwNestedRootR3;
    2432 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    2433     /** Root table; format depends on the host paging mode (AMD-V) or EPT - R0 pointer. */
    2434     RTR0PTR                         pShwNestedRootR0;
    2435 # endif
    2436     /** The Physical Address (HC) of the nested paging root. */
    2437     RTHCPHYS                        HCPhysShwNestedRoot;
    2438     /** @}  */
    2439 #endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    24402289
    24412290    /** @name Function pointers for Shadow paging.
     
    24632312    DECLR3CALLBACKMEMBER(int,       pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    24642313    DECLR3CALLBACKMEMBER(int,       pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2465 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2466     DECLR3CALLBACKMEMBER(int,       pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    2467     DECLR3CALLBACKMEMBER(int,       pfnR3GstUnmonitorCR3,(PVM pVM));
    2468 #endif
    2469 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2470     R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstWriteHandlerCR3;
    2471     R3PTRTYPE(const char *)         pszR3GstWriteHandlerCR3;
    2472     R3PTRTYPE(PFNPGMR3PHYSHANDLER)  pfnR3GstPAEWriteHandlerCR3;
    2473     R3PTRTYPE(const char *)         pszR3GstPAEWriteHandlerCR3;
    2474 #endif
    24752314    DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
    24762315    DECLRCCALLBACKMEMBER(int,       pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    24772316    DECLRCCALLBACKMEMBER(int,       pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2478 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2479     DECLRCCALLBACKMEMBER(int,       pfnRCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    2480     DECLRCCALLBACKMEMBER(int,       pfnRCGstUnmonitorCR3,(PVM pVM));
    2481 #endif
    2482 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2483     RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstWriteHandlerCR3;
    2484     RCPTRTYPE(PFNPGMRCPHYSHANDLER)  pfnRCGstPAEWriteHandlerCR3;
    2485 #endif
    24862317#if HC_ARCH_BITS == 64
    24872318    RTRCPTR                         alignment3; /**< structure size alignment. */
     
    24912322    DECLR0CALLBACKMEMBER(int,       pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
    24922323    DECLR0CALLBACKMEMBER(int,       pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
    2493 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2494     DECLR0CALLBACKMEMBER(int,       pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
    2495     DECLR0CALLBACKMEMBER(int,       pfnR0GstUnmonitorCR3,(PVM pVM));
    2496 #endif
    2497 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2498     R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstWriteHandlerCR3;
    2499     R0PTRTYPE(PFNPGMRCPHYSHANDLER)  pfnR0GstPAEWriteHandlerCR3;
    2500 #endif
    25012324    /** @} */
    25022325
     
    30722895int             pgmR0DynMapHCPageCommon(PVM pVM, PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv);
    30732896#endif
    3074 #if !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && (defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0))
    3075 void           *pgmPoolMapPageFallback(PPGM pPGM, PPGMPOOLPAGE pPage);
    3076 #endif
    30772897int             pgmPoolAlloc(PVM pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, uint16_t iUser, uint32_t iUserTable, PPPGMPOOLPAGE ppPage);
    30782898PPGMPOOLPAGE    pgmPoolGetPageByHCPhys(PVM pVM, RTHCPHYS HCPhys);
     
    31012921#endif
    31022922
    3103 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    31042923void            pgmMapClearShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iOldPDE);
    31052924void            pgmMapSetShadowPDEs(PVM pVM, PPGMMAPPING pMap, unsigned iNewPDE);
    31062925int             pgmShwSyncPaePDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPE pGstPdpe, PX86PDPAE *ppPD);
    3107 #endif
    31082926int             pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
    31092927int             pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3);
     
    35323350        return pv;
    35333351    }
    3534 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    35353352    AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
    3536 #else
    3537     return pgmPoolMapPageFallback(pPGM, pPage);
    3538 #endif
    35393353}
    35403354
     
    42124026DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGM pPGM)
    42134027{
    4214 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    42154028    return (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
    4216 #else
    4217 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4218     PX86PD          pShwPd;
    4219     Assert(pPGM->HCPhysShw32BitPD != 0 && pPGM->HCPhysShw32BitPD != NIL_RTHCPHYS);
    4220     int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->HCPhysShw32BitPD, &pShwPd);
    4221     AssertRCReturn(rc, NULL);
    4222     return pShwPd;
    4223 # else
    4224     return pPGM->CTX_SUFF(pShw32BitPd);
    4225 # endif
    4226 #endif
    42274029}
    42284030
     
    42754077DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGM pPGM)
    42764078{
    4277 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    42784079    return (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
    4279 #else
    4280 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4281     PX86PDPT pShwPdpt;
    4282     Assert(pPGM->HCPhysShwPaePdpt != 0 && pPGM->HCPhysShwPaePdpt != NIL_RTHCPHYS);
    4283     int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->HCPhysShwPaePdpt, &pShwPdpt);
    4284     AssertRCReturn(rc, 0);
    4285     return pShwPdpt;
    4286 # else
    4287     return pPGM->CTX_SUFF(pShwPaePdpt);
    4288 # endif
    4289 #endif
    42904080}
    42914081
     
    43004090DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGM pPGM, RTGCPTR GCPtr)
    43014091{
    4302 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    43034092    const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    43044093    PX86PDPT        pPdpt = pgmShwGetPaePDPTPtr(pPGM);
     
    43124101
    43134102    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pShwPde);
    4314 #else
    4315     const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    4316 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4317     PX86PDPAE       pPD;
    4318     int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->aHCPhysPaePDs[iPdpt], &pPD);
    4319     AssertRCReturn(rc, 0);
    4320     return pPD;
    4321 # else
    4322     PX86PDPAE       pPD = pPGM->CTX_SUFF(apShwPaePDs)[iPdpt];
    4323     Assert(pPD);
    4324     return pPD;
    4325 # endif
    4326 #endif
    43274103}
    43284104
     
    43374113DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGM pPGM, PX86PDPT pPdpt, RTGCPTR GCPtr)
    43384114{
    4339 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    43404115    const unsigned  iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
    43414116
     
    43484123
    43494124    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pShwPde);
    4350 #else
    4351     AssertFailed();
    4352     return NULL;
    4353 #endif
    43544125}
    43554126
     
    44024173DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGM pPGM)
    44034174{
    4404 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    44054175    return (PX86PML4)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
    4406 #else
    4407 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    4408     PX86PML4 pShwPml4;
    4409     Assert(pPGM->HCPhysShwCR3 != 0 && pPGM->HCPhysShwCR3 != NIL_RTHCPHYS);
    4410     int rc = PGM_HCPHYS_2_PTR_BY_PGM(pPGM, pPGM->HCPhysShwCR3, &pShwPml4);
    4411     AssertRCReturn(rc, 0);
    4412     return pShwPml4;
    4413 # else
    4414     Assert(pPGM->CTX_SUFF(pShwRoot));
    4415     return (PX86PML4)pPGM->CTX_SUFF(pShwRoot);
    4416 # endif
    4417 #endif
    44184176}
    44194177
     
    47354493#endif /* PGMPOOL_WITH_CACHE */
    47364494
    4737 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    4738 
    47394495/**
    47404496 * Locks a page to prevent flushing (important for cr3 root pages or shadow pae pd pages).
     
    47814537}
    47824538
    4783 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    4784 
    47854539/**
    47864540 * Tells if mappings are to be put into the shadow page table or not
  • trunk/src/VBox/VMM/PGMMap.cpp

    r17489 r17586  
    621621    }
    622622
    623 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    624     /*
    625      * Turn off CR3 updating monitoring.
    626      */
    627     int rc2 = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
    628     AssertRC(rc2);
    629 #endif
    630 
    631623    /*
    632624     * Mark the mappings as fixed and return.
     
    686678    pVM->pgm.s.GCPtrMappingFixed = 0;
    687679    pVM->pgm.s.cbMappingFixed    = 0;
    688 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    689     VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    690 
    691     /*
    692      * Re-enable the CR3 monitoring.
    693      *
    694      * Paranoia: We flush the page pool before doing that because Windows
    695      * is using the CR3 page both as a PD and a PT, e.g. the pool may
    696      * be monitoring it.
    697      */
    698 # ifdef PGMPOOL_WITH_MONITORING
    699     pgmPoolFlushAll(pVM);
    700 # endif
    701     /* Remap CR3 as we have just flushed the CR3 shadow PML4 in case we're in long mode. */
    702     int rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    703     AssertRCSuccess(rc);
    704 
    705     rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    706     AssertRCSuccess(rc);
    707 #endif
    708680    return VINF_SUCCESS;
    709681}
     
    928900    unsigned i = pMap->cPTs;
    929901
    930 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    931902    pgmMapClearShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE);
    932 #endif
    933903
    934904    iOldPDE += i;
     
    941911         */
    942912        pVM->pgm.s.pInterPD->a[iOldPDE].u        = 0;
    943 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    944         pVM->pgm.s.pShw32BitPdR3->a[iOldPDE].u   = 0;
    945 #endif
    946913        /*
    947914         * PAE.
     
    950917        unsigned iPDE = iOldPDE * 2 % 512;
    951918        pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
    952 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    953         pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
    954 #endif
    955919        iPDE++;
    956920        AssertFatal(iPDE < 512);
    957921        pVM->pgm.s.apInterPaePDs[iPD]->a[iPDE].u = 0;
    958 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    959         pVM->pgm.s.apShwPaePDsR3[iPD]->a[iPDE].u = 0;
    960 
    961         /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
    962         pVM->pgm.s.pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
    963 #endif
    964922    }
    965923}
     
    978936    Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s) || PGMGetGuestMode(pVM) <= PGMMODE_PAE_NX);
    979937
    980 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    981938    pgmMapSetShadowPDEs(pVM, pMap, iNewPDE);
    982 #endif
    983939
    984940    /*
     
    994950         * 32-bit.
    995951         */
    996 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    997         Assert(!pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present || pgmMapAreMappingsEnabled(&pVM->pgm.s));
    998         if (    pgmMapAreMappingsEnabled(&pVM->pgm.s)
    999             &&  pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present)
    1000         {
    1001             Assert(!(pPGM->pShw32BitPdR3->a[iNewPDE].u & PGM_PDFLAGS_MAPPING));
    1002             pgmPoolFree(pVM, pPGM->pShw32BitPdR3->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
    1003         }
    1004 #endif
    1005952        X86PDE Pde;
    1006953        /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
    1007954        Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
    1008955        pPGM->pInterPD->a[iNewPDE]        = Pde;
    1009 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1010         if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    1011             pPGM->pShw32BitPdR3->a[iNewPDE]   = Pde;
    1012 #endif
    1013956        /*
    1014957         * PAE.
     
    1016959        const unsigned iPD = iNewPDE / 256;
    1017960        unsigned iPDE = iNewPDE * 2 % 512;
    1018 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1019         Assert(!pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present || pgmMapAreMappingsEnabled(&pVM->pgm.s));
    1020         if (   pgmMapAreMappingsEnabled(&pVM->pgm.s)
    1021             && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
    1022         {
    1023             Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    1024             pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
    1025         }
    1026 #endif
    1027961        X86PDEPAE PdePae0;
    1028962        PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
    1029963        pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
    1030 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1031         if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    1032             pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae0;
    1033 #endif
    1034964        iPDE++;
    1035965        AssertFatal(iPDE < 512);
    1036 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1037         Assert(!pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present || pgmMapAreMappingsEnabled(&pVM->pgm.s));
    1038         if (   pgmMapAreMappingsEnabled(&pVM->pgm.s)
    1039             && pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
    1040         {
    1041             Assert(!(pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    1042             pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
    1043         }
    1044 #endif
    1045966        X86PDEPAE PdePae1;
    1046967        PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
    1047968        pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
    1048 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1049         if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    1050         {
    1051             pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae1;
    1052 
    1053             /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
    1054             pPGM->pShwPaePdptR3->a[iPD].u |= PGM_PLXFLAGS_MAPPING;
    1055         }
    1056 #endif
    1057969    }
    1058970}
  • trunk/src/VBox/VMM/PGMPool.cpp

    r17489 r17586  
    255255    pPool->aPages[PGMPOOL_IDX_PD].Core.Key  = NIL_RTHCPHYS;
    256256    pPool->aPages[PGMPOOL_IDX_PD].GCPhys    = NIL_RTGCPHYS;
    257 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    258257    pPool->aPages[PGMPOOL_IDX_PD].pvPageR3  = 0;
    259258    pPool->aPages[PGMPOOL_IDX_PD].enmKind   = PGMPOOLKIND_32BIT_PD;
    260 #else
    261     pPool->aPages[PGMPOOL_IDX_PD].pvPageR3  = pVM->pgm.s.pShw32BitPdR3;
    262     pPool->aPages[PGMPOOL_IDX_PD].enmKind   = PGMPOOLKIND_ROOT_32BIT_PD;
    263 #endif
    264259    pPool->aPages[PGMPOOL_IDX_PD].idx       = PGMPOOL_IDX_PD;
    265 
    266 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    267     /* The Shadow PAE PDs. This is actually 4 pages! (32 bits guest paging)  */
    268     pPool->aPages[PGMPOOL_IDX_PAE_PD].Core.Key  = NIL_RTHCPHYS;
    269     pPool->aPages[PGMPOOL_IDX_PAE_PD].GCPhys    = NIL_RTGCPHYS;
    270     pPool->aPages[PGMPOOL_IDX_PAE_PD].pvPageR3  = pVM->pgm.s.apShwPaePDsR3[0];
    271     pPool->aPages[PGMPOOL_IDX_PAE_PD].enmKind   = PGMPOOLKIND_ROOT_PAE_PD;
    272     pPool->aPages[PGMPOOL_IDX_PAE_PD].idx       = PGMPOOL_IDX_PAE_PD;
    273 
    274     /* The Shadow PAE PDs for PAE guest mode. */
    275     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    276     {
    277         pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].Core.Key  = NIL_RTHCPHYS;
    278         pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].GCPhys    = NIL_RTGCPHYS;
    279         pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].pvPageR3  = pVM->pgm.s.apShwPaePDsR3[i];
    280         pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].enmKind   = PGMPOOLKIND_PAE_PD_FOR_PAE_PD;
    281         pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].idx       = PGMPOOL_IDX_PAE_PD_0 + i;
    282     }
    283 #endif
    284260
    285261    /* The Shadow PDPT. */
    286262    pPool->aPages[PGMPOOL_IDX_PDPT].Core.Key  = NIL_RTHCPHYS;
    287263    pPool->aPages[PGMPOOL_IDX_PDPT].GCPhys    = NIL_RTGCPHYS;
    288 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    289264    pPool->aPages[PGMPOOL_IDX_PDPT].pvPageR3  = 0;
    290265    pPool->aPages[PGMPOOL_IDX_PDPT].enmKind   = PGMPOOLKIND_PAE_PDPT;
    291 #else
    292     pPool->aPages[PGMPOOL_IDX_PDPT].pvPageR3  = pVM->pgm.s.pShwPaePdptR3;
    293     pPool->aPages[PGMPOOL_IDX_PDPT].enmKind   = PGMPOOLKIND_ROOT_PDPT;
    294 #endif
    295266    pPool->aPages[PGMPOOL_IDX_PDPT].idx       = PGMPOOL_IDX_PDPT;
    296267
     
    298269    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].Core.Key  = NIL_RTHCPHYS;
    299270    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].GCPhys    = NIL_RTGCPHYS;
    300 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    301271    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3  = 0;
    302 #else
    303     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3  = pVM->pgm.s.pShwPaePdptR3;  /* not used - isn't it wrong as well? */
    304 #endif
    305272    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].enmKind   = PGMPOOLKIND_64BIT_PML4;
    306273    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].idx       = PGMPOOL_IDX_AMD64_CR3;
     
    309276    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].Core.Key  = NIL_RTHCPHYS;
    310277    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].GCPhys    = NIL_RTGCPHYS;
    311 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    312278    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].pvPageR3  = 0;
    313 #else
    314     pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].pvPageR3  = pVM->pgm.s.pShwNestedRootR3;
    315 #endif
    316279    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].enmKind   = PGMPOOLKIND_ROOT_NESTED;
    317280    pPool->aPages[PGMPOOL_IDX_NESTED_ROOT].idx       = PGMPOOL_IDX_NESTED_ROOT;
     
    335298        pPool->aPages[iPage].iAgeNext       = NIL_PGMPOOL_IDX;
    336299        pPool->aPages[iPage].iAgePrev       = NIL_PGMPOOL_IDX;
    337 #endif
    338 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    339         Assert(VALID_PTR(pPool->aPages[iPage].pvPageR3));
    340300#endif
    341301        Assert(pPool->aPages[iPage].idx == iPage);
     
    475435        PPGMPOOLPAGE pPage = &pPool->aPages[i];
    476436
    477 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    478437        /* Allocate all pages in low (below 4 GB) memory as 32 bits guests need a page table root in low memory. */
    479438        pPage->pvPageR3 = MMR3PageAllocLow(pVM);
    480 #else
    481         pPage->pvPageR3 = MMR3PageAlloc(pVM);
    482 #endif
    483439        if (!pPage->pvPageR3)
    484440        {
     
    590546    }
    591547    else if (    (   pPage->cModifications < 96 /* it's cheaper here. */
    592 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    593548                  || pgmPoolIsPageLocked(&pVM->pgm.s, pPage)
    594 #else
    595                   || pPage->fCR3Mix
    596 #endif
    597549                  )
    598550             &&  cbBuf <= 4)
  • trunk/src/VBox/VMM/PGMShw.h

    r17559 r17586  
    110110#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
    111111#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
    112 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    113112#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PDPT
    114 #  else
    115 #  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PAE_PD
    116 #  endif
    117 
    118113# endif
    119114#endif
     
    184179PGM_SHW_DECL(int, Enter)(PVM pVM)
    185180{
    186 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    187 # if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
     181#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
    188182    RTGCPHYS     GCPhysCR3 = RT_BIT_64(63);
    189183    PPGMPOOLPAGE pNewShwPageCR3;
     
    207201
    208202    Log(("Enter nested shadow paging mode: root %RHv phys %RHp\n", pVM->pgm.s.pShwPageCR3R3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key));
    209 # endif
    210 #else
    211 # if PGM_SHW_TYPE == PGM_TYPE_NESTED
    212 #   ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    213     pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))pVM->pgm.s.pShwNestedRootR0;
    214 #   else
    215     pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.pShwNestedRootR3;
    216 #   endif
    217     pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.HCPhysShwNestedRoot;
    218 
    219     CPUMSetHyperCR3(pVM, PGMGetHyperCR3(pVM));
    220 # endif
    221203#endif
    222 
    223204    return VINF_SUCCESS;
    224205}
     
    247228PGM_SHW_DECL(int, Exit)(PVM pVM)
    248229{
    249 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    250 # if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
     230#if PGM_SHW_TYPE == PGM_TYPE_NESTED || PGM_SHW_TYPE == PGM_TYPE_EPT
    251231    if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    252232    {
     
    265245        pVM->pgm.s.iShwUserTable = 0;
    266246    }
    267 # endif
    268 # else
    269 # if PGM_SHW_TYPE == PGM_TYPE_NESTED
    270     Assert(HWACCMIsNestedPagingActive(pVM));
    271     pVM->pgm.s.pShwRootR3 = 0;
    272 #  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    273     pVM->pgm.s.pShwRootR0 = 0;
    274 #  endif
    275     pVM->pgm.s.HCPhysShwCR3 = 0;
    276 
    277247    Log(("Leave nested shadow paging mode\n"));
    278 # endif
    279248#endif
    280249    return VINF_SUCCESS;
  • trunk/src/VBox/VMM/VMMAll/PGMAll.cpp

    r17509 r17586  
    7171DECLINLINE(int) pgmShwGetLongModePDPtr(PVM pVM, RTGCPTR64 GCPtr, PX86PML4E *ppPml4e, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
    7272DECLINLINE(int) pgmShwGetPAEPDPtr(PVM pVM, RTGCPTR GCPtr, PX86PDPT *ppPdpt, PX86PDPAE *ppPD);
    73 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    7473DECLINLINE(int) pgmShwGetPaePoolPagePD(PPGM pPGM, RTGCPTR GCPtr, PPGMPOOLPAGE *ppShwPde);
    75 #endif
    7674
    7775/*
     
    738736        pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
    739737        Assert(!pVM->pgm.s.fMappingsFixed);
    740 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    741         Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
    742         rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    743 #endif
    744738    }
    745739
     
    868862        return VERR_PAGE_DIRECTORY_PTR_NOT_PRESENT;
    869863
     864    Assert(pPdpe->u & X86_PDPE_PG_MASK);
    870865    pShwPage = pgmPoolGetPage(pPool, pPdpe->u & X86_PDPE_PG_MASK);
    871866    AssertReturn(pShwPage, VERR_INTERNAL_ERROR);
     
    874869    return VINF_SUCCESS;
    875870}
    876 
    877 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    878871
    879872/**
     
    904897        PGMPOOLKIND enmKind;
    905898
    906 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     899# if defined(IN_RC)
    907900        /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    908901        PGMDynLockHCPage(pVM, (uint8_t *)pPdpe);
     
    952945            Assert(pVM->pgm.s.fSyncFlags & PGM_SYNC_CLEAR_PGM_POOL);
    953946            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    954 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     947# if defined(IN_RC)
    955948            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdpe);
    956949# endif
     
    963956                 | (pGstPdpe->u & ~(X86_PDPE_PG_MASK | X86_PDPE_AVL_MASK | X86_PDPE_PCD | X86_PDPE_PWT));
    964957
    965 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     958# if defined(IN_RC)
    966959        /* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
    967960         * non-present PDPT will continue to cause page faults.
     
    10101003}
    10111004
    1012 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    10131005#ifndef IN_RC
    10141006
     
    10351027    PX86PML4E      pPml4e        = pgmShwGetLongModePML4EPtr(pPGM, iPml4);
    10361028    bool           fNestedPaging = HWACCMIsNestedPagingActive(pVM);
    1037 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    10381029    bool           fPaging      = !!(CPUMGetGuestCR0(pVM) & X86_CR0_PG);
    1039 #endif
    10401030    PPGMPOOLPAGE   pShwPage;
    10411031    int            rc;
     
    10451035        &&  !(pPml4e->u & X86_PML4E_PG_MASK))
    10461036    {
    1047 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    10481037        RTGCPTR64   GCPml4;
    10491038        PGMPOOLKIND enmKind;
     
    10671056        /* Create a reference back to the PDPT by using the index in its shadow page. */
    10681057        rc = pgmPoolAlloc(pVM, GCPml4, enmKind, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
    1069 #else
    1070         if (!fNestedPaging)
    1071         {
    1072             Assert(pGstPml4e && pGstPdpe);
    1073             Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    1074 
    1075             rc = pgmPoolAlloc(pVM, pGstPml4e->u & X86_PML4E_PG_MASK,
    1076                               PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->idx, iPml4, &pShwPage);
    1077         }
    1078         else
    1079         {
    1080             /* AMD-V nested paging. (Intel EPT never comes here) */
    1081             RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
    1082             rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */,
    1083                               PGMPOOLKIND_64BIT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
    1084         }
    1085 #endif
    10861058        if (rc == VERR_PGM_POOL_FLUSHED)
    10871059        {
     
    11101082        &&  !(pPdpe->u & X86_PDPE_PG_MASK))
    11111083    {
    1112 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    11131084        RTGCPTR64   GCPdPt;
    11141085        PGMPOOLKIND enmKind;
     
    11301101        /* Create a reference back to the PDPT by using the index in its shadow page. */
    11311102        rc = pgmPoolAlloc(pVM, GCPdPt, enmKind, pShwPage->idx, iPdPt, &pShwPage);
    1132 #else
    1133         if (!fNestedPaging)
    1134         {
    1135             Assert(pGstPml4e && pGstPdpe);
    1136             Assert(!(pPdpe->u & X86_PDPE_PG_MASK));
    1137             /* Create a reference back to the PDPT by using the index in its shadow page. */
    1138             rc = pgmPoolAlloc(pVM, pGstPdpe->u & X86_PDPE_PG_MASK, PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD, pShwPage->idx, iPdPt, &pShwPage);
    1139         }
    1140         else
    1141         {
    1142             /* AMD-V nested paging. (Intel EPT never comes here) */
    1143             RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
    1144 
    1145             rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
    1146         }
    1147 #endif
    11481103        if (rc == VERR_PGM_POOL_FLUSHED)
    11491104        {
     
    12281183    Assert(HWACCMIsNestedPagingActive(pVM));
    12291184
    1230 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    12311185    pPml4 = (PEPTPML4)PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3));
    1232 #else
    1233 # ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
    1234     rc = PGM_HCPHYS_2_PTR(pVM, pPGM->HCPhysShwNestedRoot, &pPml4);
    1235     AssertRCReturn(rc, rc);
    1236 # else
    1237     pPml4 = (PEPTPML4)pPGM->CTX_SUFF(pShwNestedRoot);
    1238 # endif
    1239 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    12401186    Assert(pPml4);
    12411187
     
    12481194        RTGCPTR64 GCPml4 = (RTGCPTR64)iPml4 << EPT_PML4_SHIFT;
    12491195
    1250 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    12511196        rc = pgmPoolAlloc(pVM, GCPml4, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
    1252 #else
    1253         rc = pgmPoolAlloc(pVM, GCPml4 + RT_BIT_64(63) /* hack: make the address unique */, PGMPOOLKIND_EPT_PDPT_FOR_PHYS, PGMPOOL_IDX_NESTED_ROOT, iPml4, &pShwPage);
    1254 #endif
    12551197        if (rc == VERR_PGM_POOL_FLUSHED)
    12561198        {
     
    12861228        RTGCPTR64 GCPdPt = (RTGCPTR64)iPdPt << EPT_PDPT_SHIFT;
    12871229
    1288 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    12891230        rc = pgmPoolAlloc(pVM, GCPdPt, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
    1290 #else
    1291         rc = pgmPoolAlloc(pVM, GCPdPt + RT_BIT_64(62) /* hack: make the address unique */, PGMPOOLKIND_64BIT_PD_FOR_PHYS, pShwPage->idx, iPdPt, &pShwPage);
    1292 #endif
    12931231        if (rc == VERR_PGM_POOL_FLUSHED)
    12941232        {
     
    14311369VMMDECL(RTHCPHYS) PGMGetHyperCR3(PVM pVM)
    14321370{
    1433 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    14341371    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    14351372    return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
    1436 #else
    1437     PGMMODE enmShadowMode = pVM->pgm.s.enmShadowMode;
    1438     switch (enmShadowMode)
    1439     {
    1440         case PGMMODE_32_BIT:
    1441             return pVM->pgm.s.HCPhysShw32BitPD;
    1442 
    1443         case PGMMODE_PAE:
    1444         case PGMMODE_PAE_NX:
    1445             return pVM->pgm.s.HCPhysShwPaePdpt;
    1446 
    1447         case PGMMODE_AMD64:
    1448         case PGMMODE_AMD64_NX:
    1449             return pVM->pgm.s.HCPhysShwCR3;
    1450 
    1451         case PGMMODE_EPT:
    1452             return pVM->pgm.s.HCPhysShwNestedRoot;
    1453 
    1454         case PGMMODE_NESTED:
    1455             return PGMGetNestedCR3(pVM, PGMGetHostMode(pVM));
    1456 
    1457         default:
    1458             AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
    1459             return ~0;
    1460     }
    1461 #endif
    14621373}
    14631374
     
    14701381VMMDECL(RTHCPHYS) PGMGetNestedCR3(PVM pVM, PGMMODE enmShadowMode)
    14711382{
    1472 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    14731383    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    14741384    return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
    1475 #else
    1476     switch (enmShadowMode)
    1477     {
    1478         case PGMMODE_32_BIT:
    1479             return pVM->pgm.s.HCPhysShw32BitPD;
    1480 
    1481         case PGMMODE_PAE:
    1482         case PGMMODE_PAE_NX:
    1483             return pVM->pgm.s.HCPhysShwPaePdpt;
    1484 
    1485         case PGMMODE_AMD64:
    1486         case PGMMODE_AMD64_NX:
    1487             return pVM->pgm.s.HCPhysShwCR3;
    1488 
    1489         default:
    1490             AssertMsgFailed(("enmShadowMode=%d\n", enmShadowMode));
    1491             return ~0;
    1492     }
    1493 #endif
    14941385}
    14951386
     
    15021393VMMDECL(RTHCPHYS) PGMGetHyper32BitCR3(PVM pVM)
    15031394{
    1504 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    15051395    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    15061396    return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
    1507 #else
    1508     return pVM->pgm.s.HCPhysShw32BitPD;
    1509 #endif
    15101397}
    15111398
     
    15181405VMMDECL(RTHCPHYS) PGMGetHyperPaeCR3(PVM pVM)
    15191406{
    1520 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    15211407    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    15221408    return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
    1523 #else
    1524     return pVM->pgm.s.HCPhysShwPaePdpt;
    1525 #endif
    15261409}
    15271410
     
    15341417VMMDECL(RTHCPHYS) PGMGetHyperAmd64CR3(PVM pVM)
    15351418{
    1536 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    15371419    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    15381420    return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
    1539 #else
    1540     return pVM->pgm.s.HCPhysShwCR3;
    1541 #endif
    15421421}
    15431422
     
    16941573            {
    16951574                pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
    1696 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1697                 rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
    1698 #endif
    16991575            }
    17001576        }
     
    17231599            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
    17241600            Assert(!pVM->pgm.s.fMappingsFixed);
    1725 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1726             rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, GCPhysCR3);
    1727 #endif
    17281601        }
    17291602        if (fGlobal)
     
    18681741        }
    18691742
    1870 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    18711743        if (pVM->pgm.s.GCPhysCR3 != GCPhysCR3)
    18721744        {
     
    18741746            rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
    18751747        }
    1876 #else
    1877         pVM->pgm.s.GCPhysCR3 = GCPhysCR3;
    1878         rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3);
    1879 #endif
    18801748#ifdef IN_RING3
    18811749        if (rc == VINF_PGM_SYNC_CR3)
     
    19141782            pVM->pgm.s.fSyncFlags &= ~PGM_SYNC_MONITOR_CR3;
    19151783            Assert(!pVM->pgm.s.fMappingsFixed);
    1916 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1917             Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored);
    1918             rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    1919 #endif
    19201784        }
    19211785    }
     
    22632127     */
    22642128    register unsigned iPage = pVM->pgm.s.iDynPageMapLast;
    2265 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    22662129    unsigned i;
    22672130    for (i=0;i<(MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT);i++)
     
    22732136    }
    22742137    AssertRelease(i != (MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT));
    2275 #  else
    2276     pVM->pgm.s.iDynPageMapLast = iPage = (iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1);
    2277 #  endif
    22782138
    22792139    pVM->pgm.s.aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(pVM->pgm.s.aHCPhysDynPageMapCache) - 1)] = HCPhys;
  • trunk/src/VBox/VMM/VMMAll/PGMAllBth.h

    r17562 r17586  
    8282PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault)
    8383{
    84 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && defined(VBOX_STRICT)
     84# if defined(IN_RC) && defined(VBOX_STRICT)
    8585    PGMDynCheckLocks(pVM);
    8686# endif
     
    115115#    if PGM_GST_TYPE == PGM_TYPE_PAE
    116116    unsigned        iPDSrc;
    117 #     ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    118117    X86PDPE         PdpeSrc;
    119118    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, &PdpeSrc);
    120 #     else
    121     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, NULL);
    122 #     endif
    123119
    124120#    elif PGM_GST_TYPE == PGM_TYPE_AMD64
     
    159155    const unsigned  iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK;   /* pPDDst index, not used with the pool. */
    160156
    161 #   ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    162157    PX86PDPAE       pPDDst;
    163158#    if PGM_GST_TYPE != PGM_TYPE_PAE
     
    174169    }
    175170    Assert(pPDDst);
    176 
    177 #   else
    178     PX86PDPAE       pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, pvFault);
    179 
    180     /* Did we mark the PDPT as not present in SyncCR3? */
    181     unsigned        iPdpt  = (pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;
    182     PX86PDPT        pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    183     if (!pPdptDst->a[iPdpt].n.u1Present)
    184         pPdptDst->a[iPdpt].n.u1Present = 1;
    185 #   endif
    186171
    187172#  elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     
    943928    PX86PDE         pPdeDst   = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
    944929
    945 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    946930    /* Fetch the pgm pool shadow descriptor. */
    947931    PPGMPOOLPAGE    pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    948932    Assert(pShwPde);
    949 #  endif
    950933
    951934# elif PGM_SHW_TYPE == PGM_TYPE_PAE
     
    961944    }
    962945
    963 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    964946    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    965947    PPGMPOOLPAGE    pShwPde;
     
    973955    pPDDst             = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
    974956    PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
    975 #  else
    976     const unsigned  iPDDst    = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - pool index only atm! */;
    977     PX86PDEPAE      pPdeDst   = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
    978 #  endif
    979957
    980958# else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
    981959    /* PML4 */
    982 #  ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    983     AssertReturn(pVM->pgm.s.pShwRootR3, VERR_INTERNAL_ERROR);
    984 #  endif
    985 
    986960    const unsigned  iPml4     = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
    987961    const unsigned  iPdpt     = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
     
    11651139    }
    11661140# endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
    1167 
    1168 # if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    1169     /*
    1170      * Update the shadow PDPE and free all the shadow PD entries if the PDPE is marked not present.
    1171      * Note: This shouldn't actually be necessary as we monitor the PDPT page for changes.
    1172      */
    1173     if (!pPDSrc)
    1174     {
    1175         /* Guest PDPE not present */
    1176         PX86PDPAE  pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, GCPtrPage);
    1177         PPGMPOOL   pPool  = pVM->pgm.s.CTX_SUFF(pPool);
    1178 
    1179         Assert(!PdpeSrc.n.u1Present);
    1180         LogFlow(("InvalidatePage: guest PDPE %d not present; clear shw pdpe\n", iPdpt));
    1181 
    1182         /* for each page directory entry */
    1183         for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
    1184         {
    1185             if (   pPDDst->a[iPD].n.u1Present
    1186                 && !(pPDDst->a[iPD].u & PGM_PDFLAGS_MAPPING))
    1187             {
    1188                 pgmPoolFree(pVM, pPDDst->a[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
    1189                 pPDDst->a[iPD].u = 0;
    1190             }
    1191         }
    1192         if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
    1193             pPdptDst->a[iPdpt].n.u1Present = 0;
    1194         PGM_INVL_GUEST_TLBS();
    1195     }
    1196     AssertMsg(pVM->pgm.s.fMappingsFixed || (PdpeSrc.u & X86_PDPE_PG_MASK) == pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt], ("%RGp vs %RGp (mon)\n", (PdpeSrc.u & X86_PDPE_PG_MASK), pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt]));
    1197 # endif
    11981141
    11991142
     
    12211164            LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
    12221165                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
    1223 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    12241166            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
    1225 # else
    1226             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
    1227 # endif
    12281167            pPdeDst->u = 0;
    12291168            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
     
    12371176            LogFlow(("InvalidatePage: Out-of-sync (A) at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
    12381177                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
    1239 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    12401178            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
    1241 # else
    1242             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
    1243 # endif
    12441179            pPdeDst->u = 0;
    12451180            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNAs));
     
    12851220                LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64 ShwGCPhys=%RGp iPDDst=%#x\n",
    12861221                         GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u, pShwPage->GCPhys, iPDDst));
    1287 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    12881222                pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
    1289 # else
    1290                 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
    1291 # endif
    12921223                pPdeDst->u = 0;
    12931224                STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDOutOfSync));
     
    13311262            LogFlow(("InvalidatePage: Out-of-sync PD at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
    13321263                     GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u));
    1333 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    13341264            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
    1335 # else
    1336             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
    1337 # endif
    13381265            pPdeDst->u = 0;
    13391266            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages));
     
    13481275        if (!(PdeDst.u & PGM_PDFLAGS_MAPPING))
    13491276        {
    1350 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    13511277            pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst);
    1352 # else
    1353             pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);
    1354 # endif
    13551278            pPdeDst->u = 0;
    13561279            STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs));
     
    16831606    PX86PDE         pPdeDst  = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
    16841607
    1685 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    16861608    /* Fetch the pgm pool shadow descriptor. */
    16871609    PPGMPOOLPAGE    pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    16881610    Assert(pShwPde);
    1689 #  endif
    16901611
    16911612# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    1692 
    1693 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    16941613    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    16951614    PPGMPOOLPAGE    pShwPde;
     
    17031622    pPDDst             = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
    17041623    PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst];
    1705 #  else
    1706     const unsigned  iPDDst   = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
    1707     const unsigned  iPdpt    = (GCPtrPage >> X86_PDPT_SHIFT);
    1708     PX86PDPT        pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
    1709     PX86PDEPAE      pPdeDst  = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
    1710     AssertReturn(pPdeDst, VERR_INTERNAL_ERROR);
    1711 #  endif
     1624
    17121625# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    17131626    const unsigned  iPDDst   = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
     
    19631876     */
    19641877    PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    1965 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    19661878    pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst);
    1967 # else
    1968     pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPDDst);
    1969 # endif
    19701879
    19711880    pPdeDst->u = 0;
     
    24412350    PSHWPDE         pPdeDst  = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
    24422351
    2443 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    24442352    /* Fetch the pgm pool shadow descriptor. */
    24452353    PPGMPOOLPAGE    pShwPde  = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    24462354    Assert(pShwPde);
    2447 #  endif
    24482355
    24492356# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    2450 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    24512357    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    24522358    PPGMPOOLPAGE    pShwPde;
     
    24612367    pPDDst  = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
    24622368    pPdeDst = &pPDDst->a[iPDDst];
    2463 #  else
    2464     const unsigned  iPDDst   = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */;
    2465     const unsigned  iPdpt    = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpt);
    2466     PX86PDPT        pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst);
    2467     PSHWPDE         pPdeDst  = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
    2468 #  endif
     2369
    24692370# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    24702371    const unsigned  iPdpt    = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
     
    25212422    Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/
    25222423
    2523 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     2424# if defined(IN_RC)
    25242425    /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */
    25252426    PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst);
     
    25502451            GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2);
    25512452# endif
    2552 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    25532453            rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx,      iPDDst, &pShwPage);
    2554 # else
    2555             rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
    2556 # endif
    25572454        }
    25582455        else
     
    25632460            GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT);
    25642461# endif
    2565 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    25662462            rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx,      iPDDst, &pShwPage);
    2567 # else
    2568             rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
    2569 # endif
    25702463        }
    25712464        if (rc == VINF_SUCCESS)
     
    25922485            }
    25932486            *pPdeDst = PdeDst;
    2594 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     2487# if defined(IN_RC)
    25952488            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    25962489# endif
     
    26002493        {
    26012494            VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    2602 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     2495# if defined(IN_RC)
    26032496            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    26042497# endif
     
    26332526                         | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D));
    26342527                *pPdeDst = PdeDst;
    2635 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     2528# if defined(IN_RC)
    26362529                PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    26372530# endif
     
    27392632            }
    27402633            *pPdeDst = PdeDst;
    2741 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     2634# if defined(IN_RC)
    27422635            PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst);
    27432636# endif
     
    28912784    PSHWPDE         pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage);
    28922785
    2893 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    28942786    /* Fetch the pgm pool shadow descriptor. */
    28952787    PPGMPOOLPAGE    pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3);
    28962788    Assert(pShwPde);
    2897 #  endif
    28982789
    28992790# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    2900 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    29012791    const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK;
    29022792    PPGMPOOLPAGE    pShwPde;
     
    29112801    pPDDst  = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde);
    29122802    pPdeDst = &pPDDst->a[iPDDst];
    2913 #  else
    2914     const unsigned  iPDDst  = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm!*/;
    2915     PX86PDEPAE      pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
    2916 #  endif
    29172803
    29182804# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     
    29702856    /* Virtual address = physical address */
    29712857    GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK;
    2972 # if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_EPT || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    29732858    rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage);
    2974 # else
    2975     rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);
    2976 # endif
    29772859
    29782860    if (    rc == VINF_SUCCESS
     
    30322914#  elif PGM_GST_TYPE == PGM_TYPE_PAE
    30332915    unsigned        iPDSrc;
    3034 #   ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    30352916    X86PDPE         PdpeSrc;
    30362917    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
    3037 #   else
    3038     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
    3039 #   endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    30402918    if (!pPDSrc)
    30412919        return VINF_SUCCESS; /* not present */
     
    30662944        const X86PDE    PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage);
    30672945# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    3068 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    30692946        const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
    30702947        PX86PDPAE       pPDDst;
     
    30842961        Assert(pPDDst);
    30852962        PdeDst = pPDDst->a[iPDDst];
    3086 #  else
    3087         const X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);
    3088 #  endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    30892963
    30902964# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
     
    31773051#  elif PGM_GST_TYPE == PGM_TYPE_PAE
    31783052    unsigned        iPDSrc;
    3179 #   ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    31803053    X86PDPE         PdpeSrc;
    31813054    PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc);
    3182 #   else
    3183     PGSTPD          pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);
    3184 #   endif
    31853055
    31863056    if (pPDSrc)
     
    32133083# elif PGM_SHW_TYPE == PGM_TYPE_PAE
    32143084    PX86PDEPAE      pPdeDst;
    3215 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    32163085    const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
    32173086    PX86PDPAE       pPDDst;
     
    32303099    Assert(pPDDst);
    32313100    pPdeDst = &pPDDst->a[iPDDst];
    3232 #  else
    3233     pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
    3234 #  endif
     3101
    32353102# elif PGM_SHW_TYPE == PGM_TYPE_AMD64
    32363103    const unsigned  iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK);
     
    34093276#else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
    34103277
    3411 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    34123278#  ifdef PGM_WITHOUT_MAPPINGS
    34133279    Assert(pVM->pgm.s.fMappingsFixed);
     
    34273293#  endif
    34283294    return VINF_SUCCESS;
    3429 # else
    3430     /*
    3431      * PAE and 32-bit legacy mode (shadow).
    3432      * (Guest PAE, 32-bit legacy, protected and real modes.)
    3433      */
    3434     Assert(fGlobal || (cr4 & X86_CR4_PGE));
    3435     MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Global) : &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3NotGlobal));
    3436 
    3437 # if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
    3438     bool const fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);
    3439 
    3440     /*
    3441      * Get page directory addresses.
    3442      */
    3443 #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    3444     PX86PDE     pPDEDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, 0);
    3445 #  else /* PGM_SHW_TYPE == PGM_TYPE_PAE */
    3446 #   if PGM_GST_TYPE == PGM_TYPE_32BIT
    3447     PX86PDEPAE  pPDEDst = NULL;
    3448 #   endif
    3449 #  endif
    3450 
    3451 #  if PGM_GST_TYPE == PGM_TYPE_32BIT
    3452     PGSTPD      pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
    3453     Assert(pPDSrc);
    3454 #   if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
    3455     Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == (RTR3PTR)pPDSrc);
    3456 #   endif
    3457 #  endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */
    3458 
    3459     /*
    3460      * Iterate the the CR3 page.
    3461      */
    3462     PPGMMAPPING pMapping;
    3463     unsigned    iPdNoMapping;
    3464     const bool  fRawR0Enabled = EMIsRawRing0Enabled(pVM);
    3465     PPGMPOOL    pPool         = pVM->pgm.s.CTX_SUFF(pPool);
    3466 
    3467     /* Only check mappings if they are supposed to be put into the shadow page table. */
    3468     if (pgmMapAreMappingsEnabled(&pVM->pgm.s))
    3469     {
    3470         pMapping      = pVM->pgm.s.CTX_SUFF(pMappings);
    3471         iPdNoMapping  = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;
    3472     }
    3473     else
    3474     {
    3475         pMapping      = 0;
    3476         iPdNoMapping  = ~0U;
    3477     }
    3478 
    3479 #  if PGM_GST_TYPE == PGM_TYPE_PAE
    3480     for (uint64_t iPdpt = 0; iPdpt < GST_PDPE_ENTRIES; iPdpt++)
    3481     {
    3482         unsigned        iPDSrc;
    3483         X86PDPE         PdpeSrc;
    3484         PGSTPD          pPDSrc    = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT, &iPDSrc, &PdpeSrc);
    3485         PX86PDEPAE      pPDEDst   = pgmShwGetPaePDEPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT);
    3486         PX86PDPT        pPdptDst  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    3487 
    3488         if (pPDSrc == NULL)
    3489         {
    3490             /* PDPE not present */
    3491             if (pPdptDst->a[iPdpt].n.u1Present)
    3492             {
    3493                 LogFlow(("SyncCR3: guest PDPE %lld not present; clear shw pdpe\n", iPdpt));
    3494                 /* for each page directory entry */
    3495                 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
    3496                 {
    3497                     if (   pPDEDst[iPD].n.u1Present
    3498                         && !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))
    3499                     {
    3500                         pgmPoolFree(pVM, pPDEDst[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
    3501                         pPDEDst[iPD].u = 0;
    3502                     }
    3503                 }
    3504             }
    3505             if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
    3506                 pPdptDst->a[iPdpt].n.u1Present = 0;
    3507             continue;
    3508         }
    3509 #  else  /* PGM_GST_TYPE != PGM_TYPE_PAE */
    3510     {
    3511 #  endif /* PGM_GST_TYPE != PGM_TYPE_PAE */
    3512         for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)
    3513         {
    3514 #  if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    3515             if ((iPD & 255) == 0) /* Start of new PD. */
    3516                 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)iPD << GST_PD_SHIFT);
    3517 #  endif
    3518 #  if PGM_SHW_TYPE == PGM_TYPE_32BIT
    3519             Assert(pgmShwGet32BitPDEPtr(&pVM->pgm.s, (uint32_t)iPD << SHW_PD_SHIFT) == pPDEDst);
    3520 #  elif PGM_SHW_TYPE == PGM_TYPE_PAE
    3521 #   if defined(VBOX_STRICT) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* Unfortuantely not reliable with PGMR0DynMap and multiple VMs. */
    3522             RTGCPTR GCPtrStrict = (uint32_t)iPD << GST_PD_SHIFT;
    3523 #    if PGM_GST_TYPE == PGM_TYPE_PAE
    3524             GCPtrStrict |= iPdpt << X86_PDPT_SHIFT;
    3525 #    endif
    3526             AssertMsg(pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict) == pPDEDst, ("%p vs %p (%RGv)\n", pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict), pPDEDst, GCPtrStrict));
    3527 #   endif /* VBOX_STRICT */
    3528 #  endif
    3529             GSTPDE PdeSrc = pPDSrc->a[iPD];
    3530             if (    PdeSrc.n.u1Present
    3531                 &&  (PdeSrc.n.u1User || fRawR0Enabled))
    3532             {
    3533 #  if    (   PGM_GST_TYPE == PGM_TYPE_32BIT \
    3534           || PGM_GST_TYPE == PGM_TYPE_PAE) \
    3535       && !defined(PGM_WITHOUT_MAPPINGS)
    3536 
    3537                 /*
    3538                  * Check for conflicts with GC mappings.
    3539                  */
    3540 #   if PGM_GST_TYPE == PGM_TYPE_PAE
    3541                 if (iPD + iPdpt * X86_PG_PAE_ENTRIES == iPdNoMapping)
    3542 #   else
    3543                 if (iPD == iPdNoMapping)
    3544 #   endif
    3545                 {
    3546                     if (pVM->pgm.s.fMappingsFixed)
    3547                     {
    3548                         /* It's fixed, just skip the mapping. */
    3549                         const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
    3550                         Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
    3551                         iPD += cPTs - 1;
    3552 #   if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
    3553                         pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
    3554 #   else
    3555                         pPDEDst += cPTs;
    3556 #   endif
    3557                         pMapping = pMapping->CTX_SUFF(pNext);
    3558                         iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
    3559                         continue;
    3560                     }
    3561 #   ifdef IN_RING3
    3562 #    if PGM_GST_TYPE == PGM_TYPE_32BIT
    3563                     int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
    3564 #    elif PGM_GST_TYPE == PGM_TYPE_PAE
    3565                     int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
    3566 #    endif
    3567                     if (RT_FAILURE(rc))
    3568                         return rc;
    3569 
    3570                     /*
    3571                      * Update iPdNoMapping and pMapping.
    3572                      */
    3573                     pMapping = pVM->pgm.s.pMappingsR3;
    3574                     while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
    3575                         pMapping = pMapping->pNextR3;
    3576                     iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
    3577 #   else  /* !IN_RING3 */
    3578                     LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
    3579                     return VINF_PGM_SYNC_CR3;
    3580 #   endif /* !IN_RING3 */
    3581                 }
    3582 #  else  /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
    3583                 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
    3584 #  endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
    3585 
    3586                 /*
    3587                  * Sync page directory entry.
    3588                  *
     3589                  * The current approach is to allocate the page table but to set
     3590                  * the entry to not-present and postpone the page table syncing till
     3591                  * it's actually used.
    3592                  */
    3593 #   if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    3594                 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
    3595 #   elif PGM_GST_TYPE == PGM_TYPE_PAE
    3596                 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES; NOREF(iPdShw);
    3597 #   else
    3598                 const unsigned iPdShw = iPD; NOREF(iPdShw);
    3599 #   endif
    3600                 {
    3601                     SHWPDE PdeDst = *pPDEDst;
    3602                     if (PdeDst.n.u1Present)
    3603                     {
    3604                         PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);
    3605                         RTGCPHYS     GCPhys;
    3606                         if (    !PdeSrc.b.u1Size
    3607                             ||  !fBigPagesSupported)
    3608                         {
    3609                             GCPhys = PdeSrc.u & GST_PDE_PG_MASK;
    3610 #   if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    3611                             /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. */
    3612                             GCPhys |= i * (PAGE_SIZE / 2);
    3613 #   endif
    3614                         }
    3615                         else
    3616                         {
    3617                             GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);
    3618 #   if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    3619                             /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/
    3620                             GCPhys |= i * X86_PAGE_2M_SIZE;
    3621 #   endif
    3622                         }
    3623 
    3624                         if (    pShwPage->GCPhys == GCPhys
    3625                             &&  pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)
    3626                             &&  (   pShwPage->fCached
    3627                                 || (   !fGlobal
    3628                                     && (   false
    3629 #   ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH
    3630                                         || (   (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
    3631                                             && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */
    3632                                         || (  !pShwPage->fSeenNonGlobal
    3633                                             && (cr4 & X86_CR4_PGE))
    3634 #   endif
    3635                                         )
    3636                                     )
    3637                                 )
    3638                             &&  (   (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))
    3639                                 || (   fBigPagesSupported
    3640                                     &&     ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)
    3641                                         ==  ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))
    3642                                 )
    3643                         )
    3644                         {
    3645 #   ifdef VBOX_WITH_STATISTICS
    3646                             if (   !fGlobal
    3647                                 && (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)
    3648                                 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))
    3649                                 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPD));
    3650                             else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))
    3651                                 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPT));
    3652                             else
    3653                                 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstCacheHit));
    3654 #   endif /* VBOX_WITH_STATISTICS */
    3655     /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.
    3656     * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. */
    3657     //#  ifdef PGMPOOL_WITH_CACHE
    3658     //                        pgmPoolCacheUsed(pPool, pShwPage);
    3659     //#  endif
    3660                         }
    3661                         else
    3662                         {
    3663                             pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);
    3664                             pPDEDst->u = 0;
    3665                             MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreed));
    3666                         }
    3667                     }
    3668                     else
    3669                         MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstNotPresent));
    3670 
    3671                     /* advance */
    3672                     pPDEDst++;
    3673                 } /* foreach 2MB PAE PDE in 4MB guest PDE */
    3674             }
    3675 #  if PGM_GST_TYPE == PGM_TYPE_PAE
    3676             else if (iPD + iPdpt * X86_PG_PAE_ENTRIES != iPdNoMapping)
    3677 #  else
    3678             else if (iPD != iPdNoMapping)
    3679 #  endif
    3680             {
    3681                 /*
    3682                  * Check if there is any page directory to mark not present here.
    3683                  */
    3684 #   if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT
    3685                 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */
    3686 #   elif PGM_GST_TYPE == PGM_TYPE_PAE
    3687                 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES;
    3688 #   else
    3689                 const unsigned iPdShw = iPD;
    3690 #   endif
    3691                 {
    3692                     if (pPDEDst->n.u1Present)
    3693                     {
    3694                         pgmPoolFree(pVM, pPDEDst->u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdShw);
    3695                         pPDEDst->u = 0;
    3696                         MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreedSrcNP));
    3697                     }
    3698                     pPDEDst++;
    3699                 }
    3700             }
    3701             else
    3702             {
    3703 #  if    (   PGM_GST_TYPE == PGM_TYPE_32BIT \
    3704           || PGM_GST_TYPE == PGM_TYPE_PAE)  \
    3705       && !defined(PGM_WITHOUT_MAPPINGS)
    3706 
    3707                 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;
    3708 
    3709                 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    3710                 if (pVM->pgm.s.fMappingsFixed)
    3711                 {
    3712                     /* It's fixed, just skip the mapping. */
    3713                     pMapping = pMapping->CTX_SUFF(pNext);
    3714                     iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
    3715                 }
    3716                 else
    3717                 {
    3718                     /*
    3719                      * Check for conflicts for subsequent pagetables
    3720                      * and advance to the next mapping.
    3721                      */
    3722                     iPdNoMapping = ~0U;
    3723                     unsigned iPT = cPTs;
    3724                     while (iPT-- > 1)
    3725                     {
    3726                         if (    pPDSrc->a[iPD + iPT].n.u1Present
    3727                             &&  (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))
    3728                         {
    3729 #   ifdef IN_RING3
    3730 #    if PGM_GST_TYPE == PGM_TYPE_32BIT
    3731                             int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);
    3732 #    elif PGM_GST_TYPE == PGM_TYPE_PAE
    3733                             int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));
    3734 #    endif
    3735                             if (RT_FAILURE(rc))
    3736                                 return rc;
    3737 
    3738                             /*
    3739                              * Update iPdNoMapping and pMapping.
    3740                              */
    3741                             pMapping = pVM->pgm.s.CTX_SUFF(pMappings);
    3742                             while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))
    3743                                 pMapping = pMapping->CTX_SUFF(pNext);
    3744                             iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;
    3745                             break;
    3746 #   else
    3747                             LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));
    3748                             return VINF_PGM_SYNC_CR3;
    3749 #   endif
    3750                         }
    3751                     }
    3752                     if (iPdNoMapping == ~0U && pMapping)
    3753                     {
    3754                         pMapping = pMapping->CTX_SUFF(pNext);
    3755                         if (pMapping)
    3756                             iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;
    3757                     }
    3758                 }
    3759 
    3760                 /* advance. */
    3761                 Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);
    3762                 iPD += cPTs - 1;
    3763 #   if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */
    3764                 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);
    3765 #   else
    3766                 pPDEDst += cPTs;
    3767 #   endif
    3768 #   if PGM_GST_TYPE != PGM_SHW_TYPE
    3769                 AssertCompile(PGM_GST_TYPE == PGM_TYPE_32BIT && PGM_SHW_TYPE == PGM_TYPE_PAE);
    3770 #   endif
    3771 #  else  /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
    3772                 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));
    3773 #  endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */
    3774             }
    3775 
    3776         } /* for iPD */
    3777     } /* for each PDPTE (PAE) */
    3778     return VINF_SUCCESS;
    3779 
    3780 # else /* guest real and protected mode */
    3781     return VINF_SUCCESS;
    3782 # endif
    3783 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    37843295#endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */
    37853296}
     
    46944205            pVM->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3;
    46954206#  endif
    4696 #  ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    4697             if (!HWACCMIsNestedPagingActive(pVM))
    4698             {
    4699                 /*
    4700                  * Update the shadow root page as well since that's not fixed.
    4701                  */
    4702                 /** @todo Move this into PGMAllBth.h. */
    4703                 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    4704                 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    4705                 {
    4706                     /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */
    4707                     /** @todo Coordinate this better with the pool. */
    4708                     if (pVM->pgm.s.CTX_SUFF(pShwPageCR3)->enmKind != PGMPOOLKIND_FREE)
    4709                         pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
    4710                     pVM->pgm.s.pShwPageCR3R3 = 0;
    4711                     pVM->pgm.s.pShwPageCR3R0 = 0;
    4712                     pVM->pgm.s.pShwRootR3    = 0;
    4713 #  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    4714                     pVM->pgm.s.pShwRootR0    = 0;
    4715 #  endif
    4716                     pVM->pgm.s.HCPhysShwCR3  = 0;
    4717                 }
    4718 
    4719                 Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));
    4720                 rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    4721                 if (rc == VERR_PGM_POOL_FLUSHED)
    4722                 {
    4723                     Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));
    4724                     Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
    4725                     return VINF_PGM_SYNC_CR3;
    4726                 }
    4727                 AssertRCReturn(rc, rc);
    4728 #  ifdef IN_RING0
    4729                 pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    4730 #  else
    4731                 pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    4732 #  endif
    4733                 pVM->pgm.s.pShwRootR3    = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;
    4734                 Assert(pVM->pgm.s.pShwRootR3);
    4735 #  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    4736                 pVM->pgm.s.pShwRootR0    = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    4737 #  endif
    4738                 pVM->pgm.s.HCPhysShwCR3  = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;
    4739                 rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */
    4740             }
    4741 #  endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    47424207# endif
    47434208        }
     
    47524217#endif
    47534218
    4754 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    47554219    /* Update shadow paging info for guest modes with paging (32, pae, 64). */
    47564220# if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT \
     
    48384302
    48394303# endif
    4840 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    48414304
    48424305    return rc;
     
    48844347    pVM->pgm.s.pGstAmd64Pml4R0 = 0;
    48854348# endif
    4886 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    4887     if (!HWACCMIsNestedPagingActive(pVM))
    4888     {
    4889         pVM->pgm.s.pShwRootR3 = 0;
    4890 #  ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    4891         pVM->pgm.s.pShwRootR0 = 0;
    4892 #  endif
    4893         pVM->pgm.s.HCPhysShwCR3 = 0;
    4894         if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    4895         {
    4896             PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
    4897             pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);
    4898             pVM->pgm.s.pShwPageCR3R3 = 0;
    4899             pVM->pgm.s.pShwPageCR3R0 = 0;
    4900         }
    4901     }
    4902 # endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    49034349
    49044350#else /* prot/real mode stub */
     
    49064352#endif
    49074353
    4908 #if defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time */
     4354#if !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time */
    49094355    /* Update shadow paging info. */
    49104356# if  (   (   PGM_SHW_TYPE == PGM_TYPE_32BIT  \
     
    49394385    }
    49404386# endif
    4941 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY && !IN_RC*/
     4387#endif /* !IN_RC*/
    49424388
    49434389    return rc;
  • trunk/src/VBox/VMM/VMMAll/PGMAllGst.h

    r17215 r17586  
    2828PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask);
    2929PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE);
    30 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    31 PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
    32 PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
    33 #endif
    3430PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4);
    35 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    36 # ifndef IN_RING3
    37 PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
    38 #  if PGM_GST_TYPE == PGM_TYPE_PAE \
    39   || PGM_GST_TYPE == PGM_TYPE_AMD64
    40 PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);
    41 #  endif
    42 # endif
    43 #endif
    4431__END_DECLS
    4532
     
    311298#endif
    312299}
    313 
    314 
    315 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    316 
    317 #undef LOG_GROUP
    318 #define LOG_GROUP LOG_GROUP_PGM_POOL
    319 
    320 /**
    321  * Registers physical page monitors for the necessary paging
    322  * structures to detect conflicts with our guest mappings.
    323  *
    324  * This is always called after mapping CR3.
    325  * This is never called with fixed mappings.
    326  *
    327  * @returns VBox status, no specials.
    328  * @param   pVM             VM handle.
    329  * @param   GCPhysCR3       The physical address in the CR3 register.
    330  */
    331 PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)
    332 {
    333     Assert(!pVM->pgm.s.fMappingsFixed);
    334     int rc = VINF_SUCCESS;
    335 
    336     /*
    337      * Register/Modify write phys handler for guest's CR3 if it changed.
    338      */
    339 #if PGM_GST_TYPE == PGM_TYPE_32BIT
    340 
    341     if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    342     {
    343 # ifndef PGMPOOL_WITH_MIXED_PT_CR3
    344         const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;
    345         if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    346             rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);
    347         else
    348             rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,
    349                                               pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,
    350                                               pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,
    351                                               pVM->pgm.s.pfnRCGstWriteHandlerCR3, 0,
    352                                               pVM->pgm.s.pszR3GstWriteHandlerCR3);
    353 # else  /* PGMPOOL_WITH_MIXED_PT_CR3 */
    354         rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
    355                                          pVM->pgm.s.enmShadowMode == PGMMODE_PAE
    356                                       || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
    357                                       ? PGMPOOL_IDX_PAE_PD
    358                                       : PGMPOOL_IDX_PD,
    359                                       GCPhysCR3);
    360 # endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
    361         if (RT_FAILURE(rc))
    362         {
    363             AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
    364                              rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
    365             return rc;
    366         }
    367         pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    368     }
    369 
    370 #elif PGM_GST_TYPE == PGM_TYPE_PAE
    371     /* Monitor the PDPT page */
    372     /*
    373      * Register/Modify write phys handler for guest's CR3 if it changed.
    374      */
    375 # ifndef PGMPOOL_WITH_MIXED_PT_CR3
    376     AssertFailed();
    377 # endif
    378     if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)
    379     {
    380         rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);
    381         if (RT_FAILURE(rc))
    382         {
    383             AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
    384                              rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));
    385             return rc;
    386         }
    387         pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;
    388     }
    389 
    390     /*
    391      * Do the 4 PDs.
    392      */
    393     PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
    394     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    395     {
    396         if (pGuestPDPT->a[i].n.u1Present)
    397         {
    398             RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;
    399             if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)
    400             {
    401                 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
    402 
    403                 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);
    404             }
    405 
    406             if (RT_FAILURE(rc))
    407             {
    408                 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",
    409                                  rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));
    410                 return rc;
    411             }
    412             pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;
    413         }
    414         else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
    415         {
    416             rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
    417             AssertRC(rc);
    418             pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
    419         }
    420     }
    421 
    422 #else
    423     /* prot/real/amd64 mode stub */
    424 
    425 #endif
    426     return rc;
    427 }
    428 
    429 /**
    430  * Deregisters any physical page monitors installed by MonitorCR3.
    431  *
    432  * @returns VBox status code, no specials.
    433  * @param   pVM         The VM handle.
    434  */
    435 PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)
    436 {
    437     int rc = VINF_SUCCESS;
    438 
    439     /*
    440      * Deregister the access handlers.
    441      *
    442      * PGMSyncCR3 will reinstall it if required and PGMSyncCR3 will be executed
    443      * before we enter GC again.
    444      */
    445 #if PGM_GST_TYPE == PGM_TYPE_32BIT
    446     if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    447     {
    448 # ifndef PGMPOOL_WITH_MIXED_PT_CR3
    449         rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);
    450         AssertRCReturn(rc, rc);
    451 # else /* PGMPOOL_WITH_MIXED_PT_CR3 */
    452         rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),
    453                                            pVM->pgm.s.enmShadowMode == PGMMODE_PAE
    454                                         || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX
    455                                         ? PGMPOOL_IDX_PAE_PD
    456                                         : PGMPOOL_IDX_PD);
    457         AssertRCReturn(rc, rc);
    458 # endif /* PGMPOOL_WITH_MIXED_PT_CR3 */
    459         pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;
    460     }
    461 
    462 #elif PGM_GST_TYPE == PGM_TYPE_PAE
    463     /* The PDPT page */
    464 # ifndef PGMPOOL_WITH_MIXED_PT_CR3
    465     AssertFailed();
    466 # endif
    467 
    468     if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)
    469     {
    470         rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);
    471         AssertRC(rc);
    472     }
    473 
    474     /* The 4 PDs. */
    475     for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    476     {
    477         if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)
    478         {
    479             Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);
    480             int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);
    481             AssertRC(rc2);
    482             if (RT_FAILURE(rc2))
    483                 rc = rc2;
    484             pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;
    485         }
    486     }
    487 #else
    488     /* prot/real/amd64 mode stub */
    489 #endif
    490     return rc;
    491 
    492 }
    493 
    494 #undef LOG_GROUP
    495 #define LOG_GROUP LOG_GROUP_PGM
    496 
    497 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    498300
    499301
     
    704506}
    705507
    706 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    707 
    708 #if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)
    709 
    710 /**
    711  * Write access handler for the Guest CR3 page in 32-bit mode.
    712  *
     713  * This will try to interpret the instruction; on failure, fall back to the recompiler.
     714  * Check if the changed PDEs are marked present and conflict with our
     715  * mappings. If they conflict, we'll switch to the host context and resolve it there.
     716  *
     717  * @returns VBox status code (appropriate for trap handling and GC return).
    718  * @param   pVM         VM Handle.
    719  * @param   uErrorCode  CPU Error code.
    720  * @param   pRegFrame   Trap register frame.
    721  * @param   pvFault     The fault address (cr2).
    722  * @param   GCPhysFault The GC physical address corresponding to pvFault.
    723  * @param   pvUser      User argument.
    724  */
    725 PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
    726 {
    727     AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    728 
    729     /*
    730      * Try interpret the instruction.
    731      */
    732     uint32_t cb;
    733     int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    734     if (RT_SUCCESS(rc) && cb)
    735     {
    736         /*
    737          * Check if the modified PDEs are present and mappings.
    738          */
    739         const RTGCPTR   offPD = GCPhysFault & PAGE_OFFSET_MASK;
    740         const unsigned  iPD1  = offPD / sizeof(X86PDE);
    741         const unsigned  iPD2  = (offPD + cb - 1) / sizeof(X86PDE);
    742 
    743         Assert(cb > 0 && cb <= 8);
    744         Assert(iPD1 < X86_PG_ENTRIES);
    745         Assert(iPD2 < X86_PG_ENTRIES);
    746 
    747 #ifdef DEBUG
    748         Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));
    749         if (iPD1 != iPD2)
    750             Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));
    751 #endif
    752 
    753         if (!pVM->pgm.s.fMappingsFixed)
    754         {
    755             PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);
    756             if (    (   pPDSrc->a[iPD1].n.u1Present
    757                      && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )
    758                 ||  (   iPD1 != iPD2
    759                      && pPDSrc->a[iPD2].n.u1Present
    760                      && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )
    761                )
    762             {
    763                 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
    764                 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    765                 if (rc == VINF_SUCCESS)
    766                     rc = VINF_PGM_SYNC_CR3;
    767                 Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, rc));
    768                 return rc;
    769             }
    770         }
    771 
    772         STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
    773     }
    774     else
    775     {
    776         Assert(RT_FAILURE(rc));
    777         if (rc == VERR_EM_INTERPRETER)
    778             rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
    779         Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
    780         STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
    781     }
    782     return rc;
    783 }
    784 
    785 #endif /* PGM_TYPE_32BIT && !IN_RING3 */
    786 #if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)
    787 
    788 /**
    789  * Write access handler for the Guest CR3 page in PAE mode.
    790  *
     791  * This will try to interpret the instruction; on failure, fall back to the recompiler.
     792  * Check if the changed PDEs are marked present and conflict with our
     793  * mappings. If they conflict, we'll switch to the host context and resolve it there.
     794  *
     795  * @returns VBox status code (appropriate for trap handling and GC return).
    796  * @param   pVM         VM Handle.
    797  * @param   uErrorCode  CPU Error code.
    798  * @param   pRegFrame   Trap register frame.
    799  * @param   pvFault     The fault address (cr2).
    800  * @param   GCPhysFault The GC physical address corresponding to pvFault.
    801  * @param   pvUser      User argument.
    802  */
    803 PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
    804 {
    805     AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    806 
    807     /*
    808      * Try interpret the instruction.
    809      */
    810     uint32_t cb;
    811     int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    812     if (RT_SUCCESS(rc) && cb)
    813     {
    814         /*
    815          * Check if any of the PDs have changed.
    816          * We'll simply check all of them instead of figuring out which one/two to check.
    817          */
    818         PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
    819         for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    820         {
    821             if (    pGuestPDPT->a[i].n.u1Present
    822                 &&      (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)
    823                     !=  pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])
    824             {
    825                 /*
    826                  * The PDPE has changed.
    827                  * We will schedule a monitoring update for the next TLB Flush,
    828                  * InvalidatePage or SyncCR3.
    829                  *
    830                  * This isn't perfect, because a lazy page sync might be dealing with an half
    831                  * updated PDPE. However, we assume that the guest OS is disabling interrupts
    832                  * and being extremely careful (cmpxchg8b) when updating a PDPE where it's
    833                  * executing.
    834                  */
    835                 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;
    836                 Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",
    837                      i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));
    838             }
    839         }
    840 
    841         STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
    842     }
    843     else
    844     {
    845         Assert(RT_FAILURE(rc));
    846         STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
    847         if (rc == VERR_EM_INTERPRETER)
    848             rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
    849     }
    850     Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));
    851     return rc;
    852 }
    853 
    854 
    855 /**
    856  * Write access handler for the Guest PDs in PAE mode.
    857  *
    858  * This will try interpret the instruction, if failure fail back to the recompiler.
    859  * Check if the changed PDEs are marked present and conflicts with our
    860  * mappings. If conflict, we'll switch to the host context and resolve it there
    861  *
    862  * @returns VBox status code (appropritate for trap handling and GC return).
    863  * @param   pVM         VM Handle.
    864  * @param   uErrorCode  CPU Error code.
    865  * @param   pRegFrame   Trap register frame.
    866  * @param   pvFault     The fault address (cr2).
    867  * @param   GCPhysFault The GC physical address corresponding to pvFault.
    868  * @param   pvUser      User argument.
    869  */
    870 PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)
    871 {
    872     AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));
    873 
    874     /*
    875      * Try interpret the instruction.
    876      */
    877     uint32_t cb;
    878     int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);
    879     if (RT_SUCCESS(rc) && cb)
    880     {
    881         /*
    882          * Figure out which of the 4 PDs this is.
    883          */
    884         RTGCPTR i;
    885         PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);
    886         for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
    887             if (pGuestPDPT->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))
    888             {
    889                 PX86PDPAE       pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);
    890                 const RTGCPTR   offPD  = GCPhysFault & PAGE_OFFSET_MASK;
    891                 const unsigned  iPD1   = offPD / sizeof(X86PDEPAE);
    892                 const unsigned  iPD2   = (offPD + cb - 1) / sizeof(X86PDEPAE);
    893 
    894                 Assert(cb > 0 && cb <= 8);
    895                 Assert(iPD1 < X86_PG_PAE_ENTRIES);
    896                 Assert(iPD2 < X86_PG_PAE_ENTRIES);
    897 
    898 # ifdef LOG_ENABLED
    899                 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",
    900                      i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));
    901                 if (iPD1 != iPD2)
    902                     Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",
    903                          i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));
    904 # endif
    905 
    906                 if (!pVM->pgm.s.fMappingsFixed)
    907                 {
    908                     if (    (   pPDSrc->a[iPD1].n.u1Present
    909                              && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )
    910                         ||  (   iPD1 != iPD2
    911                              && pPDSrc->a[iPD2].n.u1Present
    912                              && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )
    913                        )
    914                     {
    915                         Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));
    916                         STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);
    917                         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    918                         return VINF_PGM_SYNC_CR3;
    919                     }
    920                 }
    921                 break; /* ASSUMES no duplicate entries... */
    922             }
    923         Assert(i < 4);
    924 
    925         STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);
    926     }
    927     else
    928     {
    929         Assert(RT_FAILURE(rc));
    930         if (rc == VERR_EM_INTERPRETER)
    931             rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;
    932         else
    933             Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));
    934         STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);
    935     }
    936     return rc;
    937 }
    938 
    939 #endif /* PGM_TYPE_PAE && !IN_RING3 */
    940 
    941 #endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
  • trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp

    r17468 r17586  
    224224        return;
    225225
    226 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    227226    if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3))
    228227        return;    /* too early */
    229 #endif
    230228
    231229    PGMMODE enmShadowMode = PGMGetShadowMode(pVM);
     
    272270                Assert(pShwPdpt);
    273271                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT));
    274 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    275272                if (!pShwPaePd)
    276273                {
     
    299296                    }
    300297                }
    301 #endif
    302298                AssertFatal(pShwPaePd);
    303299
     
    305301                AssertFatal(pPoolPagePd);
    306302
    307 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    308303                if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd))
    309304                {
     
    328323                }
    329324
    330 #else
    331                 if (pShwPaePd->a[iPDE].n.u1Present)
    332                 {
    333                     Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    334                     pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iNewPDE);
    335                 }
    336 #endif
    337325                X86PDEPAE PdePae0;
    338326                PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
     
    343331                AssertFatal(iPDE < 512);
    344332
    345 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    346333                if (    pShwPaePd->a[iPDE].n.u1Present
    347334                    &&  !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING))
     
    349336                    pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE);
    350337                }
    351 #else
    352                 if (pShwPaePd->a[iPDE].n.u1Present)
    353                 {
    354                     Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));
    355                     pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iNewPDE);
    356                 }
    357 #endif
    358338                X86PDEPAE PdePae1;
    359339                PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
     
    387367        return;
    388368
    389 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    390369    Assert(pShwPageCR3);
    391370# ifdef IN_RC
     
    400379        pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    401380    }
    402 #endif
    403381
    404382    unsigned i = pMap->cPTs;
     
    414392            case PGMMODE_32_BIT:
    415393            {
    416 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    417394                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
    418 #else
    419                 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
    420 #endif
    421395                AssertFatal(pShw32BitPd);
    422396
     
    434408                const unsigned iPdpt = iOldPDE / 256;         /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */
    435409                unsigned iPDE = iOldPDE * 2 % 512;
    436 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    437410                pShwPdpt  = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
    438411                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT));
     
    444417                        break;
    445418                }
    446 #else
    447                 pShwPdpt  = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    448                 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdpt << X86_PDPT_SHIFT));
    449 #endif
    450419                AssertFatal(pShwPaePd);
    451420
     
    461430                pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING;
    462431
    463 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    464432                PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
    465433                AssertFatal(pPoolPagePd);
     
    470438                    pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd);
    471439                }
    472 #endif
    473 
    474440                break;
    475441            }
     
    494460void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE)
    495461{
    496 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    497462    Assert(pShwPageCR3);
    498 #endif
    499463
    500464    unsigned i = pMap->cPTs;
     
    510474            case PGMMODE_32_BIT:
    511475            {
    512 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    513476                PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
    514 #else
    515                 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);
    516 #endif
    517477                AssertFatal(pShw32BitPd);
    518478
     
    530490                const unsigned iPD = iPDE / 256;         /* iPDE * 2 / 512; iPDE is in 4 MB pages */
    531491                unsigned iPaePDE = iPDE * 2 % 512;
    532 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    533492                pPdpt     = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3);
    534493                pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT));
    535 #else
    536                 pPdpt     = pgmShwGetPaePDPTPtr(&pVM->pgm.s);
    537                 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));
    538 #endif
    539494                AssertFatal(pShwPaePd);
    540495
     
    566521VMMDECL(void) PGMMapCheck(PVM pVM)
    567522{
    568 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    569523    /*
    570524     * Can skip this if mappings are disabled.
     
    573527        return;
    574528
    575 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    576529    Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    577 #  endif
    578530
    579531    /*
     
    586538        pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE);
    587539    }
    588 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    589540}
    590541#endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */
     
    600551int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
    601552{
    602 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    603553    /*
    604554     * Can skip this if mappings are disabled.
    605555     */
    606556    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
    607 #else
    608     /*
    609      * Can skip this if mappings are safely fixed.
    610      */
    611     if (pVM->pgm.s.fMappingsFixed)
    612 #endif
    613557        return VINF_SUCCESS;
    614558
     
    616560    Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed));
    617561
    618 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    619562    Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3));
    620 # endif
    621563
    622564    /*
     
    642584int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3)
    643585{
    644 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    645586    /*
    646587     * Can skip this if mappings are disabled.
    647588     */
    648589    if (!pgmMapAreMappingsEnabled(&pVM->pgm.s))
    649 #else
    650     /*
    651      * Can skip this if mappings are safely fixed.
    652      */
    653     if (pVM->pgm.s.fMappingsFixed)
    654 #endif
    655590        return VINF_SUCCESS;
    656591
    657 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    658592    Assert(pShwPageCR3);
    659 # endif
    660593
    661594    /*
     
    762695}
    763696
    764 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    765697/**
    766698 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings.
     
    878810    return VINF_SUCCESS;
    879811}
    880 # endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
    881812
    882813#endif /* IN_RING0 */
  • trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp

    r17559 r17586  
    136136# define PGMPOOL_UNLOCK_PTR(pVM, pPage)  do {} while (0)
    137137#endif
    138 
    139 #if !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && (defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0))
    140 /**
    141  * Maps a pool page into the current context.
    142  *
    143  * @returns Pointer to the mapping.
    144  * @param   pPGM    Pointer to the PGM instance data.
    145  * @param   pPage   The page to map.
    146  */
    147 void *pgmPoolMapPageFallback(PPGM pPGM, PPGMPOOLPAGE pPage)
    148 {
    149     /* General pages are taken care of by the inlined part; it
    150        only ends up here in case of failure. */
    151     AssertReleaseReturn(pPage->idx < PGMPOOL_IDX_FIRST, NULL);
    152 
    153 /** @todo make sure HCPhys is valid for *all* indexes. */
    154     /* special pages. */
    155 # ifdef IN_RC
    156     switch (pPage->idx)
    157     {
    158         case PGMPOOL_IDX_PD:
    159             return pPGM->pShw32BitPdRC;
    160         case PGMPOOL_IDX_PAE_PD:
    161         case PGMPOOL_IDX_PAE_PD_0:
    162             return pPGM->apShwPaePDsRC[0];
    163         case PGMPOOL_IDX_PAE_PD_1:
    164             return pPGM->apShwPaePDsRC[1];
    165         case PGMPOOL_IDX_PAE_PD_2:
    166             return pPGM->apShwPaePDsRC[2];
    167         case PGMPOOL_IDX_PAE_PD_3:
    168             return pPGM->apShwPaePDsRC[3];
    169         case PGMPOOL_IDX_PDPT:
    170             return pPGM->pShwPaePdptRC;
    171         default:
    172             AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));
    173             return NULL;
    174     }
    175 
    176 # else  /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    177     RTHCPHYS HCPhys;
    178     switch (pPage->idx)
    179     {
    180         case PGMPOOL_IDX_PD:
    181             HCPhys = pPGM->HCPhysShw32BitPD;
    182             break;
    183         case PGMPOOL_IDX_PAE_PD_0:
    184             HCPhys = pPGM->aHCPhysPaePDs[0];
    185             break;
    186         case PGMPOOL_IDX_PAE_PD_1:
    187             HCPhys = pPGM->aHCPhysPaePDs[1];
    188             break;
    189         case PGMPOOL_IDX_PAE_PD_2:
    190             HCPhys = pPGM->aHCPhysPaePDs[2];
    191             break;
    192         case PGMPOOL_IDX_PAE_PD_3:
    193             HCPhys = pPGM->aHCPhysPaePDs[3];
    194             break;
    195         case PGMPOOL_IDX_PDPT:
    196             HCPhys = pPGM->HCPhysShwPaePdpt;
    197             break;
    198         case PGMPOOL_IDX_NESTED_ROOT:
    199             HCPhys = pPGM->HCPhysShwNestedRoot;
    200             break;
    201         case PGMPOOL_IDX_PAE_PD:
    202             AssertReleaseMsgFailed(("PGMPOOL_IDX_PAE_PD is not usable in VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 context\n"));
    203             return NULL;
    204         default:
    205             AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));
    206             return NULL;
    207     }
    208     AssertMsg(HCPhys && HCPhys != NIL_RTHCPHYS && !(PAGE_OFFSET_MASK & HCPhys), ("%RHp\n", HCPhys));
    209 
    210     void *pv;
    211     pgmR0DynMapHCPageInlined(pPGM, HCPhys, &pv);
    212     return pv;
    213 # endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    214 }
    215 #endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
    216138
    217139
     
    388310            }
    389311
    390 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    391312            case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
    392313            case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
     
    457378                break;
    458379            }
    459 # endif
    460 
    461380
    462381            case PGMPOOLKIND_PAE_PT_FOR_PAE_PT:
     
    508427            }
    509428
    510 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    511429            case PGMPOOLKIND_32BIT_PD:
    512 #  else
    513             case PGMPOOLKIND_ROOT_32BIT_PD:
    514 #  endif
    515430            {
    516431                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
    517432                const unsigned iShw = off / sizeof(X86PTE);         // ASSUMING 32-bit guest paging!
    518433
    519 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    520434                LogFlow(("pgmPoolMonitorChainChanging: PGMPOOLKIND_32BIT_PD %x\n", iShw));
    521 #  endif
    522435#  ifndef IN_RING0
    523436                if (uShw.pPD->a[iShw].u & PGM_PDFLAGS_MAPPING)
     
    530443                }
    531444#  endif /* !IN_RING0 */
    532 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    533445#   ifndef IN_RING0
    534446                else
     
    545457                    }
    546458                }
    547 #  endif
    548459                /* paranoia / a bit assumptive. */
    549460                if (   pCpu
     
    564475                        }
    565476#  endif /* !IN_RING0 */
    566 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    567477#   ifndef IN_RING0
    568478                        else
     
    579489                            }
    580490                        }
    581 #  endif
    582491                    }
    583492                }
     
    596505                break;
    597506            }
    598 
    599 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    600             case PGMPOOLKIND_ROOT_PAE_PD:
    601             {
    602                 unsigned iGst     = off / sizeof(X86PDE);           // ASSUMING 32-bit guest paging!
    603                 unsigned iShwPdpt = iGst / 256;
    604                 unsigned iShw     = (iGst % 256) * 2;
    605                 Assert(pPage->idx == PGMPOOL_IDX_PAE_PD);
    606                 PPGMPOOLPAGE pPage2 = pPage + 1 + iShwPdpt;
    607                 Assert(pPage2->idx == PGMPOOL_IDX_PAE_PD_0 + iShwPdpt);
    608                 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage2);
    609                 for (unsigned i = 0; i < 2; i++, iShw++)
    610                 {
    611                     if ((uShw.pPDPae->a[iShw].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
    612                     {
    613                         Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    614                         VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    615                         LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw));
    616                     }
    617                     /* paranoia / a bit assumptive. */
    618                     else if (   pCpu
    619                              && (off & 3)
    620                              && (off & 3) + cbWrite > 4)
    621                     {
    622                         const unsigned iShw2 = iShw + 2;
    623                         if (    iShw2 < RT_ELEMENTS(uShw.pPDPae->a) /** @todo was completely wrong, it's better now after #1865 but still wrong from cross PD. */
    624                             &&  (uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))
    625                         {
    626                             Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));
    627                             VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);
    628                             LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));
    629                         }
    630                     }
    631 #if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */
    632                     if (    uShw.pPDPae->a[iShw].n.u1Present
    633                         &&  !VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
    634                     {
    635                         LogFlow(("pgmPoolMonitorChainChanging: iShwPdpt=%#x iShw=%#x: %RX64 -> freeing it!\n", iShwPdpt, iShw, uShw.pPDPae->a[iShw].u));
    636 # ifdef IN_RC           /* TLB load - we're pushing things a bit... */
    637                         ASMProbeReadByte(pvAddress);
    638 # endif
    639                         pgmPoolFree(pVM, uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK, pPage->idx, iShw + iShwPdpt * X86_PG_PAE_ENTRIES);
    640                         uShw.pPDPae->a[iShw].u = 0;
    641                     }
    642 #endif
    643                 }
    644                 break;
    645             }
    646 #  endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */
    647507
    648508            case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
     
    660520                }
    661521#endif /* !IN_RING0 */
    662 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    663522                /*
    664523                 * Causes trouble when the guest uses a PDE to refer to the whole page table level
     
    675534                        pgmPoolFree(pVM,
    676535                                    uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK,
    677 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    678536                                    pPage->idx,
    679537                                    iShw);
    680 # else
    681                                     /* Note: hardcoded PAE implementation dependency */
    682                                     (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? PGMPOOL_IDX_PAE_PD : pPage->idx,
    683                                     (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? iShw + (pPage->idx - PGMPOOL_IDX_PAE_PD_0) * X86_PG_PAE_ENTRIES : iShw);
    684 # endif
    685538                        uShw.pPDPae->a[iShw].u = 0;
    686539                    }
    687540                }
    688 #endif
    689541                /* paranoia / a bit assumptive. */
    690542                if (   pCpu
     
    705557                    }
    706558#endif /* !IN_RING0 */
    707 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    708559# ifndef IN_RING0
    709560                    else
     
    714565                        pgmPoolFree(pVM,
    715566                                    uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK,
    716 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    717567                                    pPage->idx,
    718568                                    iShw2);
    719 # else
    720                                     /* Note: hardcoded PAE implementation dependency */
    721                                     (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? PGMPOOL_IDX_PAE_PD : pPage->idx,
    722                                     (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? iShw2 + (pPage->idx - PGMPOOL_IDX_PAE_PD_0) * X86_PG_PAE_ENTRIES : iShw2);
    723 # endif
    724569                        uShw.pPDPae->a[iShw2].u = 0;
    725570                    }
    726 #endif
    727571                }
    728572                break;
    729573            }
    730574
    731 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    732575            case PGMPOOLKIND_PAE_PDPT:
    733 #  else
    734             case PGMPOOLKIND_ROOT_PDPT:
    735 #  endif
    736576            {
    737577                /*
     
    740580                 * - messing with the bits of pd pointers without changing the physical address
    741581                 */
    742 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    743582                /* PDPT roots are not page aligned; 32 byte only! */
    744583                const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
    745 # else
    746                 const unsigned offPdpt = off;
    747 # endif
     584
    748585                uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
    749586                const unsigned iShw = offPdpt / sizeof(X86PDPE);
     
    760597                    }
    761598# endif /* !IN_RING0 */
    762 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    763599#  ifndef IN_RING0
    764600                    else
     
    773609                        uShw.pPDPT->a[iShw].u = 0;
    774610                    }
    775 # endif
    776611
    777612                    /* paranoia / a bit assumptive. */
     
    793628                            }
    794629# endif /* !IN_RING0 */
    795 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    796630#  ifndef IN_RING0
    797631                            else
     
    806640                                uShw.pPDPT->a[iShw2].u = 0;
    807641                            }
    808 # endif
    809642                        }
    810643                    }
     
    856689                 * - messing with the bits of pd pointers without changing the physical address
    857690                 */
    858 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    859691                if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
    860 # endif
    861692                {
    862693                    uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     
    891722                 * - messing with the bits of pd pointers without changing the physical address
    892723                 */
    893 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    894724                if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
    895 # endif
    896725                {
    897726                    uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
     
    12821111    bool fReused = false;
    12831112    if (    (   pPage->cModifications < 48   /** @todo #define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */
    1284 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    12851113             || pgmPoolIsPageLocked(&pVM->pgm.s, pPage)
    1286 #else
    1287              || pPage->fCR3Mix
    1288 #endif
    12891114            )
    12901115        &&  !(fReused = pgmPoolMonitorIsReused(pVM, pPage, pRegFrame, &Cpu, pvFault))
     
    14451270     * Reject any attempts at flushing the currently active shadow CR3 mapping
    14461271     */
    1447 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    14481272    if (pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage))
    1449 #else
    1450     if (PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) == pPage->Core.Key)
    1451 #endif
    14521273    {
    14531274        /* Refresh the cr3 mapping by putting it at the head of the age list. */
     
    14901311        case PGMPOOLKIND_EPT_PD_FOR_PHYS:
    14911312        case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
    1492 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    14931313        case PGMPOOLKIND_PAE_PDPT_FOR_32BIT: /* never reuse them for other types */
    14941314            return false;
    1495 #else
    1496             return true;
    1497 #endif
    14981315
    14991316        /*
     
    15641381         * These cannot be flushed, and it's common to reuse the PDs as PTs.
    15651382         */
    1566 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1567         case PGMPOOLKIND_ROOT_32BIT_PD:
    1568         case PGMPOOLKIND_ROOT_PAE_PD:
    1569         case PGMPOOLKIND_ROOT_PDPT:
    1570 #endif
    15711383        case PGMPOOLKIND_ROOT_NESTED:
    15721384            return false;
     
    17701582                case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
    17711583                case PGMPOOLKIND_64BIT_PML4:
    1772 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    17731584                case PGMPOOLKIND_32BIT_PD:
    17741585                case PGMPOOLKIND_PAE_PDPT:
    1775 #else
    1776                 case PGMPOOLKIND_ROOT_32BIT_PD:
    1777                 case PGMPOOLKIND_ROOT_PAE_PD:
    1778                 case PGMPOOLKIND_ROOT_PDPT:
    1779 #endif
    17801586                {
    17811587                    /* find the head */
     
    18031609                case PGMPOOLKIND_PAE_PDPT_PHYS:
    18041610                case PGMPOOLKIND_32BIT_PD_PHYS:
    1805 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    18061611                case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
    1807 #endif
    18081612                    break;
    18091613                default:
     
    18451649        case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
    18461650        case PGMPOOLKIND_64BIT_PML4:
    1847 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    18481651        case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
    18491652        case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
     
    18521655        case PGMPOOLKIND_32BIT_PD:
    18531656        case PGMPOOLKIND_PAE_PDPT:
    1854 #else
    1855         case PGMPOOLKIND_ROOT_PDPT:
    1856 #endif
    18571657            break;
    18581658
     
    18711671            return VINF_SUCCESS;
    18721672
    1873 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    18741673        case PGMPOOLKIND_32BIT_PD_PHYS:
    18751674        case PGMPOOLKIND_PAE_PDPT_PHYS:
     
    18781677            /* Nothing to monitor here. */
    18791678            return VINF_SUCCESS;
    1880 #else
    1881         case PGMPOOLKIND_ROOT_32BIT_PD:
    1882         case PGMPOOLKIND_ROOT_PAE_PD:
    1883 #endif
    18841679#ifdef PGMPOOL_WITH_MIXED_PT_CR3
    18851680            break;
     
    19521747        case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
    19531748        case PGMPOOLKIND_64BIT_PML4:
    1954 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    19551749        case PGMPOOLKIND_32BIT_PD:
    19561750        case PGMPOOLKIND_PAE_PDPT:
     
    19591753        case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
    19601754        case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
    1961 #else
    1962         case PGMPOOLKIND_ROOT_PDPT:
    1963 #endif
    19641755            break;
    19651756
     
    19811772            return VINF_SUCCESS;
    19821773
    1983 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1984         case PGMPOOLKIND_ROOT_32BIT_PD:
    1985         case PGMPOOLKIND_ROOT_PAE_PD:
    1986 #endif
    19871774#ifdef PGMPOOL_WITH_MIXED_PT_CR3
    19881775            break;
    1989 #endif
    1990 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    1991         case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
    19921776#endif
    19931777        default:
     
    20071791            PPGMPOOLPAGE pNewHead = &pPool->aPages[pPage->iMonitoredNext];
    20081792            pNewHead->iMonitoredPrev = NIL_PGMPOOL_IDX;
    2009 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2010             pNewHead->fCR3Mix = pPage->fCR3Mix;
    2011 #endif
    20121793            rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
    20131794                                                   pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pNewHead),
     
    20471828}
    20481829
    2049 # if defined(PGMPOOL_WITH_MIXED_PT_CR3) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    2050 
    2051 /**
    2052  * Set or clear the fCR3Mix attribute in a chain of monitored pages.
    2053  *
    2054  * @param   pPool       The Pool.
    2055  * @param   pPage       A page in the chain.
    2056  * @param   fCR3Mix     The new fCR3Mix value.
    2057  */
    2058 static void pgmPoolMonitorChainChangeCR3Mix(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fCR3Mix)
    2059 {
    2060     /* current */
    2061     pPage->fCR3Mix = fCR3Mix;
    2062 
    2063     /* before */
    2064     int16_t idx = pPage->iMonitoredPrev;
    2065     while (idx != NIL_PGMPOOL_IDX)
    2066     {
    2067         pPool->aPages[idx].fCR3Mix = fCR3Mix;
    2068         idx = pPool->aPages[idx].iMonitoredPrev;
    2069     }
    2070 
    2071     /* after */
    2072     idx = pPage->iMonitoredNext;
    2073     while (idx != NIL_PGMPOOL_IDX)
    2074     {
    2075         pPool->aPages[idx].fCR3Mix = fCR3Mix;
    2076         idx = pPool->aPages[idx].iMonitoredNext;
    2077     }
    2078 }
    2079 
    2080 
    2081 /**
    2082  * Installs or modifies monitoring of a CR3 page (special).
    2083  *
    2084  * We're pretending the CR3 page is shadowed by the pool so we can use the
    2085  * generic mechanisms in detecting chained monitoring. (This also gives us a
    2086  * tast of what code changes are required to really pool CR3 shadow pages.)
    2087  *
    2088  * @returns VBox status code.
    2089  * @param   pPool       The pool.
    2090  * @param   idxRoot     The CR3 (root) page index.
    2091  * @param   GCPhysCR3   The (new) CR3 value.
    2092  */
    2093 int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3)
    2094 {
    2095     Assert(idxRoot != NIL_PGMPOOL_IDX && idxRoot < PGMPOOL_IDX_FIRST);
    2096     PPGMPOOLPAGE pPage = &pPool->aPages[idxRoot];
    2097     LogFlow(("pgmPoolMonitorMonitorCR3: idxRoot=%d pPage=%p:{.GCPhys=%RGp, .fMonitored=%d} GCPhysCR3=%RGp\n",
    2098              idxRoot, pPage, pPage->GCPhys, pPage->fMonitored, GCPhysCR3));
    2099 
    2100     /*
    2101      * The unlikely case where it already matches.
    2102      */
    2103     if (pPage->GCPhys == GCPhysCR3)
    2104     {
    2105         Assert(pPage->fMonitored);
    2106         return VINF_SUCCESS;
    2107     }
    2108 
    2109     /*
    2110      * Flush the current monitoring and remove it from the hash.
    2111      */
    2112     int rc = VINF_SUCCESS;
    2113     if (pPage->fMonitored)
    2114     {
    2115         pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, false);
    2116         rc = pgmPoolMonitorFlush(pPool, pPage);
    2117         if (rc == VERR_PGM_POOL_CLEARED)
    2118             rc = VINF_SUCCESS;
    2119         else
    2120             AssertFatalRC(rc);
    2121         pgmPoolHashRemove(pPool, pPage);
    2122     }
    2123 
    2124     /*
    2125      * Monitor the page at the new location and insert it into the hash.
    2126      */
    2127     pPage->GCPhys = GCPhysCR3;
    2128     int rc2 = pgmPoolMonitorInsert(pPool, pPage);
    2129     if (rc2 != VERR_PGM_POOL_CLEARED)
    2130     {
    2131         AssertFatalRC(rc2);
    2132         if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
    2133             rc = rc2;
    2134     }
    2135     pgmPoolHashInsert(pPool, pPage);
    2136     pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, true);
    2137     return rc;
    2138 }
    2139 
    2140 
    2141 /**
    2142  * Removes the monitoring of a CR3 page (special).
    2143  *
    2144  * @returns VBox status code.
    2145  * @param   pPool       The pool.
    2146  * @param   idxRoot     The CR3 (root) page index.
    2147  */
    2148 int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot)
    2149 {
    2150     Assert(idxRoot != NIL_PGMPOOL_IDX && idxRoot < PGMPOOL_IDX_FIRST);
    2151     PPGMPOOLPAGE pPage = &pPool->aPages[idxRoot];
    2152     LogFlow(("pgmPoolMonitorUnmonitorCR3: idxRoot=%d pPage=%p:{.GCPhys=%RGp, .fMonitored=%d}\n",
    2153              idxRoot, pPage, pPage->GCPhys, pPage->fMonitored));
    2154 
    2155     if (!pPage->fMonitored)
    2156         return VINF_SUCCESS;
    2157 
    2158     pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, false);
    2159     int rc = pgmPoolMonitorFlush(pPool, pPage);
    2160     if (rc != VERR_PGM_POOL_CLEARED)
    2161         AssertFatalRC(rc);
    2162     else
    2163         rc = VINF_SUCCESS;
    2164     pgmPoolHashRemove(pPool, pPage);
    2165     Assert(!pPage->fMonitored);
    2166     pPage->GCPhys = NIL_RTGCPHYS;
    2167     return rc;
    2168 }
    2169 
    2170 # endif /* PGMPOOL_WITH_MIXED_PT_CR3 && !VBOX_WITH_PGMPOOL_PAGING_ONLY*/
    21711830
    21721831/**
     
    27132372        case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
    27142373        case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
    2715 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    27162374        case PGMPOOLKIND_32BIT_PD:
    27172375        case PGMPOOLKIND_32BIT_PD_PHYS:
    2718 #else
    2719         case PGMPOOLKIND_ROOT_32BIT_PD:
    2720 #endif
    27212376            return 4;
    27222377
     
    27342389        case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
    27352390        case PGMPOOLKIND_64BIT_PML4:
    2736 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    2737         case PGMPOOLKIND_ROOT_PAE_PD:
    2738         case PGMPOOLKIND_ROOT_PDPT:
    2739 #endif
    27402391        case PGMPOOLKIND_PAE_PDPT:
    27412392        case PGMPOOLKIND_ROOT_NESTED:
     
    27702421        case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
    27712422        case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
    2772 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    27732423        case PGMPOOLKIND_32BIT_PD:
    2774 #else
    2775         case PGMPOOLKIND_ROOT_32BIT_PD:
    2776 #endif
    27772424        case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
    27782425        case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
     
    27892436        case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
    27902437        case PGMPOOLKIND_64BIT_PML4:
    2791 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    27922438        case PGMPOOLKIND_PAE_PDPT:
    2793 #else
    2794         case PGMPOOLKIND_ROOT_PAE_PD:
    2795         case PGMPOOLKIND_ROOT_PDPT:
    2796 #endif
    27972439            return 8;
    27982440
     
    32012843     */
    32022844    PPGMPOOLPAGE pUserPage = &pPool->aPages[pUser->iUser];
    3203 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    3204     if (pUserPage->enmKind == PGMPOOLKIND_ROOT_PAE_PD)
    3205     {
    3206         /* Must translate the fake 2048 entry PD to a 512 PD one since the R0 mapping is not linear. */
    3207         Assert(pUser->iUser == PGMPOOL_IDX_PAE_PD);
    3208         uint32_t iPdpt = iUserTable / X86_PG_PAE_ENTRIES;
    3209         iUserTable    %= X86_PG_PAE_ENTRIES;
    3210         pUserPage      = &pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + iPdpt];
    3211         Assert(pUserPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD);
    3212     }
    3213 #endif
    32142845    union
    32152846    {
     
    32222853
    32232854    /* Safety precaution in case we change the paging for other modes too in the future. */
    3224 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    32252855    Assert(!pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage));
    3226 #else
    3227     Assert(PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) != pPage->Core.Key);
    3228 #endif
    32292856
    32302857#ifdef VBOX_STRICT
     
    32342861    switch (pUserPage->enmKind)
    32352862    {
    3236 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    32372863        case PGMPOOLKIND_32BIT_PD:
    32382864            case PGMPOOLKIND_32BIT_PD_PHYS:
    32392865            Assert(iUserTable < X86_PG_ENTRIES);
    32402866            break;
    3241 # else
    3242         case PGMPOOLKIND_ROOT_32BIT_PD:
    3243             Assert(iUserTable < X86_PG_ENTRIES);
    3244             Assert(!(u.pau32[iUserTable] & PGM_PDFLAGS_MAPPING));
    3245             break;
    3246 # endif
    3247 # if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    3248         case PGMPOOLKIND_ROOT_PAE_PD:
    3249             Assert(iUserTable < 2048 && pUser->iUser == PGMPOOL_IDX_PAE_PD);
    3250             AssertMsg(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING), ("%llx %d\n", u.pau64[iUserTable], iUserTable));
    3251             break;
    3252 # endif
    3253 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    32542867        case PGMPOOLKIND_PAE_PDPT:
    32552868        case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
    32562869        case PGMPOOLKIND_PAE_PDPT_PHYS:
    3257 # else
    3258         case PGMPOOLKIND_ROOT_PDPT:
    3259 # endif
    32602870            Assert(iUserTable < 4);
    32612871            Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
     
    33072917    {
    33082918        /* 32-bit entries */
    3309 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    33102919        case PGMPOOLKIND_32BIT_PD:
    33112920            case PGMPOOLKIND_32BIT_PD_PHYS:
    3312 #else
    3313         case PGMPOOLKIND_ROOT_32BIT_PD:
    3314 #endif
    33152921            u.pau32[iUserTable] = 0;
    33162922            break;
     
    33222928        case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
    33232929        case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
    3324 #if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
     2930#if defined(IN_RC)
    33252931            /* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
    33262932             * non-present PDPT will continue to cause page faults.
     
    33362942        case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
    33372943        case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
    3338 #if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
    3339         case PGMPOOLKIND_ROOT_PAE_PD:
    3340 #endif
    3341 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    33422944        case PGMPOOLKIND_PAE_PDPT:
    33432945        case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
    3344 #else
    3345         case PGMPOOLKIND_ROOT_PDPT:
    3346 #endif
    33472946        case PGMPOOLKIND_ROOT_NESTED:
    33482947        case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
     
    38483447
    38493448
    3850 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    38513449/**
    38523450 * Clear references to shadowed pages in a 32 bits page directory.
     
    38723470    }
    38733471}
    3874 #endif
    38753472
    38763473/**
     
    38863483    {
    38873484        if (    pShwPD->a[i].n.u1Present
    3888 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    38893485            &&  !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
    3890 #endif
    38913486           )
    38923487        {
     
    39143509    {
    39153510        if (    pShwPDPT->a[i].n.u1Present
    3916 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    39173511            &&  !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
    3918 #endif
    39193512           )
    39203513        {
     
    41113704            break;
    41123705
    4113 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    41143706        case PGMPOOLKIND_32BIT_PD_PHYS:
    41153707        case PGMPOOLKIND_32BIT_PD:
     
    41203712        case PGMPOOLKIND_PAE_PDPT:
    41213713        case PGMPOOLKIND_PAE_PDPT_PHYS:
    4122 #endif
    41233714        case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
    41243715        case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
     
    41753766     */
    41763767    Assert(NIL_PGMPOOL_IDX == 0);
    4177 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    4178     for (unsigned i = 1; i < PGMPOOL_IDX_FIRST; i++)
    4179     {
    4180         /*
    4181          * Get the page address.
    4182          */
    4183         PPGMPOOLPAGE pPage = &pPool->aPages[i];
    4184         union
    4185         {
    4186             uint64_t *pau64;
    4187             uint32_t *pau32;
    4188         } u;
    4189 
    4190         /*
    4191          * Mark stuff not present.
    4192          */
    4193         switch (pPage->enmKind)
    4194         {
    4195             case PGMPOOLKIND_ROOT_32BIT_PD:
    4196                 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    4197                 for (unsigned iPage = 0; iPage < X86_PG_ENTRIES; iPage++)
    4198                     if ((u.pau32[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
    4199                         u.pau32[iPage] = 0;
    4200                 break;
    4201 
    4202             case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
    4203                 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    4204                 for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES; iPage++)
    4205                     if ((u.pau64[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
    4206                         u.pau64[iPage] = 0;
    4207                 break;
    4208 
    4209             case PGMPOOLKIND_ROOT_PDPT:
    4210                 /* Not root of shadowed pages currently, ignore it. */
    4211                 break;
    4212 
    4213             case PGMPOOLKIND_ROOT_NESTED:
    4214                 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
    4215                 ASMMemZero32(u.pau64, PAGE_SIZE);
    4216                 break;
    4217         }
    4218     }
    4219 #endif
    42203768
    42213769    /*
     
    42553803    }
    42563804
    4257 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    42583805    /* Unmap the old CR3 value before flushing everything. */
    42593806    int rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
     
    42633810    rc = PGM_SHW_PFN(Exit, pVM)(pVM);
    42643811    AssertRC(rc);
    4265 #endif
    42663812
    42673813    /*
     
    43023848        pPage->iAgePrev  = NIL_PGMPOOL_IDX;
    43033849#endif
    4304 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    43053850        pPage->fLocked   = false;
    4306 #endif
    43073851    }
    43083852    pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX;
     
    44093953    }
    44103954
    4411 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    44123955    /* Force a shadow mode reinit (necessary for nested paging and ept). */
    44133956    pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
     
    44163959    rc = PGMR3ChangeMode(pVM, PGMGetGuestMode(pVM));
    44173960    AssertRC(rc);
    4418 #endif
    44193961
    44203962    /*
     
    44584000     * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
    44594001     */
    4460 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    44614002    if (pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage))
    44624003    {
     
    44714012                  || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
    44724013                  ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
    4473 #else
    4474     if (PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) == pPage->Core.Key)
    4475     {
    4476         AssertMsg(pPage->enmKind == PGMPOOLKIND_64BIT_PML4,
    4477                   ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
    4478 #endif
    44794014        Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
    44804015        return VINF_SUCCESS;
     
    45854120     */
    45864121    if (    pPool->cCurPages < pPool->cMaxPages
    4587 #if defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && defined(IN_RC)
     4122#if defined(IN_RC)
    45884123        /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
    45894124        &&  enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
     
    48754410    case PGMPOOLKIND_EPT_PT_FOR_PHYS:
    48764411        return "PGMPOOLKIND_EPT_PT_FOR_PHYS";
    4877 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    4878     case PGMPOOLKIND_ROOT_32BIT_PD:
    4879         return "PGMPOOLKIND_ROOT_32BIT_PD";
    4880     case PGMPOOLKIND_ROOT_PAE_PD:
    4881         return "PGMPOOLKIND_ROOT_PAE_PD";
    4882     case PGMPOOLKIND_ROOT_PDPT:
    4883         return "PGMPOOLKIND_ROOT_PDPT";
    4884 #endif
    48854412    case PGMPOOLKIND_ROOT_NESTED:
    48864413        return "PGMPOOLKIND_ROOT_NESTED";
  • trunk/src/VBox/VMM/VMMAll/PGMAllShw.h

    r16465 r17586  
    110110#  define SHW_PDPE_PG_MASK      X86_PDPE_PG_MASK
    111111#  define SHW_TOTAL_PD_ENTRIES  (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
    112 #  ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    113112#  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PDPT
    114 #  else
    115 #  define SHW_POOL_ROOT_IDX     PGMPOOL_IDX_PAE_PD
    116 #  endif
    117113
    118114# endif
  • trunk/src/VBox/VMM/VMMSwitcher.cpp

    r16859 r17586  
    716716                    GCPtrGDT,
    717717                    PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
    718 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
    719718                    /* @todo No need for three GetHyper calls; one and the same base is used */
    720 #endif
    721719                    PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
    722720                    SelCS, SelDS, SelCS64, SelTSS);
  • trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp

    r17537 r17586  
    416416    GEN_CHECK_OFF(PGM, GCPhysCR3);
    417417    GEN_CHECK_OFF(PGM, GCPtrCR3Mapping);
    418 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    419     GEN_CHECK_OFF(PGM, GCPhysGstCR3Monitored);
    420 #endif
    421418    GEN_CHECK_OFF(PGM, pGst32BitPdR3);
    422419#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     
    436433    GEN_CHECK_OFF(PGM, aGCPhysGstPaePDs);
    437434    GEN_CHECK_OFF(PGM, aGCPhysGstPaePDsMonitored);
    438 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    439     GEN_CHECK_OFF(PGM, pShw32BitPdR3);
    440 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    441     GEN_CHECK_OFF(PGM, pShw32BitPdR0);
    442 # endif
    443     GEN_CHECK_OFF(PGM, pShw32BitPdRC);
    444     GEN_CHECK_OFF(PGM, HCPhysShw32BitPD);
    445     GEN_CHECK_OFF(PGM, apShwPaePDsR3);
    446 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    447     GEN_CHECK_OFF(PGM, apShwPaePDsR0);
    448 # endif
    449     GEN_CHECK_OFF(PGM, apShwPaePDsRC);
    450     GEN_CHECK_OFF(PGM, aHCPhysPaePDs);
    451     GEN_CHECK_OFF(PGM, pShwPaePdptR3);
    452     GEN_CHECK_OFF(PGM, pShwPaePdptR0);
    453     GEN_CHECK_OFF(PGM, pShwPaePdptRC);
    454     GEN_CHECK_OFF(PGM, HCPhysShwPaePdpt);
    455 #endif
    456 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    457     GEN_CHECK_OFF(PGM, pShwRootR3);
    458 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
    459     GEN_CHECK_OFF(PGM, pShwRootR0);
    460 # endif
    461     GEN_CHECK_OFF(PGM, HCPhysShwCR3);
    462 #endif
    463435    GEN_CHECK_OFF(PGM, pShwPageCR3R3);
    464436    GEN_CHECK_OFF(PGM, pShwPageCR3R0);
     
    472444    GEN_CHECK_OFF(PGM, pfnR3GstRelocate);
    473445    GEN_CHECK_OFF(PGM, pfnR3GstExit);
    474 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    475     GEN_CHECK_OFF(PGM, pfnR3GstMonitorCR3);
    476     GEN_CHECK_OFF(PGM, pfnR3GstUnmonitorCR3);
    477 #endif
    478446    GEN_CHECK_OFF(PGM, pfnR3BthMapCR3);
    479447    GEN_CHECK_OFF(PGM, pfnR3BthUnmapCR3);
     
    689657    GEN_CHECK_OFF(PGMPOOLPAGE, fCached);
    690658    GEN_CHECK_OFF(PGMPOOLPAGE, fReusedFlushPending);
    691 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    692     GEN_CHECK_OFF(PGMPOOLPAGE, fCR3Mix);
    693 #endif
    694659    GEN_CHECK_SIZE(PGMPOOL);
    695660    GEN_CHECK_OFF(PGMPOOL, pVMR3);
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette