Changeset r17586

Timestamp:
    Mar 9, 2009 3:28:25 PM
Location:
    trunk/src/VBox/VMM
Files:
    15 edited
trunk/src/VBox/VMM/PGM.cpp
r17556 -> r17586

VBOX_WITH_PGMPOOL_PAGING_ONLY is now permanently enabled, so this file drops
every "#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY" (legacy shadow paging) block and
keeps the pool-based "#ifdef" code unconditionally:

- PGMR3Init: the pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS
  initialization is gone (the field itself is removed from PGMInternal.h).
  The PGMR3ChangeMode(pVM, PGMMODE_REAL) call after pgmR3PoolInit() is now
  unconditional, and the duplicate call later in pgmR3InitPaging() is dropped.
- pgmR3InitPaging no longer allocates the fixed shadow root pages for the
  three possible guest contexts (the 32-bit PD, the four PAE PDs covering the
  low 4GB, which are virtually contiguous but not physically contiguous, the
  PAE PDPT and the nested-paging root), no longer fails with
  VERR_NO_PAGE_MEMORY when any of those allocations came back empty, no
  longer resolves their physical addresses via MMPage2Phys(), and no longer
  zeroes them or pre-populates the PAE PDPT entries with
  X86_PDPE_P | PGM_PLXFLAGS_PERMANENT | aHCPhysPaePDs[i] (flags that were to
  be corrected when entering and leaving long mode). All shadow roots now
  come from the PGM pool.
- The 64-bit-host debug LogRel of HCPhysShw32BitPD, aHCPhysPaePDs[0..3] and
  HCPhysShwPaePdpt is removed; the HCPhysInterPD / HCPhysInterPaePDPT /
  HCPhysInterPaePML4 line stays.
- The hypervisor-area setup no longer reserves
  PAGE_SIZE * (2 + 4 + 1 + 2 + 2) bytes ("Paging", plus a "fence" page) for
  mapping the paging pages into guest context, and the later code that mapped
  HCPhysShw32BitPD, the four aHCPhysPaePDs (with the paranoia asserts that
  their RC mappings are page-contiguous) and HCPhysShwPaePdpt via PGMMap() is
  deleted.
- Relocation: pShw32BitPdRC, apShwPaePDsRC[] and pShwPaePdptRC no longer
  exist and are no longer adjusted by offDelta; pShwPageCR3RC += offDelta is
  now unconditional (GCPtrCR3Mapping and the guest-side pointers are adjusted
  as before).
- The mode-data switch no longer copies the R3/RC/R0 pfnGstMonitorCR3 /
  pfnGstUnmonitorCR3 pointers or the CR3 write handler pointers and their
  name strings (32-bit and PAE variants).
- PGMR3ChangeMode: the old workaround that flushed the PGM pool cache when
  the guest switched between paging and non-paging modes (flagged with a
  @todo noting that shadow page tables must not be reused across such
  switches) is removed.
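The deleted initialization loop is the clearest record of how those fixed PAE
PDPT entries were composed, so a small standalone restatement may be useful.
This is a minimal sketch: X86_PDPE_P matches the real x86 present bit, but the
PGM_PLXFLAGS_PERMANENT value below is an assumed stand-in for whichever AVL
bit PGMInternal.h actually uses, and the addresses are made up.

    /* Toy model of the removed pgmR3InitPaging loop that pre-populated the
     * fixed PAE PDPT: present flag + VBox-private "permanent" marker + the
     * page-aligned host physical address of one of the four low-4GB PDs. */
    #include <stdint.h>
    #include <stdio.h>

    #define X86_PDPE_P             UINT64_C(0x0000000000000001) /* real x86 present bit */
    #define PGM_PLXFLAGS_PERMANENT UINT64_C(0x0000000000000200) /* assumed: an AVL bit (9..11) */
    #define PAGE_OFFSET_MASK_4K    UINT64_C(0x0000000000000FFF)

    static uint64_t make_pdpte(uint64_t HCPhysPaePD)
    {
        /* The PD address must be 4KB aligned so it cannot clobber the flag bits. */
        return X86_PDPE_P | PGM_PLXFLAGS_PERMANENT | (HCPhysPaePD & ~PAGE_OFFSET_MASK_4K);
    }

    int main(void)
    {
        uint64_t aPdpt[4];
        for (unsigned i = 0; i < 4; i++)
            aPdpt[i] = make_pdpte(UINT64_C(0x00100000) + i * UINT64_C(0x1000));
        for (unsigned i = 0; i < 4; i++)
            printf("pdpt[%u] = %#018llx\n", i, (unsigned long long)aPdpt[i]);
        return 0;
    }

The removed code noted that these flags get corrected when entering and
leaving long mode, which is why marking them "permanent" was safe only for
the legacy fixed roots.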
trunk/src/VBox/VMM/PGMBth.h
r17483 -> r17586

- PGM_BTH_DECL(int, Enter)(PVM pVM, RTGCPHYS GCPhysCR3): the pool-based path
  that allocates the root shadow page table for real and protected mode
  during mode switches (all other modes rely on MapCR3/UnmapCR3 to set up the
  shadow root page tables) loses its enclosing
  "#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY". The inner guard, #if (32-bit, PAE
  or AMD64 shadow type) && (real or protected guest type), moves to the top
  level with its indentation fixed, and the legacy "#else /* nothing special
  to do here - InitData does the job. */" branch is deleted. The kept path
  still asserts !HWACCMIsNestedPagingActive(pVM) and ends by applying all
  hypervisor mappings to the new CR3 via
  pgmMapActivateCR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3)); all other
  shadow/guest combinations fall through to return VINF_SUCCESS.
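The compile-time condition kept by this hunk is easy to misread in flattened
diff form, so here is a minimal runtime restatement of it. The enum and
function are illustrative only; VBox expresses the same thing with the
PGM_SHW_TYPE and PGM_GST_TYPE preprocessor constants, not a runtime check.

    /* When does Enter() allocate a pool-backed root itself? Only when a
     * paging-capable shadow mode hosts a non-paging (real/protected) guest;
     * every other combination relies on MapCR3/UnmapCR3 instead. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { TYPE_REAL, TYPE_PROT, TYPE_32BIT, TYPE_PAE, TYPE_AMD64,
                   TYPE_NESTED, TYPE_EPT } pgmtype_t;

    static bool needs_pool_root(pgmtype_t enmShw, pgmtype_t enmGst)
    {
        bool fShwPaged   = enmShw == TYPE_32BIT || enmShw == TYPE_PAE || enmShw == TYPE_AMD64;
        bool fGstUnpaged = enmGst == TYPE_REAL  || enmGst == TYPE_PROT;
        return fShwPaged && fGstUnpaged;
    }

    int main(void)
    {
        printf("32-bit shadow + real guest : %d\n", needs_pool_root(TYPE_32BIT, TYPE_REAL));
        printf("PAE shadow    + PAE guest  : %d\n", needs_pool_root(TYPE_PAE,   TYPE_PAE));
        printf("EPT shadow    + prot guest : %d\n", needs_pool_root(TYPE_EPT,   TYPE_PROT));
        return 0;
    }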
trunk/src/VBox/VMM/PGMGst.h
r17215 -> r17586

- The forward declarations of the static handlers
  pgmR3Gst32BitWriteHandlerCR3() and pgmR3GstPAEWriteHandlerCR3() and the
  PGM_GST_DECL prototypes for MonitorCR3/UnmonitorCR3 are removed.
- The mode-data init no longer fills in pfnR3GstMonitorCR3,
  pfnR3GstUnmonitorCR3 or the CR3 write handler pointers and their
  description strings ("Guest CR3 Write access handler" / "... (PAE)"), and
  no longer resolves the MonitorCR3 / UnmonitorCR3 / WriteHandlerCR3 symbols
  for RC and R0 via PDMR3LdrGetSymbolRC/PDMR3LdrGetSymbolR0.
- PGM_GST Enter() now reduces to
  "int rc = PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3); return rc;"; the legacy
  path that additionally called PGM_GST_NAME(MonitorCR3) when mappings were
  floating is gone.
- PGM_GST Exit() likewise reduces to calling PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
  the legacy UnmonitorCR3-then-UnmapCR3 sequence is gone.
- Both physical write access handlers for the guest CR3 are deleted outright:
  - pgmR3Gst32BitWriteHandlerCR3() performed the guest write (memcpy), then,
    if mappings were floating and no CR3 sync was already pending, computed
    the one or two page directory entries the write touched
    (iPD1/iPD2 from the page offset) and, if either was present and collided
    with a hypervisor mapping (pgmGetMapping), bumped
    StatR3GuestPDWriteConflict and set VM_FF_PGM_SYNC_CR3; it always counted
    StatR3GuestPDWrite and returned VINF_SUCCESS.
  - pgmR3GstPAEWriteHandlerCR3() performed the write, then scanned all four
    PDPTEs; for any present entry whose PD address no longer matched
    aGCPhysGstPaePDsMonitored[i] it set the PGM_SYNC_MONITOR_CR3 sync flag so
    monitoring would be updated at the next TLB flush, InvalidatePage or
    SyncCR3 (imperfect with half-updated PDPTEs, but guests are assumed to
    disable interrupts and use cmpxchg8b when updating a live PDPE). When a
    sync was already pending it set the flag anyway, so stale monitoring
    could not linger long enough for the pages to be reused as code pages and
    confuse the recompiler.
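The heart of the deleted PAE handler is its PDPE-change scan, which is worth
restating outside the handler boilerplate. This is a minimal sketch under
simplifying assumptions: raw uint64_t entries instead of the X86PDPE bitfield
union, and a boolean result instead of setting PGM_SYNC_MONITOR_CR3 in
fSyncFlags; the bit positions are the real x86 ones.

    /* Did a guest write to the PDPT retarget any present PD? The deleted
     * pgmR3GstPAEWriteHandlerCR3 simply checked all four entries instead of
     * working out which one(s) the write actually hit. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define X86_PDPE_P       UINT64_C(0x0000000000000001)
    #define X86_PDPE_PG_MASK UINT64_C(0x000FFFFFFFFFF000)

    static bool pdptNeedsRemonitor(const uint64_t aPdpt[4], const uint64_t aMonitored[4])
    {
        for (unsigned i = 0; i < 4; i++)
            if (   (aPdpt[i] & X86_PDPE_P)
                && (aPdpt[i] & X86_PDPE_PG_MASK) != aMonitored[i])
                return true; /* the real code scheduled a monitor update here */
        return false;
    }

    int main(void)
    {
        uint64_t aPdpt[4]      = { UINT64_C(0x3000) | X86_PDPE_P, 0, 0, 0 };
        uint64_t aMonitored[4] = { UINT64_C(0x2000), 0, 0, 0 };
        printf("needs remonitor: %d\n", pdptNeedsRemonitor(aPdpt, aMonitored));
        return 0;
    }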
trunk/src/VBox/VMM/PGMInternal.h
r17513 -> r17586

- The "#define VBOX_WITH_PGMPOOL_PAGING_ONLY" block itself ("Enable to use
  the PGM pool for all levels in the paging chain in all paging modes") is
  deleted: the behavior is now unconditional, and every #ifdef/#ifndef keyed
  on it throughout the VMM collapses accordingly.
- Fixed pool page indices are renumbered: PGMPOOL_IDX_PD stays 1 and
  PGMPOOL_IDX_FIRST drops from 10 to 5 (the listing elides the slots in
  between); the legacy numbering with PGMPOOL_IDX_PAE_PD = 2,
  PGMPOOL_IDX_PAE_PD_0..3 = 3..6, PGMPOOL_IDX_PDPT = 7,
  PGMPOOL_IDX_AMD64_CR3 = 8 and PGMPOOL_IDX_NESTED_ROOT = 9 is deleted.
  PGMPOOL_IDX_LAST remains 0x3fff (14 bits, inclusive).
- PGMPOOLKIND loses the legacy root kinds PGMPOOLKIND_ROOT_32BIT_PD,
  PGMPOOLKIND_ROOT_PAE_PD and PGMPOOLKIND_ROOT_PDPT;
  PGMPOOLKIND_ROOT_NESTED stays.
- PGMPOOLPAGE: the fCR3Mix member (guest mapping the page as a CR3, with its
  @todo about replacing it once CR3 shadowing used pool pages) is gone; only
  fLocked remains ("used to indicate that this page can't be flushed",
  important for CR3 root pages and shadow PAE PD pages).
- PGMMODEDATA and the per-VM PGM data both drop the R3/RC/R0
  pfnGstMonitorCR3 / pfnGstUnmonitorCR3 members and the
  pfnGst(PAE)WriteHandlerCR3 pointers together with their
  pszGst*WriteHandlerCR3 description strings.
- The per-VM PGM data also drops GCPhysGstCR3Monitored and the entire legacy
  shadow paging state: pShwRootR3/R0/RC and HCPhysShwCR3;
  pShw32BitPdR3/R0/RC and HCPhysShw32BitPD; apShwPaePDsR3/R0/RC[4] (four
  pointers into one virtually contiguous run, walkable as 2048 entries from
  apShwPaePDs[0]) and aHCPhysPaePDs[4] (explicitly *not* physically
  contiguous pages); pShwPaePdptR3/R0/RC and HCPhysShwPaePdpt;
  pShwNestedRootR3/R0 and HCPhysShwNestedRoot; plus the related alignment
  padding members.
- pgmPoolMapPageFallback() is no longer declared, and pgmPoolMapPageInlined()
  now ends in AssertFatalMsgFailed("pgmPoolMapPageInlined invalid page
  index") instead of falling back to it; the pgmMapClearShadowPDEs /
  pgmMapSetShadowPDEs / pgmShwSyncPaePDPtr prototypes become unconditional.
- The inline shadow accessors pgmShwGet32BitPDPtr(), pgmShwGetPaePDPTPtr(),
  both pgmShwGetPaePDPtr() overloads and pgmShwGetLongModePML4Ptr() keep only
  their pool-based bodies (PGMPOOL_PAGE_2_PTR_BY_PGM on CTX_SUFF(pShwPageCR3)
  or on the pool page located through the PDPT entry); the legacy bodies
  built on HCPhysShw* / CTX_SUFF(pShw*), including the AssertFailed() stub in
  the PDPT-taking pgmShwGetPaePDPtr() overload, are deleted.
- The pool page lock helpers (the "Locks a page to prevent flushing" routine
  and its unlock counterpart) lose their #ifdef wrapper; the following "Tells
  if mappings are to be put into the shadow page table or not" helper is
  unchanged.
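Since fLocked now fully replaces fCR3Mix, a tiny model of its contract may
help: a locked pool page (the active CR3 root or a shadow PAE PD) must
survive a pool flush. The sketch reduces PGMPOOLPAGE to the two fields
involved; everything else about the real pool (hashing, user tracking,
reinitialization) is deliberately elided.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef struct { uint16_t idx; bool fLocked; } POOLPAGE;

    static void poolLockPage(POOLPAGE *pPage)   { pPage->fLocked = true;  }
    static void poolUnlockPage(POOLPAGE *pPage) { pPage->fLocked = false; }

    /* Returns how many pages a flush may actually recycle. */
    static unsigned poolFlush(POOLPAGE *paPages, unsigned cPages)
    {
        unsigned cFreed = 0;
        for (unsigned i = 0; i < cPages; i++)
            if (!paPages[i].fLocked)   /* the pgmPoolIsPageLocked() gate */
                cFreed++;              /* the real code reinitializes the page */
        return cFreed;
    }

    int main(void)
    {
        POOLPAGE aPages[3] = { {0, false}, {1, false}, {2, false} };
        poolLockPage(&aPages[1]);      /* e.g. the active shadow CR3 root */
        printf("flushed %u of 3 pages\n", poolFlush(aPages, 3));
        poolUnlockPage(&aPages[1]);
        return 0;
    }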
trunk/src/VBox/VMM/PGMMap.cpp
r17489 -> r17586

- PGMR3MappingsFix: the step that turned off CR3 update monitoring
  (PGM_GST_PFN(UnmonitorCR3, pVM)(pVM), with AssertRC) before marking the
  mappings as fixed is removed.
- PGMR3MappingsUnfix now only clears the fixed-mapping state
  (GCPtrMappingFixed = 0, cbMappingFixed = 0) and returns VINF_SUCCESS; the
  legacy tail that set VM_FF_PGM_SYNC_CR3, flushed the page pool (paranoia
  for Windows using the CR3 page as both a PD and a PT, so the pool may be
  monitoring it), remapped CR3 via PGM_BTH_PFN(MapCR3) and re-enabled
  monitoring via PGM_GST_PFN(MonitorCR3) is deleted.
- The PDE-clearing helper now always calls pgmMapClearShadowPDEs(pVM,
  pVM->pgm.s.CTX_SUFF(pShwPageCR3), pMap, iOldPDE) and then clears only the
  intermediate context (pInterPD->a[iOldPDE] and, per 32-bit entry, the two
  PAE entries apInterPaePDs[iPD]->a[iPDE] and a[iPDE + 1]); the direct clears
  of pShw32BitPdR3, apShwPaePDsR3 and of the PGM_PLXFLAGS_MAPPING bit in
  pShwPaePdptR3 are gone.
- The PDE-setting helper likewise always calls pgmMapSetShadowPDEs(pVM, pMap,
  iNewPDE) and updates only the intermediate context: one 32-bit PDE
  (PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US |
  HCPhysPT) plus the two PAE PDEs addressed by iPD = iNewPDE / 256 and
  iPDE = iNewPDE * 2 % 512 (using HCPhysPaePT0 and HCPhysPaePT1, with an
  AssertFatal(iPDE < 512) between them). The legacy branches that
  pgmPoolFree()d previously present shadow PDEs (user indices PGMPOOL_IDX_PD
  and PGMPOOL_IDX_PAE_PD), mirrored the new PDEs into the shadow tables and
  set PGM_PLXFLAGS_MAPPING on the shadow PDPT entry are deleted.
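The surviving intermediate-context update encodes a useful invariant: one
32-bit PDE spans 4 MB, so it always corresponds to two consecutive 2 MB PAE
PDEs. The sketch below reproduces exactly the index arithmetic from the hunk
(iPD = iNewPDE / 256, iPDE = iNewPDE * 2 % 512); only the printout is added.

    #include <stdio.h>

    int main(void)
    {
        /* Walk across a PAE PD boundary to show the wrap from PD 0 to PD 1. */
        for (unsigned iNewPDE = 254; iNewPDE < 258; iNewPDE++)
        {
            unsigned iPD  = iNewPDE / 256;      /* which of the four PAE PDs  */
            unsigned iPDE = iNewPDE * 2 % 512;  /* first of the two PAE slots */
            printf("32-bit PDE %3u -> PAE PD %u, PDEs %u and %u\n",
                   iNewPDE, iPD, iPDE, iPDE + 1);
        }
        return 0;
    }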
trunk/src/VBox/VMM/PGMPool.cpp
r17489 -> r17586

- pgmR3PoolInit: the fixed pool pages are no longer backed by the old
  pre-allocated shadow tables:
  - PGMPOOL_IDX_PD now gets pvPageR3 = 0 and enmKind = PGMPOOLKIND_32BIT_PD
    (was pShw32BitPdR3 / PGMPOOLKIND_ROOT_32BIT_PD).
  - The legacy PGMPOOL_IDX_PAE_PD entry (the 4-page shadow PAE PD for 32-bit
    guests, PGMPOOLKIND_ROOT_PAE_PD) and the four PGMPOOL_IDX_PAE_PD_0..3
    entries for PAE guest mode (PGMPOOLKIND_PAE_PD_FOR_PAE_PD, backed by
    apShwPaePDsR3[i]) are deleted entirely.
  - PGMPOOL_IDX_PDPT now gets pvPageR3 = 0 and PGMPOOLKIND_PAE_PDPT (was
    pShwPaePdptR3 / PGMPOOLKIND_ROOT_PDPT).
  - PGMPOOL_IDX_AMD64_CR3 gets pvPageR3 = 0, dropping the old pShwPaePdptR3
    assignment that carried the comment "not used - isn't it wrong as well?".
  - PGMPOOL_IDX_NESTED_ROOT gets pvPageR3 = 0 (was pShwNestedRootR3).
  The Assert(VALID_PTR(pPool->aPages[iPage].pvPageR3)) over these special
  pages is dropped accordingly; Assert(idx == iPage) remains.
- Pool page growth now always uses MMR3PageAllocLow(), allocating every pool
  page below 4 GB because 32-bit guests need a page table root in low memory,
  instead of MMR3PageAlloc().
- The pool access handler's cheap-reuse test now checks
  pgmPoolIsPageLocked(&pVM->pgm.s, pPage) where the legacy code checked
  pPage->fCR3Mix, still combined with "cModifications < 96 /* it's cheaper
  here */" and cbBuf <= 4.
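The reuse condition in that last bullet is compact enough to restate on its
own. Below is a sketch of just that predicate over a trimmed-down page
struct; the threshold 96 and the 4-byte limit are taken from the diff, while
the surrounding handler control flow (what happens when the test fails) is
elided.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct { unsigned cModifications; bool fLocked; } POOLPAGE;

    /* Patch the shadow page in place instead of flushing it? Only for small
     * writes to pages that are rarely modified ("it's cheaper here") or that
     * may not be flushed at all (locked CR3 roots / shadow PAE PDs). */
    static bool handleWriteInPlace(const POOLPAGE *pPage, size_t cbBuf)
    {
        return (pPage->cModifications < 96 || pPage->fLocked) && cbBuf <= 4;
    }

    int main(void)
    {
        POOLPAGE busy = { 200, false }, locked = { 200, true };
        printf("hot page,    4-byte write: %d\n", handleWriteInPlace(&busy,   4));
        printf("locked root, 4-byte write: %d\n", handleWriteInPlace(&locked, 4));
        return 0;
    }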
trunk/src/VBox/VMM/PGMShw.h
r17559 -> r17586

- For PAE shadow paging, SHW_POOL_ROOT_IDX is now always PGMPOOL_IDX_PDPT;
  the legacy PGMPOOL_IDX_PAE_PD alternative is gone. SHW_PDPE_PG_MASK and
  SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES) are
  unchanged.
- PGM_SHW_DECL(int, Enter): the nested/EPT branch, which allocates the root
  pool page for the fake GCPhysCR3 = RT_BIT_64(63), stores it as
  pVM->pgm.s.CTX_SUFF(pShwPageCR3) and logs "Enter nested shadow paging
  mode: root ... phys ...", is kept unconditionally; the legacy branch that
  pointed pShwRootR0/R3 at the nested root, set
  HCPhysShwCR3 = HCPhysShwNestedRoot and called
  CPUMSetHyperCR3(pVM, PGMGetHyperCR3(pVM)) is deleted.
- PGM_SHW_DECL(int, Exit): the pool-based teardown (release pShwPageCR3,
  clear iShwUser/iShwUserTable) is kept; the legacy clearing of
  pShwRootR3/R0 and HCPhysShwCR3 under Assert(HWACCMIsNestedPagingActive(pVM))
  is deleted, and the "Leave nested shadow paging mode" log line stays.
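Two small constants from this header deserve a standalone restatement: the
fake GCPhysCR3 sentinel RT_BIT_64(63) used to key the nested/EPT root pool
page (no real guest physical address can have bit 63 set, given x86's 52-bit
physical address limit), and the 2048-entry total that SHW_TOTAL_PD_ENTRIES
expands to for PAE. The macro values below mirror the header; the program
merely prints them.

    #include <stdint.h>
    #include <stdio.h>

    #define RT_BIT_64(bit)          (UINT64_C(1) << (bit))
    #define X86_PG_PAE_ENTRIES      512 /* PDEs per PAE page directory   */
    #define X86_PG_PAE_PDPE_ENTRIES 4   /* PDs reachable from a PAE PDPT */

    int main(void)
    {
        uint64_t GCPhysCR3 = RT_BIT_64(63); /* sentinel key for the nested root */
        printf("fake nested-root key     : %#018llx\n", (unsigned long long)GCPhysCR3);
        printf("PAE SHW_TOTAL_PD_ENTRIES : %d\n",
               X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES);
        return 0;
    }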
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp
r17509 -> r17586

- The internal prototype pgmShwGetPaePoolPagePD() is now declared
  unconditionally.
- The TLB-flush / CR3-sync paths no longer re-register guest CR3 monitoring:
  the PGM_GST_PFN(MonitorCR3, pVM) calls and the
  Assert(pVM->pgm.s.GCPhysCR3 == pVM->pgm.s.GCPhysGstCR3Monitored) checks are
  gone, leaving just the PGM_SYNC_MONITOR_CR3 flag clearing and the
  !fMappingsFixed assertion. In the CR3 update path, GCPhysCR3 is stored and
  PGM_BTH_PFN(MapCR3, pVM)(pVM, GCPhysCR3) is invoked only when the value
  actually changed (the handling of VINF_PGM_SYNC_CR3 in ring-3 is
  unchanged).
- The PAE pool-page PD lookup gains a new
  Assert(pPdpe->u & X86_PDPE_PG_MASK) immediately before pgmPoolGetPage().
- pgmShwSyncPaePDPtr(): the PGMDynLockHCPage/PGMDynUnlockHCPage guards around
  pPdpe are now keyed on defined(IN_RC) alone, and the comment that changing
  a PDPT entry in 32-bit PAE mode must invalidate the TLB (the CPU fetches
  PDPTEs only on CR3 loads, so a non-present PDPT would keep faulting) stays.
- The long-mode PML4/PDPT sync keeps only the pool-based allocation that
  derives GCPml4/GCPdPt and the PGMPOOLKIND from HWACCMIsNestedPagingActive()
  and fPaging (CPUMGetGuestCR0(pVM) & X86_CR0_PG); the legacy branches that
  allocated from the guest PML4E/PDPE addresses
  (PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT / PGMPOOLKIND_64BIT_PD_FOR_64BIT_PD)
  or used the "GCPml4 + RT_BIT_64(63)" / "GCPdPt + RT_BIT_64(62)"
  make-the-address-unique hacks for AMD-V nested paging are deleted.
- The EPT PD lookup now reads the PML4 via
  PGMPOOL_PAGE_2_PTR_BY_PGM(pPGM, pPGM->CTX_SUFF(pShwPageCR3)) and allocates
  the PDPT/PD pool pages keyed on the plain GCPml4/GCPdPt values, again
  without the bit-63/bit-62 hacks.
- PGMGetHyperCR3(), PGMGetNestedCR3(), PGMGetHyper32BitCR3(),
  PGMGetHyperPaeCR3() and PGMGetHyperAmd64CR3() all collapse to
  Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3)) followed by
  return pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key; the legacy switch
  statements over the shadow mode that returned HCPhysShw32BitPD,
  HCPhysShwPaePdpt, HCPhysShwCR3 or HCPhysShwNestedRoot (with
  AssertMsgFailed(("enmShadowMode=%d\n", ...)) and return ~0 on unknown
  modes) are deleted.
- The dynamic page-mapping cache keeps the scanning allocator: starting from
  iDynPageMapLast it probes up to MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT slots
  and AssertRelease()s if none is free, instead of the legacy unconditional
  "(iPage + 1) & ((MM_HYPER_DYNAMIC_SIZE >> PAGE_SHIFT) - 1)" advance; it
  then records HCPhys in
  aHCPhysDynPageMapCache[iPage & (RT_ELEMENTS(aHCPhysDynPageMapCache) - 1)].
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h
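Context for the diff that follows: PGMAllBth.h is not an ordinary translation unit. It is included once per shadow/guest paging-mode pair with PGM_SHW_TYPE and PGM_GST_TYPE predefined, and PGM_BTH_DECL pastes the mode pair into each function name; that is why nearly every hunk below is wrapped in `#if PGM_SHW_TYPE ...` / `#if PGM_GST_TYPE ...`. A toy illustration of the token-pasting template idea (macro and mode names invented for the sketch):

    #include <stdio.h>

    /* In VirtualBox the body lives in a header re-included per mode; a
     * function-defining macro shows the same mechanism in one file. */
    #define DEFINE_GET_PD_INDEX(name, shift)                      \
        static unsigned long name(unsigned long GCPtr)            \
        {                                                         \
            return GCPtr >> (shift);  /* page-directory index */  \
        }

    DEFINE_GET_PD_INDEX(gstGetPdIndex32Bit, 22)  /* 32-bit guest: 4 MiB per PDE */
    DEFINE_GET_PD_INDEX(gstGetPdIndexPae,   21)  /* PAE guest:    2 MiB per PDE */

    int main(void)
    {
        unsigned long GCPtr = 0x00c01000UL;
        printf("32-bit PDE %lu, PAE PDE %lu\n",
               gstGetPdIndex32Bit(GCPtr), gstGetPdIndexPae(GCPtr)); /* 3 and 6 */
        return 0;
    }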
r17562 r17586 82 82 PGM_BTH_DECL(int, Trap0eHandler)(PVM pVM, RTGCUINT uErr, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault) 83 83 { 84 # if defined(IN_RC) && defined(VBOX_ WITH_PGMPOOL_PAGING_ONLY) && defined(VBOX_STRICT)84 # if defined(IN_RC) && defined(VBOX_STRICT) 85 85 PGMDynCheckLocks(pVM); 86 86 # endif … … 115 115 # if PGM_GST_TYPE == PGM_TYPE_PAE 116 116 unsigned iPDSrc; 117 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY118 117 X86PDPE PdpeSrc; 119 118 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, &PdpeSrc); 120 # else121 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, pvFault, &iPDSrc, NULL);122 # endif123 119 124 120 # elif PGM_GST_TYPE == PGM_TYPE_AMD64 … … 159 155 const unsigned iPDDst = (pvFault >> SHW_PD_SHIFT) & SHW_PD_MASK; /* pPDDst index, not used with the pool. */ 160 156 161 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY162 157 PX86PDPAE pPDDst; 163 158 # if PGM_GST_TYPE != PGM_TYPE_PAE … … 174 169 } 175 170 Assert(pPDDst); 176 177 # else178 PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, pvFault);179 180 /* Did we mark the PDPT as not present in SyncCR3? */181 unsigned iPdpt = (pvFault >> SHW_PDPT_SHIFT) & SHW_PDPT_MASK;182 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);183 if (!pPdptDst->a[iPdpt].n.u1Present)184 pPdptDst->a[iPdpt].n.u1Present = 1;185 # endif186 171 187 172 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 … … 943 928 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage); 944 929 945 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY946 930 /* Fetch the pgm pool shadow descriptor. */ 947 931 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3); 948 932 Assert(pShwPde); 949 # endif950 933 951 934 # elif PGM_SHW_TYPE == PGM_TYPE_PAE … … 961 944 } 962 945 963 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY964 946 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 965 947 PPGMPOOLPAGE pShwPde; … … 973 955 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde); 974 956 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst]; 975 # else976 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - pool index only atm! 
*/;
977       PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);
978   # endif
979 957
980 958   # else /* PGM_SHW_TYPE == PGM_TYPE_AMD64 */
981 959       /* PML4 */
982   # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
983       AssertReturn(pVM->pgm.s.pShwRootR3, VERR_INTERNAL_ERROR);
984   # endif
985
986 960       const unsigned iPml4 = (GCPtrPage >> X86_PML4_SHIFT) & X86_PML4_MASK;
987 961       const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64;
…
1165 1139     }
1166 1140 # endif /* PGM_GST_TYPE == PGM_TYPE_AMD64 */
1167
1168  # if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
1169      /*
1170       * Update the shadow PDPE and free all the shadow PD entries if the PDPE is marked not present.
1171       * Note: This shouldn't actually be necessary as we monitor the PDPT page for changes.
1172       */
1173      if (!pPDSrc)
1174      {
1175          /* Guest PDPE not present */
1176          PX86PDPAE pPDDst = pgmShwGetPaePDPtr(&pVM->pgm.s, GCPtrPage);
1177          PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);
1178
1179          Assert(!PdpeSrc.n.u1Present);
1180          LogFlow(("InvalidatePage: guest PDPE %d not present; clear shw pdpe\n", iPdpt));
1181
1182          /* for each page directory entry */
1183          for (unsigned iPD = 0; iPD < X86_PG_PAE_ENTRIES; iPD++)
1184          {
1185              if ( pPDDst->a[iPD].n.u1Present
1186                  && !(pPDDst->a[iPD].u & PGM_PDFLAGS_MAPPING))
1187              {
1188                  pgmPoolFree(pVM, pPDDst->a[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);
1189                  pPDDst->a[iPD].u = 0;
1190              }
1191          }
1192          if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))
1193              pPdptDst->a[iPdpt].n.u1Present = 0;
1194          PGM_INVL_GUEST_TLBS();
1195      }
1196      AssertMsg(pVM->pgm.s.fMappingsFixed || (PdpeSrc.u & X86_PDPE_PG_MASK) == pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt], ("%RGp vs %RGp (mon)\n", (PdpeSrc.u & X86_PDPE_PG_MASK), pVM->pgm.s.aGCPhysGstPaePDsMonitored[iPdpt]));
1197  # endif
1198 1141
1199 1142
…
1221 1164         LogFlow(("InvalidatePage: Out-of-sync at %RGp PdeSrc=%RX64 PdeDst=%RX64\n",
1222 1165
GCPtrPage, (uint64_t)PdeSrc.u, (uint64_t)PdeDst.u)); 1333 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)1334 1264 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1335 # else1336 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);1337 # endif1338 1265 pPdeDst->u = 0; 1339 1266 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePage4MBPages)); … … 1348 1275 if (!(PdeDst.u & PGM_PDFLAGS_MAPPING)) 1349 1276 { 1350 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)1351 1277 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, pShwPde->idx, iPDDst); 1352 # else1353 pgmPoolFree(pVM, PdeDst.u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPDDst);1354 # endif1355 1278 pPdeDst->u = 0; 1356 1279 STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,InvalidatePagePDNPs)); … … 1683 1606 PX86PDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage); 1684 1607 1685 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY1686 1608 /* Fetch the pgm pool shadow descriptor. */ 1687 1609 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3); 1688 1610 Assert(pShwPde); 1689 # endif1690 1611 1691 1612 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 1692 1693 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY1694 1613 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 1695 1614 PPGMPOOLPAGE pShwPde; … … 1703 1622 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde); 1704 1623 PX86PDEPAE pPdeDst = &pPDDst->a[iPDDst]; 1705 # else 1706 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! */; 1707 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT); 1708 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst); 1709 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage); 1710 AssertReturn(pPdeDst, VERR_INTERNAL_ERROR); 1711 # endif 1624 1712 1625 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 1713 1626 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; … … 1963 1876 */ 1964 1877 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool); 1965 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)1966 1878 pgmPoolFreeByPage(pPool, pShwPage, pShwPde->idx, iPDDst); 1967 # else1968 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPDDst);1969 # endif1970 1879 1971 1880 pPdeDst->u = 0; … … 2441 2350 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage); 2442 2351 2443 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY2444 2352 /* Fetch the pgm pool shadow descriptor. */ 2445 2353 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3); 2446 2354 Assert(pShwPde); 2447 # endif2448 2355 2449 2356 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 2450 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY2451 2357 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 2452 2358 PPGMPOOLPAGE pShwPde; … … 2461 2367 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde); 2462 2368 pPdeDst = &pPDDst->a[iPDDst]; 2463 # else 2464 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm! 
*/; 2465 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT); NOREF(iPdpt); 2466 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s); NOREF(pPdptDst); 2467 PSHWPDE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage); 2468 # endif 2369 2469 2370 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 2470 2371 const unsigned iPdpt = (GCPtrPage >> X86_PDPT_SHIFT) & X86_PDPT_MASK_AMD64; … … 2521 2422 Assert(!PdeDst.n.u1Present); /* We're only supposed to call SyncPT on PDE!P and conflicts.*/ 2522 2423 2523 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2424 # if defined(IN_RC) 2524 2425 /* Make sure the dynamic pPdeDst mapping will not be reused during this function. */ 2525 2426 PGMDynLockHCPage(pVM, (uint8_t *)pPdeDst); … … 2550 2451 GCPhys |= (iPDDst & 1) * (PAGE_SIZE / 2); 2551 2452 # endif 2552 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2553 2453 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage); 2554 # else2555 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);2556 # endif2557 2454 } 2558 2455 else … … 2563 2460 GCPhys |= GCPtrPage & (1 << X86_PD_PAE_SHIFT); 2564 2461 # endif 2565 # if PGM_GST_TYPE == PGM_TYPE_AMD64 || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2566 2462 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, pShwPde->idx, iPDDst, &pShwPage); 2567 # else2568 rc = pgmPoolAlloc(pVM, GCPhys, BTH_PGMPOOLKIND_PT_FOR_BIG, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);2569 # endif2570 2463 } 2571 2464 if (rc == VINF_SUCCESS) … … 2592 2485 } 2593 2486 *pPdeDst = PdeDst; 2594 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2487 # if defined(IN_RC) 2595 2488 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2596 2489 # endif … … 2600 2493 { 2601 2494 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3); 2602 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2495 # if defined(IN_RC) 2603 2496 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2604 2497 # endif … … 2633 2526 | (PdeSrc.u & ~(GST_PDE_PG_MASK | X86_PDE_AVL_MASK | X86_PDE_PCD | X86_PDE_PWT | X86_PDE_PS | X86_PDE4M_G | X86_PDE4M_D)); 2634 2527 *pPdeDst = PdeDst; 2635 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2528 # if defined(IN_RC) 2636 2529 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2637 2530 # endif … … 2739 2632 } 2740 2633 *pPdeDst = PdeDst; 2741 # if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2634 # if defined(IN_RC) 2742 2635 PGMDynUnlockHCPage(pVM, (uint8_t *)pPdeDst); 2743 2636 # endif … … 2891 2784 PSHWPDE pPdeDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, GCPtrPage); 2892 2785 2893 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY2894 2786 /* Fetch the pgm pool shadow descriptor. 
*/ 2895 2787 PPGMPOOLPAGE pShwPde = pVM->pgm.s.CTX_SUFF(pShwPageCR3); 2896 2788 Assert(pShwPde); 2897 # endif2898 2789 2899 2790 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 2900 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY2901 2791 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK; 2902 2792 PPGMPOOLPAGE pShwPde; … … 2911 2801 pPDDst = (PX86PDPAE)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPde); 2912 2802 pPdeDst = &pPDDst->a[iPDDst]; 2913 # else2914 const unsigned iPDDst = (GCPtrPage >> SHW_PD_SHIFT) /*& SHW_PD_MASK - only pool index atm!*/;2915 PX86PDEPAE pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage);2916 # endif2917 2803 2918 2804 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 … … 2970 2856 /* Virtual address = physical address */ 2971 2857 GCPhys = GCPtrPage & X86_PAGE_4K_BASE_MASK; 2972 # if PGM_SHW_TYPE == PGM_TYPE_AMD64 || PGM_SHW_TYPE == PGM_TYPE_EPT || defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)2973 2858 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, pShwPde->idx, iPDDst, &pShwPage); 2974 # else2975 rc = pgmPoolAlloc(pVM, GCPhys & ~(RT_BIT_64(SHW_PD_SHIFT) - 1), BTH_PGMPOOLKIND_PT_FOR_PT, SHW_POOL_ROOT_IDX, iPDDst, &pShwPage);2976 # endif2977 2859 2978 2860 if ( rc == VINF_SUCCESS … … 3032 2914 # elif PGM_GST_TYPE == PGM_TYPE_PAE 3033 2915 unsigned iPDSrc; 3034 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY3035 2916 X86PDPE PdpeSrc; 3036 2917 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc); 3037 # else3038 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);3039 # endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */3040 2918 if (!pPDSrc) 3041 2919 return VINF_SUCCESS; /* not present */ … … 3066 2944 const X86PDE PdeDst = pgmShwGet32BitPDE(&pVM->pgm.s, GCPtrPage); 3067 2945 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 3068 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY3069 2946 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK); 3070 2947 PX86PDPAE pPDDst; … … 3084 2961 Assert(pPDDst); 3085 2962 PdeDst = pPDDst->a[iPDDst]; 3086 # else3087 const X86PDEPAE PdeDst = pgmShwGetPaePDE(&pVM->pgm.s, GCPtrPage);3088 # endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */3089 2963 3090 2964 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 … … 3177 3051 # elif PGM_GST_TYPE == PGM_TYPE_PAE 3178 3052 unsigned iPDSrc; 3179 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY3180 3053 X86PDPE PdpeSrc; 3181 3054 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, &PdpeSrc); 3182 # else3183 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, GCPtrPage, &iPDSrc, NULL);3184 # endif3185 3055 3186 3056 if (pPDSrc) … … 3213 3083 # elif PGM_SHW_TYPE == PGM_TYPE_PAE 3214 3084 PX86PDEPAE pPdeDst; 3215 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY3216 3085 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK); 3217 3086 PX86PDPAE pPDDst; … … 3230 3099 Assert(pPDDst); 3231 3100 pPdeDst = &pPDDst->a[iPDDst]; 3232 # else 3233 pPdeDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrPage); 3234 # endif 3101 3235 3102 # elif PGM_SHW_TYPE == PGM_TYPE_AMD64 3236 3103 const unsigned iPDDst = ((GCPtrPage >> SHW_PD_SHIFT) & SHW_PD_MASK); … … 3409 3276 #else /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */ 3410 3277 3411 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY3412 3278 # ifdef PGM_WITHOUT_MAPPINGS 3413 3279 Assert(pVM->pgm.s.fMappingsFixed); … … 3427 3293 # endif 3428 3294 return VINF_SUCCESS; 3429 # else3430 /*3431 * PAE and 32-bit legacy mode (shadow).3432 * (Guest PAE, 32-bit legacy, protected and real modes.)3433 */3434 
Assert(fGlobal || (cr4 & X86_CR4_PGE));3435 MY_STAM_COUNTER_INC(fGlobal ? &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3Global) : &pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3NotGlobal));3436 3437 # if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE3438 bool const fBigPagesSupported = !!(CPUMGetGuestCR4(pVM) & X86_CR4_PSE);3439 3440 /*3441 * Get page directory addresses.3442 */3443 # if PGM_SHW_TYPE == PGM_TYPE_32BIT3444 PX86PDE pPDEDst = pgmShwGet32BitPDEPtr(&pVM->pgm.s, 0);3445 # else /* PGM_SHW_TYPE == PGM_TYPE_PAE */3446 # if PGM_GST_TYPE == PGM_TYPE_32BIT3447 PX86PDEPAE pPDEDst = NULL;3448 # endif3449 # endif3450 3451 # if PGM_GST_TYPE == PGM_TYPE_32BIT3452 PGSTPD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);3453 Assert(pPDSrc);3454 # if !defined(IN_RC) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)3455 Assert(PGMPhysGCPhys2R3PtrAssert(pVM, (RTGCPHYS)(cr3 & GST_CR3_PAGE_MASK), sizeof(*pPDSrc)) == (RTR3PTR)pPDSrc);3456 # endif3457 # endif /* PGM_GST_TYPE == PGM_TYPE_32BIT */3458 3459 /*3460 * Iterate the the CR3 page.3461 */3462 PPGMMAPPING pMapping;3463 unsigned iPdNoMapping;3464 const bool fRawR0Enabled = EMIsRawRing0Enabled(pVM);3465 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);3466 3467 /* Only check mappings if they are supposed to be put into the shadow page table. */3468 if (pgmMapAreMappingsEnabled(&pVM->pgm.s))3469 {3470 pMapping = pVM->pgm.s.CTX_SUFF(pMappings);3471 iPdNoMapping = (pMapping) ? (pMapping->GCPtr >> GST_PD_SHIFT) : ~0U;3472 }3473 else3474 {3475 pMapping = 0;3476 iPdNoMapping = ~0U;3477 }3478 3479 # if PGM_GST_TYPE == PGM_TYPE_PAE3480 for (uint64_t iPdpt = 0; iPdpt < GST_PDPE_ENTRIES; iPdpt++)3481 {3482 unsigned iPDSrc;3483 X86PDPE PdpeSrc;3484 PGSTPD pPDSrc = pgmGstGetPaePDPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT, &iPDSrc, &PdpeSrc);3485 PX86PDEPAE pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, iPdpt << X86_PDPT_SHIFT);3486 PX86PDPT pPdptDst = pgmShwGetPaePDPTPtr(&pVM->pgm.s);3487 3488 if (pPDSrc == NULL)3489 {3490 /* PDPE not present */3491 if (pPdptDst->a[iPdpt].n.u1Present)3492 {3493 LogFlow(("SyncCR3: guest PDPE %lld not present; clear shw pdpe\n", iPdpt));3494 /* for each page directory entry */3495 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)3496 {3497 if ( pPDEDst[iPD].n.u1Present3498 && !(pPDEDst[iPD].u & PGM_PDFLAGS_MAPPING))3499 {3500 pgmPoolFree(pVM, pPDEDst[iPD].u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdpt * X86_PG_PAE_ENTRIES + iPD);3501 pPDEDst[iPD].u = 0;3502 }3503 }3504 }3505 if (!(pPdptDst->a[iPdpt].u & PGM_PLXFLAGS_MAPPING))3506 pPdptDst->a[iPdpt].n.u1Present = 0;3507 continue;3508 }3509 # else /* PGM_GST_TYPE != PGM_TYPE_PAE */3510 {3511 # endif /* PGM_GST_TYPE != PGM_TYPE_PAE */3512 for (unsigned iPD = 0; iPD < RT_ELEMENTS(pPDSrc->a); iPD++)3513 {3514 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT3515 if ((iPD & 255) == 0) /* Start of new PD. */3516 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)iPD << GST_PD_SHIFT);3517 # endif3518 # if PGM_SHW_TYPE == PGM_TYPE_32BIT3519 Assert(pgmShwGet32BitPDEPtr(&pVM->pgm.s, (uint32_t)iPD << SHW_PD_SHIFT) == pPDEDst);3520 # elif PGM_SHW_TYPE == PGM_TYPE_PAE3521 # if defined(VBOX_STRICT) && !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) /* Unfortuantely not reliable with PGMR0DynMap and multiple VMs. 
*/3522 RTGCPTR GCPtrStrict = (uint32_t)iPD << GST_PD_SHIFT;3523 # if PGM_GST_TYPE == PGM_TYPE_PAE3524 GCPtrStrict |= iPdpt << X86_PDPT_SHIFT;3525 # endif3526 AssertMsg(pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict) == pPDEDst, ("%p vs %p (%RGv)\n", pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtrStrict), pPDEDst, GCPtrStrict));3527 # endif /* VBOX_STRICT */3528 # endif3529 GSTPDE PdeSrc = pPDSrc->a[iPD];3530 if ( PdeSrc.n.u1Present3531 && (PdeSrc.n.u1User || fRawR0Enabled))3532 {3533 # if ( PGM_GST_TYPE == PGM_TYPE_32BIT \3534 || PGM_GST_TYPE == PGM_TYPE_PAE) \3535 && !defined(PGM_WITHOUT_MAPPINGS)3536 3537 /*3538 * Check for conflicts with GC mappings.3539 */3540 # if PGM_GST_TYPE == PGM_TYPE_PAE3541 if (iPD + iPdpt * X86_PG_PAE_ENTRIES == iPdNoMapping)3542 # else3543 if (iPD == iPdNoMapping)3544 # endif3545 {3546 if (pVM->pgm.s.fMappingsFixed)3547 {3548 /* It's fixed, just skip the mapping. */3549 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;3550 Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);3551 iPD += cPTs - 1;3552 # if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */3553 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);3554 # else3555 pPDEDst += cPTs;3556 # endif3557 pMapping = pMapping->CTX_SUFF(pNext);3558 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;3559 continue;3560 }3561 # ifdef IN_RING33562 # if PGM_GST_TYPE == PGM_TYPE_32BIT3563 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);3564 # elif PGM_GST_TYPE == PGM_TYPE_PAE3565 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));3566 # endif3567 if (RT_FAILURE(rc))3568 return rc;3569 3570 /*3571 * Update iPdNoMapping and pMapping.3572 */3573 pMapping = pVM->pgm.s.pMappingsR3;3574 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))3575 pMapping = pMapping->pNextR3;3576 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;3577 # else /* !IN_RING3 */3578 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));3579 return VINF_PGM_SYNC_CR3;3580 # endif /* !IN_RING3 */3581 }3582 # else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */3583 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));3584 # endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */3585 3586 /*3587 * Sync page directory entry.3588 *3589 * The current approach is to allocated the page table but to set3590 * the entry to not-present and postpone the page table synching till3591 * it's actually used.3592 */3593 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT3594 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */3595 # elif PGM_GST_TYPE == PGM_TYPE_PAE3596 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES; NOREF(iPdShw);3597 # else3598 const unsigned iPdShw = iPD; NOREF(iPdShw);3599 # endif3600 {3601 SHWPDE PdeDst = *pPDEDst;3602 if (PdeDst.n.u1Present)3603 {3604 PPGMPOOLPAGE pShwPage = pgmPoolGetPage(pPool, PdeDst.u & SHW_PDE_PG_MASK);3605 RTGCPHYS GCPhys;3606 if ( !PdeSrc.b.u1Size3607 || !fBigPagesSupported)3608 {3609 GCPhys = PdeSrc.u & GST_PDE_PG_MASK;3610 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT3611 /* Select the right PDE as we're emulating a 4kb page table with 2 shadow page tables. 
*/3612 GCPhys |= i * (PAGE_SIZE / 2);3613 # endif3614 }3615 else3616 {3617 GCPhys = GST_GET_PDE_BIG_PG_GCPHYS(PdeSrc);3618 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT3619 /* Select the right PDE as we're emulating a 4MB page directory with two 2 MB shadow PDEs.*/3620 GCPhys |= i * X86_PAGE_2M_SIZE;3621 # endif3622 }3623 3624 if ( pShwPage->GCPhys == GCPhys3625 && pShwPage->enmKind == PGM_BTH_NAME(CalcPageKind)(&PdeSrc, cr4)3626 && ( pShwPage->fCached3627 || ( !fGlobal3628 && ( false3629 # ifdef PGM_SKIP_GLOBAL_PAGEDIRS_ON_NONGLOBAL_FLUSH3630 || ( (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)3631 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE)) /* global 2/4MB page. */3632 || ( !pShwPage->fSeenNonGlobal3633 && (cr4 & X86_CR4_PGE))3634 # endif3635 )3636 )3637 )3638 && ( (PdeSrc.u & (X86_PDE_US | X86_PDE_RW)) == (PdeDst.u & (X86_PDE_US | X86_PDE_RW))3639 || ( fBigPagesSupported3640 && ((PdeSrc.u & (X86_PDE_US | X86_PDE4M_PS | X86_PDE4M_D)) | PGM_PDFLAGS_TRACK_DIRTY)3641 == ((PdeDst.u & (X86_PDE_US | X86_PDE_RW | PGM_PDFLAGS_TRACK_DIRTY)) | X86_PDE4M_PS))3642 )3643 )3644 {3645 # ifdef VBOX_WITH_STATISTICS3646 if ( !fGlobal3647 && (PdeSrc.u & (X86_PDE4M_PS | X86_PDE4M_G)) == (X86_PDE4M_PS | X86_PDE4M_G)3648 && (cr4 & (X86_CR4_PGE | X86_CR4_PSE)) == (X86_CR4_PGE | X86_CR4_PSE))3649 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPD));3650 else if (!fGlobal && !pShwPage->fSeenNonGlobal && (cr4 & X86_CR4_PGE))3651 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstSkippedGlobalPT));3652 else3653 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstCacheHit));3654 # endif /* VBOX_WITH_STATISTICS */3655 /** @todo a replacement strategy isn't really needed unless we're using a very small pool < 512 pages.3656 * The whole ageing stuff should be put in yet another set of #ifdefs. For now, let's just skip it. 
*/3657 //# ifdef PGMPOOL_WITH_CACHE3658 // pgmPoolCacheUsed(pPool, pShwPage);3659 //# endif3660 }3661 else3662 {3663 pgmPoolFreeByPage(pPool, pShwPage, SHW_POOL_ROOT_IDX, iPdShw);3664 pPDEDst->u = 0;3665 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreed));3666 }3667 }3668 else3669 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstNotPresent));3670 3671 /* advance */3672 pPDEDst++;3673 } /* foreach 2MB PAE PDE in 4MB guest PDE */3674 }3675 # if PGM_GST_TYPE == PGM_TYPE_PAE3676 else if (iPD + iPdpt * X86_PG_PAE_ENTRIES != iPdNoMapping)3677 # else3678 else if (iPD != iPdNoMapping)3679 # endif3680 {3681 /*3682 * Check if there is any page directory to mark not present here.3683 */3684 # if PGM_SHW_TYPE == PGM_TYPE_PAE && PGM_GST_TYPE == PGM_TYPE_32BIT3685 for (unsigned i = 0, iPdShw = iPD * 2; i < 2; i++, iPdShw++) /* pray that the compiler unrolls this */3686 # elif PGM_GST_TYPE == PGM_TYPE_PAE3687 const unsigned iPdShw = iPD + iPdpt * X86_PG_PAE_ENTRIES;3688 # else3689 const unsigned iPdShw = iPD;3690 # endif3691 {3692 if (pPDEDst->n.u1Present)3693 {3694 pgmPoolFree(pVM, pPDEDst->u & SHW_PDE_PG_MASK, SHW_POOL_ROOT_IDX, iPdShw);3695 pPDEDst->u = 0;3696 MY_STAM_COUNTER_INC(&pVM->pgm.s.CTX_MID_Z(Stat,SyncCR3DstFreedSrcNP));3697 }3698 pPDEDst++;3699 }3700 }3701 else3702 {3703 # if ( PGM_GST_TYPE == PGM_TYPE_32BIT \3704 || PGM_GST_TYPE == PGM_TYPE_PAE) \3705 && !defined(PGM_WITHOUT_MAPPINGS)3706 3707 const unsigned cPTs = pMapping->cb >> GST_PD_SHIFT;3708 3709 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));3710 if (pVM->pgm.s.fMappingsFixed)3711 {3712 /* It's fixed, just skip the mapping. */3713 pMapping = pMapping->CTX_SUFF(pNext);3714 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;3715 }3716 else3717 {3718 /*3719 * Check for conflicts for subsequent pagetables3720 * and advance to the next mapping.3721 */3722 iPdNoMapping = ~0U;3723 unsigned iPT = cPTs;3724 while (iPT-- > 1)3725 {3726 if ( pPDSrc->a[iPD + iPT].n.u1Present3727 && (pPDSrc->a[iPD + iPT].n.u1User || fRawR0Enabled))3728 {3729 # ifdef IN_RING33730 # if PGM_GST_TYPE == PGM_TYPE_32BIT3731 int rc = pgmR3SyncPTResolveConflict(pVM, pMapping, pPDSrc, iPD << GST_PD_SHIFT);3732 # elif PGM_GST_TYPE == PGM_TYPE_PAE3733 int rc = pgmR3SyncPTResolveConflictPAE(pVM, pMapping, (iPdpt << GST_PDPT_SHIFT) + (iPD << GST_PD_SHIFT));3734 # endif3735 if (RT_FAILURE(rc))3736 return rc;3737 3738 /*3739 * Update iPdNoMapping and pMapping.3740 */3741 pMapping = pVM->pgm.s.CTX_SUFF(pMappings);3742 while (pMapping && pMapping->GCPtr < (iPD << GST_PD_SHIFT))3743 pMapping = pMapping->CTX_SUFF(pNext);3744 iPdNoMapping = pMapping ? pMapping->GCPtr >> GST_PD_SHIFT : ~0U;3745 break;3746 # else3747 LogFlow(("SyncCR3: detected conflict -> VINF_PGM_SYNC_CR3\n"));3748 return VINF_PGM_SYNC_CR3;3749 # endif3750 }3751 }3752 if (iPdNoMapping == ~0U && pMapping)3753 {3754 pMapping = pMapping->CTX_SUFF(pNext);3755 if (pMapping)3756 iPdNoMapping = pMapping->GCPtr >> GST_PD_SHIFT;3757 }3758 }3759 3760 /* advance. 
*/3761 Assert(PGM_GST_TYPE == PGM_TYPE_32BIT || (iPD + cPTs - 1) / X86_PG_PAE_ENTRIES == iPD / X86_PG_PAE_ENTRIES);3762 iPD += cPTs - 1;3763 # if PGM_SHW_TYPE != PGM_GST_TYPE /* SHW==PAE && GST==32BIT */3764 pPDEDst = pgmShwGetPaePDEPtr(&pVM->pgm.s, (uint32_t)(iPD + 1) << GST_PD_SHIFT);3765 # else3766 pPDEDst += cPTs;3767 # endif3768 # if PGM_GST_TYPE != PGM_SHW_TYPE3769 AssertCompile(PGM_GST_TYPE == PGM_TYPE_32BIT && PGM_SHW_TYPE == PGM_TYPE_PAE);3770 # endif3771 # else /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */3772 Assert(!pgmMapAreMappingsEnabled(&pVM->pgm.s));3773 # endif /* (PGM_GST_TYPE != PGM_TYPE_32BIT && PGM_GST_TYPE != PGM_TYPE_PAE) || PGM_WITHOUT_MAPPINGS */3774 }3775 3776 } /* for iPD */3777 } /* for each PDPTE (PAE) */3778 return VINF_SUCCESS;3779 3780 # else /* guest real and protected mode */3781 return VINF_SUCCESS;3782 # endif3783 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */3784 3295 #endif /* PGM_SHW_TYPE != PGM_TYPE_NESTED && PGM_SHW_TYPE != PGM_TYPE_EPT && PGM_SHW_TYPE != PGM_TYPE_AMD64 */ 3785 3296 } … … 4694 4205 pVM->pgm.s.pGstAmd64Pml4R0 = (R0PTRTYPE(PX86PML4))HCPtrGuestCR3; 4695 4206 # endif 4696 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY4697 if (!HWACCMIsNestedPagingActive(pVM))4698 {4699 /*4700 * Update the shadow root page as well since that's not fixed.4701 */4702 /** @todo Move this into PGMAllBth.h. */4703 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);4704 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))4705 {4706 /* It might have been freed already by a pool flush (see e.g. PGMR3MappingsUnfix). */4707 /** @todo Coordinate this better with the pool. */4708 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3)->enmKind != PGMPOOLKIND_FREE)4709 pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);4710 pVM->pgm.s.pShwPageCR3R3 = 0;4711 pVM->pgm.s.pShwPageCR3R0 = 0;4712 pVM->pgm.s.pShwRootR3 = 0;4713 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4714 pVM->pgm.s.pShwRootR0 = 0;4715 # endif4716 pVM->pgm.s.HCPhysShwCR3 = 0;4717 }4718 4719 Assert(!(GCPhysCR3 >> (PAGE_SHIFT + 32)));4720 rc = pgmPoolAlloc(pVM, GCPhysCR3, PGMPOOLKIND_64BIT_PML4, PGMPOOL_IDX_AMD64_CR3, GCPhysCR3 >> PAGE_SHIFT, &pVM->pgm.s.CTX_SUFF(pShwPageCR3));4721 if (rc == VERR_PGM_POOL_FLUSHED)4722 {4723 Log(("MapCR3: PGM pool flushed -> signal sync cr3\n"));4724 Assert(VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));4725 return VINF_PGM_SYNC_CR3;4726 }4727 AssertRCReturn(rc, rc);4728 # ifdef IN_RING04729 pVM->pgm.s.pShwPageCR3R3 = MMHyperCCToR3(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));4730 # else4731 pVM->pgm.s.pShwPageCR3R0 = MMHyperCCToR0(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3));4732 # endif4733 pVM->pgm.s.pShwRootR3 = (R3PTRTYPE(void *))pVM->pgm.s.CTX_SUFF(pShwPageCR3)->pvPageR3;4734 Assert(pVM->pgm.s.pShwRootR3);4735 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4736 pVM->pgm.s.pShwRootR0 = (R0PTRTYPE(void *))PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pVM->pgm.s.CTX_SUFF(pShwPageCR3));4737 # endif4738 pVM->pgm.s.HCPhysShwCR3 = pVM->pgm.s.CTX_SUFF(pShwPageCR3)->Core.Key;4739 rc = VINF_SUCCESS; /* clear it - pgmPoolAlloc returns hints. */4740 }4741 # endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */4742 4207 # endif 4743 4208 } … … 4752 4217 #endif 4753 4218 4754 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY4755 4219 /* Update shadow paging info for guest modes with paging (32, pae, 64). 
*/ 4756 4220 # if ( ( PGM_SHW_TYPE == PGM_TYPE_32BIT \ … … 4838 4302 4839 4303 # endif 4840 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */4841 4304 4842 4305 return rc; … … 4884 4347 pVM->pgm.s.pGstAmd64Pml4R0 = 0; 4885 4348 # endif 4886 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY4887 if (!HWACCMIsNestedPagingActive(pVM))4888 {4889 pVM->pgm.s.pShwRootR3 = 0;4890 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE4891 pVM->pgm.s.pShwRootR0 = 0;4892 # endif4893 pVM->pgm.s.HCPhysShwCR3 = 0;4894 if (pVM->pgm.s.CTX_SUFF(pShwPageCR3))4895 {4896 PPGMPOOL pPool = pVM->pgm.s.CTX_SUFF(pPool);4897 pgmPoolFreeByPage(pPool, pVM->pgm.s.CTX_SUFF(pShwPageCR3), PGMPOOL_IDX_AMD64_CR3, pVM->pgm.s.CTX_SUFF(pShwPageCR3)->GCPhys >> PAGE_SHIFT);4898 pVM->pgm.s.pShwPageCR3R3 = 0;4899 pVM->pgm.s.pShwPageCR3R0 = 0;4900 }4901 }4902 # endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */4903 4349 4904 4350 #else /* prot/real mode stub */ … … 4906 4352 #endif 4907 4353 4908 #if defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) &&!defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time */4354 #if !defined(IN_RC) /* In RC we rely on MapCR3 to do the shadow part for us at a safe time */ 4909 4355 /* Update shadow paging info. */ 4910 4356 # if ( ( PGM_SHW_TYPE == PGM_TYPE_32BIT \ … … 4939 4385 } 4940 4386 # endif 4941 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY &&!IN_RC*/4387 #endif /* !IN_RC*/ 4942 4388 4943 4389 return rc; -
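The MapCR3/UnmapCR3 and SyncCR3 hunks above keep one recurring pattern: pgmPoolAlloc may return VERR_PGM_POOL_FLUSHED, which is not treated as a failure but converted into the informational VINF_PGM_SYNC_CR3 after raising VM_FF_PGM_SYNC_CR3. A condensed sketch of that control flow (status values and helper names are stand-ins, not the real VBox definitions):

    #include <stdio.h>

    #define VINF_SUCCESS            0
    #define VINF_PGM_SYNC_CR3       1      /* informational: redo the sync later */
    #define VERR_PGM_POOL_FLUSHED (-100)   /* the page pool was thrown away      */

    static int poolAlloc(int fSimulateFlush)
    {
        return fSimulateFlush ? VERR_PGM_POOL_FLUSHED : VINF_SUCCESS;
    }

    static int mapCR3(int fSimulateFlush, int *pfForceSyncCR3)
    {
        int rc = poolAlloc(fSimulateFlush);
        if (rc == VERR_PGM_POOL_FLUSHED)
        {
            *pfForceSyncCR3 = 1;        /* VM_FF_PGM_SYNC_CR3 in the real code */
            return VINF_PGM_SYNC_CR3;   /* caller reschedules, nothing failed  */
        }
        return rc;
    }

    int main(void)
    {
        int fForce = 0;
        int rc = mapCR3(1 /* pretend the pool was flushed */, &fForce);
        printf("rc=%d force-sync=%d\n", rc, fForce);  /* rc=1 force-sync=1 */
        return 0;
    }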
trunk/src/VBox/VMM/VMMAll/PGMAllGst.h
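Context for the diff that follows: PGMAllGst.h supplies the per-guest-mode accessors (GetPage, ModifyPage, GetPDE, ...), reached through mode-specific function pointers such as the PGM_GST_PFN(MonitorCR3, pVM) call sites deleted earlier in this changeset; the hunks below drop the MonitorCR3/UnmonitorCR3 and CR3 write-handler entries from that set. A reduced sketch of the dispatch pattern (names illustrative only):

    #include <stdio.h>

    typedef int (*PFNGSTGETPDE)(unsigned long GCPtr, unsigned long *puPde);

    static int gstGetPDEReal(unsigned long GCPtr, unsigned long *puPde)
    {
        *puPde = GCPtr & ~0xfffUL;                 /* no paging: identity */
        return 0;
    }

    static int gstGetPDE32Bit(unsigned long GCPtr, unsigned long *puPde)
    {
        *puPde = 0x1000UL + (GCPtr >> 22) * 4;     /* toy 32-bit PD lookup */
        return 0;
    }

    typedef struct GSTMODEDATA { PFNGSTGETPDE pfnGetPDE; } GSTMODEDATA;

    int main(void)
    {
        GSTMODEDATA aModes[] = { { gstGetPDEReal }, { gstGetPDE32Bit } };
        unsigned long uPde;
        aModes[1].pfnGetPDE(0x00c00000UL, &uPde);  /* dispatch on current guest mode */
        printf("%#lx\n", uPde);                    /* 0x100c */
        return 0;
    }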
r17215 r17586 28 28 PGM_GST_DECL(int, ModifyPage)(PVM pVM, RTGCPTR GCPtr, size_t cb, uint64_t fFlags, uint64_t fMask); 29 29 PGM_GST_DECL(int, GetPDE)(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPDE); 30 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY31 PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);32 PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);33 #endif34 30 PGM_GST_DECL(bool, HandlerVirtualUpdate)(PVM pVM, uint32_t cr4); 35 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY36 # ifndef IN_RING337 PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);38 # if PGM_GST_TYPE == PGM_TYPE_PAE \39 || PGM_GST_TYPE == PGM_TYPE_AMD6440 PGM_GST_DECL(int, PAEWriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser);41 # endif42 # endif43 #endif44 31 __END_DECLS 45 32 … … 311 298 #endif 312 299 } 313 314 315 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY316 317 #undef LOG_GROUP318 #define LOG_GROUP LOG_GROUP_PGM_POOL319 320 /**321 * Registers physical page monitors for the necessary paging322 * structures to detect conflicts with our guest mappings.323 *324 * This is always called after mapping CR3.325 * This is never called with fixed mappings.326 *327 * @returns VBox status, no specials.328 * @param pVM VM handle.329 * @param GCPhysCR3 The physical address in the CR3 register.330 */331 PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3)332 {333 Assert(!pVM->pgm.s.fMappingsFixed);334 int rc = VINF_SUCCESS;335 336 /*337 * Register/Modify write phys handler for guest's CR3 if it changed.338 */339 #if PGM_GST_TYPE == PGM_TYPE_32BIT340 341 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)342 {343 # ifndef PGMPOOL_WITH_MIXED_PT_CR3344 const unsigned cbCR3Stuff = PGM_GST_TYPE == PGM_TYPE_PAE ? 32 : PAGE_SIZE;345 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)346 rc = PGMHandlerPhysicalModify(pVM, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1);347 else348 rc = PGMHandlerPhysicalRegisterEx(pVM, PGMPHYSHANDLERTYPE_PHYSICAL_WRITE, GCPhysCR3, GCPhysCR3 + cbCR3Stuff - 1,349 pVM->pgm.s.pfnR3GstWriteHandlerCR3, 0,350 pVM->pgm.s.pfnR0GstWriteHandlerCR3, 0,351 pVM->pgm.s.pfnRCGstWriteHandlerCR3, 0,352 pVM->pgm.s.pszR3GstWriteHandlerCR3);353 # else /* PGMPOOL_WITH_MIXED_PT_CR3 */354 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),355 pVM->pgm.s.enmShadowMode == PGMMODE_PAE356 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX357 ? 
PGMPOOL_IDX_PAE_PD358 : PGMPOOL_IDX_PD,359 GCPhysCR3);360 # endif /* PGMPOOL_WITH_MIXED_PT_CR3 */361 if (RT_FAILURE(rc))362 {363 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",364 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));365 return rc;366 }367 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;368 }369 370 #elif PGM_GST_TYPE == PGM_TYPE_PAE371 /* Monitor the PDPT page */372 /*373 * Register/Modify write phys handler for guest's CR3 if it changed.374 */375 # ifndef PGMPOOL_WITH_MIXED_PT_CR3376 AssertFailed();377 # endif378 if (pVM->pgm.s.GCPhysGstCR3Monitored != GCPhysCR3)379 {380 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT, GCPhysCR3);381 if (RT_FAILURE(rc))382 {383 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",384 rc, pVM->pgm.s.GCPhysGstCR3Monitored, GCPhysCR3));385 return rc;386 }387 pVM->pgm.s.GCPhysGstCR3Monitored = GCPhysCR3;388 }389 390 /*391 * Do the 4 PDs.392 */393 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);394 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)395 {396 if (pGuestPDPT->a[i].n.u1Present)397 {398 RTGCPHYS GCPhys = pGuestPDPT->a[i].u & X86_PDPE_PG_MASK;399 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != GCPhys)400 {401 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);402 403 rc = pgmPoolMonitorMonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i, GCPhys);404 }405 406 if (RT_FAILURE(rc))407 {408 AssertMsgFailed(("PGMHandlerPhysicalModify/PGMR3HandlerPhysicalRegister failed, rc=%Rrc GCPhysGstCR3Monitored=%RGp GCPhysCR3=%RGp\n",409 rc, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i], GCPhys));410 return rc;411 }412 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = GCPhys;413 }414 else if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)415 {416 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);417 AssertRC(rc);418 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;419 }420 }421 422 #else423 /* prot/real/amd64 mode stub */424 425 #endif426 return rc;427 }428 429 /**430 * Deregisters any physical page monitors installed by MonitorCR3.431 *432 * @returns VBox status code, no specials.433 * @param pVM The VM handle.434 */435 PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM)436 {437 int rc = VINF_SUCCESS;438 439 /*440 * Deregister the access handlers.441 *442 * PGMSyncCR3 will reinstall it if required and PGMSyncCR3 will be executed443 * before we enter GC again.444 */445 #if PGM_GST_TYPE == PGM_TYPE_32BIT446 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)447 {448 # ifndef PGMPOOL_WITH_MIXED_PT_CR3449 rc = PGMHandlerPhysicalDeregister(pVM, pVM->pgm.s.GCPhysGstCR3Monitored);450 AssertRCReturn(rc, rc);451 # else /* PGMPOOL_WITH_MIXED_PT_CR3 */452 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool),453 pVM->pgm.s.enmShadowMode == PGMMODE_PAE454 || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX455 ? 
PGMPOOL_IDX_PAE_PD456 : PGMPOOL_IDX_PD);457 AssertRCReturn(rc, rc);458 # endif /* PGMPOOL_WITH_MIXED_PT_CR3 */459 pVM->pgm.s.GCPhysGstCR3Monitored = NIL_RTGCPHYS;460 }461 462 #elif PGM_GST_TYPE == PGM_TYPE_PAE463 /* The PDPT page */464 # ifndef PGMPOOL_WITH_MIXED_PT_CR3465 AssertFailed();466 # endif467 468 if (pVM->pgm.s.GCPhysGstCR3Monitored != NIL_RTGCPHYS)469 {470 rc = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PDPT);471 AssertRC(rc);472 }473 474 /* The 4 PDs. */475 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)476 {477 if (pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] != NIL_RTGCPHYS)478 {479 Assert(pVM->pgm.s.enmShadowMode == PGMMODE_PAE || pVM->pgm.s.enmShadowMode == PGMMODE_PAE_NX);480 int rc2 = pgmPoolMonitorUnmonitorCR3(pVM->pgm.s.CTX_SUFF(pPool), PGMPOOL_IDX_PAE_PD_0 + i);481 AssertRC(rc2);482 if (RT_FAILURE(rc2))483 rc = rc2;484 pVM->pgm.s.aGCPhysGstPaePDsMonitored[i] = NIL_RTGCPHYS;485 }486 }487 #else488 /* prot/real/amd64 mode stub */489 #endif490 return rc;491 492 }493 494 #undef LOG_GROUP495 #define LOG_GROUP LOG_GROUP_PGM496 497 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */498 300 499 301 … … 704 506 } 705 507 706 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY707 708 #if PGM_GST_TYPE == PGM_TYPE_32BIT && !defined(IN_RING3)709 710 /**711 * Write access handler for the Guest CR3 page in 32-bit mode.712 *713 * This will try interpret the instruction, if failure fail back to the recompiler.714 * Check if the changed PDEs are marked present and conflicts with our715 * mappings. If conflict, we'll switch to the host context and resolve it there716 *717 * @returns VBox status code (appropritate for trap handling and GC return).718 * @param pVM VM Handle.719 * @param uErrorCode CPU Error code.720 * @param pRegFrame Trap register frame.721 * @param pvFault The fault address (cr2).722 * @param GCPhysFault The GC physical address corresponding to pvFault.723 * @param pvUser User argument.724 */725 PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)726 {727 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));728 729 /*730 * Try interpret the instruction.731 */732 uint32_t cb;733 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);734 if (RT_SUCCESS(rc) && cb)735 {736 /*737 * Check if the modified PDEs are present and mappings.738 */739 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;740 const unsigned iPD1 = offPD / sizeof(X86PDE);741 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDE);742 743 Assert(cb > 0 && cb <= 8);744 Assert(iPD1 < X86_PG_ENTRIES);745 Assert(iPD2 < X86_PG_ENTRIES);746 747 #ifdef DEBUG748 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD1, iPD1 << X86_PD_SHIFT));749 if (iPD1 != iPD2)750 Log(("pgmXXGst32BitWriteHandlerCR3: emulated change to PD %#x addr=%x\n", iPD2, iPD2 << X86_PD_SHIFT));751 #endif752 753 if (!pVM->pgm.s.fMappingsFixed)754 {755 PX86PD pPDSrc = pgmGstGet32bitPDPtr(&pVM->pgm.s);756 if ( ( pPDSrc->a[iPD1].n.u1Present757 && pgmGetMapping(pVM, (RTGCPTR)(iPD1 << X86_PD_SHIFT)) )758 || ( iPD1 != iPD2759 && pPDSrc->a[iPD2].n.u1Present760 && pgmGetMapping(pVM, (RTGCPTR)(iPD2 << X86_PD_SHIFT)) )761 )762 {763 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);764 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);765 if (rc == VINF_SUCCESS)766 rc = VINF_PGM_SYNC_CR3;767 Log(("pgmXXGst32BitWriteHandlerCR3: detected conflict iPD1=%#x iPD2=%#x - returns %Rrc\n", iPD1, iPD2, 
rc));768 return rc;769 }770 }771 772 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);773 }774 else775 {776 Assert(RT_FAILURE(rc));777 if (rc == VERR_EM_INTERPRETER)778 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;779 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));780 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);781 }782 return rc;783 }784 785 #endif /* PGM_TYPE_32BIT && !IN_RING3 */786 #if PGM_GST_TYPE == PGM_TYPE_PAE && !defined(IN_RING3)787 788 /**789 * Write access handler for the Guest CR3 page in PAE mode.790 *791 * This will try interpret the instruction, if failure fail back to the recompiler.792 * Check if the changed PDEs are marked present and conflicts with our793 * mappings. If conflict, we'll switch to the host context and resolve it there794 *795 * @returns VBox status code (appropritate for trap handling and GC return).796 * @param pVM VM Handle.797 * @param uErrorCode CPU Error code.798 * @param pRegFrame Trap register frame.799 * @param pvFault The fault address (cr2).800 * @param GCPhysFault The GC physical address corresponding to pvFault.801 * @param pvUser User argument.802 */803 PGM_GST_DECL(int, WriteHandlerCR3)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)804 {805 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));806 807 /*808 * Try interpret the instruction.809 */810 uint32_t cb;811 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);812 if (RT_SUCCESS(rc) && cb)813 {814 /*815 * Check if any of the PDs have changed.816 * We'll simply check all of them instead of figuring out which one/two to check.817 */818 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);819 for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)820 {821 if ( pGuestPDPT->a[i].n.u1Present822 && (pGuestPDPT->a[i].u & X86_PDPE_PG_MASK)823 != pVM->pgm.s.aGCPhysGstPaePDsMonitored[i])824 {825 /*826 * The PDPE has changed.827 * We will schedule a monitoring update for the next TLB Flush,828 * InvalidatePage or SyncCR3.829 *830 * This isn't perfect, because a lazy page sync might be dealing with an half831 * updated PDPE. However, we assume that the guest OS is disabling interrupts832 * and being extremely careful (cmpxchg8b) when updating a PDPE where it's833 * executing.834 */835 pVM->pgm.s.fSyncFlags |= PGM_SYNC_MONITOR_CR3;836 Log(("pgmXXGstPaeWriteHandlerCR3: detected updated PDPE; [%d] = %#llx, Old GCPhys=%RGp\n",837 i, pGuestPDPT->a[i].u, pVM->pgm.s.aGCPhysGstPaePDsMonitored[i]));838 }839 }840 841 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);842 }843 else844 {845 Assert(RT_FAILURE(rc));846 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);847 if (rc == VERR_EM_INTERPRETER)848 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;849 }850 Log(("pgmXXGstPaeWriteHandlerCR3: returns %Rrc\n", rc));851 return rc;852 }853 854 855 /**856 * Write access handler for the Guest PDs in PAE mode.857 *858 * This will try interpret the instruction, if failure fail back to the recompiler.859 * Check if the changed PDEs are marked present and conflicts with our860 * mappings. 
If conflict, we'll switch to the host context and resolve it there861 *862 * @returns VBox status code (appropritate for trap handling and GC return).863 * @param pVM VM Handle.864 * @param uErrorCode CPU Error code.865 * @param pRegFrame Trap register frame.866 * @param pvFault The fault address (cr2).867 * @param GCPhysFault The GC physical address corresponding to pvFault.868 * @param pvUser User argument.869 */870 PGM_GST_DECL(int, WriteHandlerPD)(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault, RTGCPHYS GCPhysFault, void *pvUser)871 {872 AssertMsg(!pVM->pgm.s.fMappingsFixed, ("Shouldn't be registered when mappings are fixed!\n"));873 874 /*875 * Try interpret the instruction.876 */877 uint32_t cb;878 int rc = EMInterpretInstruction(pVM, pRegFrame, pvFault, &cb);879 if (RT_SUCCESS(rc) && cb)880 {881 /*882 * Figure out which of the 4 PDs this is.883 */884 RTGCPTR i;885 PX86PDPT pGuestPDPT = pgmGstGetPaePDPTPtr(&pVM->pgm.s);886 for (i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)887 if (pGuestPDPT->a[i].u == (GCPhysFault & X86_PTE_PAE_PG_MASK))888 {889 PX86PDPAE pPDSrc = pgmGstGetPaePD(&pVM->pgm.s, i << X86_PDPT_SHIFT);890 const RTGCPTR offPD = GCPhysFault & PAGE_OFFSET_MASK;891 const unsigned iPD1 = offPD / sizeof(X86PDEPAE);892 const unsigned iPD2 = (offPD + cb - 1) / sizeof(X86PDEPAE);893 894 Assert(cb > 0 && cb <= 8);895 Assert(iPD1 < X86_PG_PAE_ENTRIES);896 Assert(iPD2 < X86_PG_PAE_ENTRIES);897 898 # ifdef LOG_ENABLED899 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD1=%#05x (%x)\n",900 i, iPD1, (i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT)));901 if (iPD1 != iPD2)902 Log(("pgmXXGstPaeWriteHandlerPD: emulated change to i=%d iPD2=%#05x (%x)\n",903 i, iPD2, (i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT)));904 # endif905 906 if (!pVM->pgm.s.fMappingsFixed)907 {908 if ( ( pPDSrc->a[iPD1].n.u1Present909 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD1 << X86_PD_PAE_SHIFT))) )910 || ( iPD1 != iPD2911 && pPDSrc->a[iPD2].n.u1Present912 && pgmGetMapping(pVM, (RTGCPTR)((i << X86_PDPT_SHIFT) | (iPD2 << X86_PD_PAE_SHIFT))) )913 )914 {915 Log(("pgmXXGstPaeWriteHandlerPD: detected conflict iPD1=%#x iPD2=%#x\n", iPD1, iPD2));916 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteConflict);917 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);918 return VINF_PGM_SYNC_CR3;919 }920 }921 break; /* ASSUMES no duplicate entries... */922 }923 Assert(i < 4);924 925 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteHandled);926 }927 else928 {929 Assert(RT_FAILURE(rc));930 if (rc == VERR_EM_INTERPRETER)931 rc = VINF_EM_RAW_EMULATE_INSTR_PD_FAULT;932 else933 Log(("pgmXXGst32BitWriteHandlerCR3: returns %Rrc\n", rc));934 STAM_COUNTER_INC(&pVM->pgm.s.StatRZGuestCR3WriteUnhandled);935 }936 return rc;937 }938 939 #endif /* PGM_TYPE_PAE && !IN_RING3 */940 941 #endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */ -
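The deleted WriteHandlerCR3/WriteHandlerPD code above shares one piece of index arithmetic worth keeping in mind: after EMInterpretInstruction reports how many bytes the guest wrote, the first and last page-directory entries covered by the write are derived from the fault offset. Extracted into a self-contained helper (4-byte X86PDE entries, as in 32-bit guest paging):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static void writeTouchedPdes(unsigned offPD, unsigned cb,
                                 unsigned *piPD1, unsigned *piPD2)
    {
        assert(cb > 0 && cb <= 8);                      /* interpreted writes are small */
        *piPD1 = offPD / sizeof(uint32_t);              /* first PDE touched */
        *piPD2 = (offPD + cb - 1) / sizeof(uint32_t);   /* last PDE touched  */
    }

    int main(void)
    {
        unsigned iPD1, iPD2;
        writeTouchedPdes(6, 4, &iPD1, &iPD2);    /* unaligned 4-byte write */
        printf("iPD1=%u iPD2=%u\n", iPD1, iPD2); /* 1 and 2: it straddles two PDEs */
        return 0;
    }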
trunk/src/VBox/VMM/VMMAll/PGMAllMap.cpp
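Context for the diff that follows: these functions mirror the VMM's own mappings into the shadow page tables. When the shadow is PAE but a mapping slot is given as a 32-bit (4 MiB) PDE index, one such slot corresponds to two 2 MiB PAE PDEs; the hunks below compute `iPdpt = iOldPDE / 256` and `iPDE = iOldPDE * 2 % 512`. The same arithmetic as a runnable restatement (helper name invented):

    #include <stdio.h>

    static void mapIndex32BitToPae(unsigned iPde32, unsigned *piPdpt, unsigned *piPdePae)
    {
        *piPdpt   = iPde32 / 256;        /* 1024 32-bit PDEs spread over 4 PDPT entries */
        *piPdePae = (iPde32 * 2) % 512;  /* each 4 MiB slot covers two 2 MiB PAE PDEs   */
    }

    int main(void)
    {
        unsigned iPdpt, iPae;
        mapIndex32BitToPae(300, &iPdpt, &iPae);
        printf("PDPT %u, PAE PDEs %u and %u\n", iPdpt, iPae, iPae + 1); /* 1, 88, 89 */
        return 0;
    }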
r17468 r17586 224 224 return; 225 225 226 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY227 226 if (!pVM->pgm.s.CTX_SUFF(pShwPageCR3)) 228 227 return; /* too early */ 229 #endif230 228 231 229 PGMMODE enmShadowMode = PGMGetShadowMode(pVM); … … 272 270 Assert(pShwPdpt); 273 271 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdPt << X86_PDPT_SHIFT)); 274 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY275 272 if (!pShwPaePd) 276 273 { … … 299 296 } 300 297 } 301 #endif302 298 AssertFatal(pShwPaePd); 303 299 … … 305 301 AssertFatal(pPoolPagePd); 306 302 307 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY308 303 if (!pgmPoolIsPageLocked(&pVM->pgm.s, pPoolPagePd)) 309 304 { … … 328 323 } 329 324 330 #else331 if (pShwPaePd->a[iPDE].n.u1Present)332 {333 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));334 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iNewPDE);335 }336 #endif337 325 X86PDEPAE PdePae0; 338 326 PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0; … … 343 331 AssertFatal(iPDE < 512); 344 332 345 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY346 333 if ( pShwPaePd->a[iPDE].n.u1Present 347 334 && !(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING)) … … 349 336 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iPDE); 350 337 } 351 #else352 if (pShwPaePd->a[iPDE].n.u1Present)353 {354 Assert(!(pShwPaePd->a[iPDE].u & PGM_PDFLAGS_MAPPING));355 pgmPoolFree(pVM, pShwPaePd->a[iPDE].u & X86_PDE_PG_MASK, pPoolPagePd->idx, iNewPDE);356 }357 #endif358 338 X86PDEPAE PdePae1; 359 339 PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1; … … 387 367 return; 388 368 389 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY390 369 Assert(pShwPageCR3); 391 370 # ifdef IN_RC … … 400 379 pCurrentShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s); 401 380 } 402 #endif403 381 404 382 unsigned i = pMap->cPTs; … … 414 392 case PGMMODE_32_BIT: 415 393 { 416 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY417 394 PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3); 418 #else419 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);420 #endif421 395 AssertFatal(pShw32BitPd); 422 396 … … 434 408 const unsigned iPdpt = iOldPDE / 256; /* iOldPDE * 2 / 512; iOldPDE is in 4 MB pages */ 435 409 unsigned iPDE = iOldPDE * 2 % 512; 436 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY437 410 pShwPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3); 438 411 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pShwPdpt, (iPdpt << X86_PDPT_SHIFT)); … … 444 417 break; 445 418 } 446 #else447 pShwPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);448 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPdpt << X86_PDPT_SHIFT));449 #endif450 419 AssertFatal(pShwPaePd); 451 420 … … 461 430 pShwPdpt->a[iPdpt].u &= ~PGM_PLXFLAGS_MAPPING; 462 431 463 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY464 432 PPGMPOOLPAGE pPoolPagePd = pgmPoolGetPageByHCPhys(pVM, pShwPdpt->a[iPdpt].u & X86_PDPE_PG_MASK); 465 433 AssertFatal(pPoolPagePd); … … 470 438 pgmPoolUnlockPage(pVM->pgm.s.CTX_SUFF(pPool), pPoolPagePd); 471 439 } 472 #endif473 474 440 break; 475 441 } … … 494 460 void pgmMapCheckShadowPDEs(PVM pVM, PPGMPOOLPAGE pShwPageCR3, PPGMMAPPING pMap, unsigned iPDE) 495 461 { 496 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY497 462 Assert(pShwPageCR3); 498 #endif499 463 500 464 unsigned i = pMap->cPTs; … … 510 474 case PGMMODE_32_BIT: 511 475 { 512 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY513 476 PX86PD pShw32BitPd = (PX86PD)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3); 
514 #else515 PX86PD pShw32BitPd = pgmShwGet32BitPDPtr(&pVM->pgm.s);516 #endif517 477 AssertFatal(pShw32BitPd); 518 478 … … 530 490 const unsigned iPD = iPDE / 256; /* iPDE * 2 / 512; iPDE is in 4 MB pages */ 531 491 unsigned iPaePDE = iPDE * 2 % 512; 532 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY533 492 pPdpt = (PX86PDPT)PGMPOOL_PAGE_2_PTR_BY_PGM(&pVM->pgm.s, pShwPageCR3); 534 493 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, pPdpt, (iPD << X86_PDPT_SHIFT)); 535 #else536 pPdpt = pgmShwGetPaePDPTPtr(&pVM->pgm.s);537 pShwPaePd = pgmShwGetPaePDPtr(&pVM->pgm.s, (iPD << X86_PDPT_SHIFT));538 #endif539 494 AssertFatal(pShwPaePd); 540 495 … … 566 521 VMMDECL(void) PGMMapCheck(PVM pVM) 567 522 { 568 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY569 523 /* 570 524 * Can skip this if mappings are disabled. … … 573 527 return; 574 528 575 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY576 529 Assert(pVM->pgm.s.CTX_SUFF(pShwPageCR3)); 577 # endif578 530 579 531 /* … … 586 538 pgmMapCheckShadowPDEs(pVM, pVM->pgm.s.CTX_SUFF(pShwPageCR3), pCur, iPDE); 587 539 } 588 #endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */589 540 } 590 541 #endif /* defined(VBOX_STRICT) && !defined(IN_RING0) */ … … 600 551 int pgmMapActivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3) 601 552 { 602 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY603 553 /* 604 554 * Can skip this if mappings are disabled. 605 555 */ 606 556 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s)) 607 #else608 /*609 * Can skip this if mappings are safely fixed.610 */611 if (pVM->pgm.s.fMappingsFixed)612 #endif613 557 return VINF_SUCCESS; 614 558 … … 616 560 Log4(("PGMMapActivateAll fixed mappings=%d\n", pVM->pgm.s.fMappingsFixed)); 617 561 618 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY619 562 Assert(pShwPageCR3 && pShwPageCR3 == pVM->pgm.s.CTX_SUFF(pShwPageCR3)); 620 # endif621 563 622 564 /* … … 642 584 int pgmMapDeactivateCR3(PVM pVM, PPGMPOOLPAGE pShwPageCR3) 643 585 { 644 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY645 586 /* 646 587 * Can skip this if mappings are disabled. 647 588 */ 648 589 if (!pgmMapAreMappingsEnabled(&pVM->pgm.s)) 649 #else650 /*651 * Can skip this if mappings are safely fixed.652 */653 if (pVM->pgm.s.fMappingsFixed)654 #endif655 590 return VINF_SUCCESS; 656 591 657 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY658 592 Assert(pShwPageCR3); 659 # endif660 593 661 594 /* … … 762 695 } 763 696 764 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY765 697 /** 766 698 * Checks and resolves (ring 3 only) guest conflicts with VMM GC mappings. … … 878 810 return VINF_SUCCESS; 879 811 } 880 # endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */881 812 882 813 #endif /* IN_RING0 */ -
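The PGMAllMap.cpp hunks above consistently bracket shadow-PDE updates with pgmPoolLockPage/pgmPoolUnlockPage so the pool cannot recycle a page that currently backs an active mapping. The discipline reduces to a lock count consulted before reuse; a minimal sketch under that assumption (types and helpers hypothetical, and note the real code holds the lock for the lifetime of the mapping, not just one store):

    #include <assert.h>

    typedef struct POOLPAGE
    {
        unsigned      cLocked;       /* > 0: page must not be recycled */
        unsigned long aEntries[512]; /* the shadowed directory entries */
    } POOLPAGE;

    static void poolLockPage(POOLPAGE *pPage)             { pPage->cLocked++; }
    static void poolUnlockPage(POOLPAGE *pPage)           { assert(pPage->cLocked); pPage->cLocked--; }
    static int  poolPageIsReusable(const POOLPAGE *pPage) { return pPage->cLocked == 0; }

    static void setMappingPde(POOLPAGE *pPd, unsigned iPde, unsigned long uPde)
    {
        poolLockPage(pPd);              /* pin while we edit */
        pPd->aEntries[iPde] = uPde;
        poolUnlockPage(pPd);
    }

    int main(void)
    {
        static POOLPAGE Pd;             /* zero-initialised */
        setMappingPde(&Pd, 88, 0x12345067UL);
        return poolPageIsReusable(&Pd) ? 0 : 1;
    }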
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp
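Context for the diff that follows: pgmPoolMonitorChainChanging reacts to guest writes into pages that back shadow structures. It derives the affected shadow entry from the write offset; entries carrying the VMM-mapping flag are never zapped (a CR3 resync is forced instead), anything else is freed and cleared so a later sync rebuilds it. A stripped-down model of that bookkeeping (the flag bit and names are placeholders; the real code also handles writes that straddle entries and frees the backing pool page):

    #include <stdint.h>
    #include <stdio.h>

    #define PDE_FLAG_MAPPING (1ULL << 10)  /* toy stand-in for PGM_PDFLAGS_MAPPING (an AVL bit) */

    static int g_fForceSyncCR3 = 0;

    static void monitorChainChanging(uint64_t *paShw, unsigned offWrite)
    {
        unsigned iShw = offWrite / sizeof(uint64_t);   /* entry covered by the write */
        if (paShw[iShw] & PDE_FLAG_MAPPING)
            g_fForceSyncCR3 = 1;                       /* never zap a VMM mapping    */
        else
            paShw[iShw] = 0;                           /* stale: drop, resync later  */
    }

    int main(void)
    {
        uint64_t aPd[512] = { 0 };
        aPd[3] = 0xabc067;                             /* ordinary shadow PDE */
        monitorChainChanging(aPd, 3 * sizeof(uint64_t) + 2);
        printf("entry 3 = %#llx, force=%d\n",
               (unsigned long long)aPd[3], g_fForceSyncCR3);  /* 0, 0 */
        return 0;
    }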
r17559 r17586 136 136 # define PGMPOOL_UNLOCK_PTR(pVM, pPage) do {} while (0) 137 137 #endif 138 139 #if !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY) && (defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0))140 /**141 * Maps a pool page into the current context.142 *143 * @returns Pointer to the mapping.144 * @param pPGM Pointer to the PGM instance data.145 * @param pPage The page to map.146 */147 void *pgmPoolMapPageFallback(PPGM pPGM, PPGMPOOLPAGE pPage)148 {149 /* general pages are take care of by the inlined part, it150 only ends up here in case of failure. */151 AssertReleaseReturn(pPage->idx < PGMPOOL_IDX_FIRST, NULL);152 153 /** @todo make sure HCPhys is valid for *all* indexes. */154 /* special pages. */155 # ifdef IN_RC156 switch (pPage->idx)157 {158 case PGMPOOL_IDX_PD:159 return pPGM->pShw32BitPdRC;160 case PGMPOOL_IDX_PAE_PD:161 case PGMPOOL_IDX_PAE_PD_0:162 return pPGM->apShwPaePDsRC[0];163 case PGMPOOL_IDX_PAE_PD_1:164 return pPGM->apShwPaePDsRC[1];165 case PGMPOOL_IDX_PAE_PD_2:166 return pPGM->apShwPaePDsRC[2];167 case PGMPOOL_IDX_PAE_PD_3:168 return pPGM->apShwPaePDsRC[3];169 case PGMPOOL_IDX_PDPT:170 return pPGM->pShwPaePdptRC;171 default:172 AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));173 return NULL;174 }175 176 # else /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */177 RTHCPHYS HCPhys;178 switch (pPage->idx)179 {180 case PGMPOOL_IDX_PD:181 HCPhys = pPGM->HCPhysShw32BitPD;182 break;183 case PGMPOOL_IDX_PAE_PD_0:184 HCPhys = pPGM->aHCPhysPaePDs[0];185 break;186 case PGMPOOL_IDX_PAE_PD_1:187 HCPhys = pPGM->aHCPhysPaePDs[1];188 break;189 case PGMPOOL_IDX_PAE_PD_2:190 HCPhys = pPGM->aHCPhysPaePDs[2];191 break;192 case PGMPOOL_IDX_PAE_PD_3:193 HCPhys = pPGM->aHCPhysPaePDs[3];194 break;195 case PGMPOOL_IDX_PDPT:196 HCPhys = pPGM->HCPhysShwPaePdpt;197 break;198 case PGMPOOL_IDX_NESTED_ROOT:199 HCPhys = pPGM->HCPhysShwNestedRoot;200 break;201 case PGMPOOL_IDX_PAE_PD:202 AssertReleaseMsgFailed(("PGMPOOL_IDX_PAE_PD is not usable in VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 context\n"));203 return NULL;204 default:205 AssertReleaseMsgFailed(("Invalid index %d\n", pPage->idx));206 return NULL;207 }208 AssertMsg(HCPhys && HCPhys != NIL_RTHCPHYS && !(PAGE_OFFSET_MASK & HCPhys), ("%RHp\n", HCPhys));209 210 void *pv;211 pgmR0DynMapHCPageInlined(pPGM, HCPhys, &pv);212 return pv;213 # endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */214 }215 #endif /* IN_RC || VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */216 138 217 139 … … 388 310 } 389 311 390 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY391 312 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD: 392 313 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD: … … 457 378 break; 458 379 } 459 # endif460 461 380 462 381 case PGMPOOLKIND_PAE_PT_FOR_PAE_PT: … … 508 427 } 509 428 510 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY511 429 case PGMPOOLKIND_32BIT_PD: 512 # else513 case PGMPOOLKIND_ROOT_32BIT_PD:514 # endif515 430 { 516 431 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage); 517 432 const unsigned iShw = off / sizeof(X86PTE); // ASSUMING 32-bit guest paging! 518 433 519 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY520 434 LogFlow(("pgmPoolMonitorChainChanging: PGMPOOLKIND_32BIT_PD %x\n", iShw)); 521 # endif522 435 # ifndef IN_RING0 523 436 if (uShw.pPD->a[iShw].u & PGM_PDFLAGS_MAPPING) … … 530 443 } 531 444 # endif /* !IN_RING0 */ 532 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY533 445 # ifndef IN_RING0 534 446 else … … 545 457 } 546 458 } 547 # endif548 459 /* paranoia / a bit assumptive. 
*/ 549 460 if ( pCpu … … 564 475 } 565 476 # endif /* !IN_RING0 */ 566 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY567 477 # ifndef IN_RING0 568 478 else … … 579 489 } 580 490 } 581 # endif582 491 } 583 492 } … … 596 505 break; 597 506 } 598 599 # ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY600 case PGMPOOLKIND_ROOT_PAE_PD:601 {602 unsigned iGst = off / sizeof(X86PDE); // ASSUMING 32-bit guest paging!603 unsigned iShwPdpt = iGst / 256;604 unsigned iShw = (iGst % 256) * 2;605 Assert(pPage->idx == PGMPOOL_IDX_PAE_PD);606 PPGMPOOLPAGE pPage2 = pPage + 1 + iShwPdpt;607 Assert(pPage2->idx == PGMPOOL_IDX_PAE_PD_0 + iShwPdpt);608 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage2);609 for (unsigned i = 0; i < 2; i++, iShw++)610 {611 if ((uShw.pPDPae->a[iShw].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))612 {613 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));614 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);615 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw=%#x!\n", iShwPdpt, iShw));616 }617 /* paranoia / a bit assumptive. */618 else if ( pCpu619 && (off & 3)620 && (off & 3) + cbWrite > 4)621 {622 const unsigned iShw2 = iShw + 2;623 if ( iShw2 < RT_ELEMENTS(uShw.pPDPae->a) /** @todo was completely wrong, it's better now after #1865 but still wrong from cross PD. */624 && (uShw.pPDPae->a[iShw2].u & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == (PGM_PDFLAGS_MAPPING | X86_PDE_P))625 {626 Assert(pgmMapAreMappingsEnabled(&pVM->pgm.s));627 VM_FF_SET(pVM, VM_FF_PGM_SYNC_CR3);628 LogFlow(("pgmPoolMonitorChainChanging: Detected conflict at iShwPdpt=%#x iShw2=%#x!\n", iShwPdpt, iShw2));629 }630 }631 #if 0 /* useful when running PGMAssertCR3(), a bit too troublesome for general use (TLBs). */632 if ( uShw.pPDPae->a[iShw].n.u1Present633 && !VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))634 {635 LogFlow(("pgmPoolMonitorChainChanging: iShwPdpt=%#x iShw=%#x: %RX64 -> freeing it!\n", iShwPdpt, iShw, uShw.pPDPae->a[iShw].u));636 # ifdef IN_RC /* TLB load - we're pushing things a bit... */637 ASMProbeReadByte(pvAddress);638 # endif639 pgmPoolFree(pVM, uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK, pPage->idx, iShw + iShwPdpt * X86_PG_PAE_ENTRIES);640 uShw.pPDPae->a[iShw].u = 0;641 }642 #endif643 }644 break;645 }646 # endif /* !VBOX_WITH_PGMPOOL_PAGING_ONLY */647 507 648 508 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD: … … 660 520 } 661 521 #endif /* !IN_RING0 */ 662 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY663 522 /* 664 523 * Causes trouble when the guest uses a PDE to refer to the whole page table level … … 675 534 pgmPoolFree(pVM, 676 535 uShw.pPDPae->a[iShw].u & X86_PDE_PAE_PG_MASK, 677 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY678 536 pPage->idx, 679 537 iShw); 680 # else681 /* Note: hardcoded PAE implementation dependency */682 (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? PGMPOOL_IDX_PAE_PD : pPage->idx,683 (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? iShw + (pPage->idx - PGMPOOL_IDX_PAE_PD_0) * X86_PG_PAE_ENTRIES : iShw);684 # endif685 538 uShw.pPDPae->a[iShw].u = 0; 686 539 } 687 540 } 688 #endif689 541 /* paranoia / a bit assumptive. */ 690 542 if ( pCpu … … 705 557 } 706 558 #endif /* !IN_RING0 */ 707 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY708 559 # ifndef IN_RING0 709 560 else … … 714 565 pgmPoolFree(pVM, 715 566 uShw.pPDPae->a[iShw2].u & X86_PDE_PAE_PG_MASK, 716 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY717 567 pPage->idx, 718 568 iShw2); 719 # else720 /* Note: hardcoded PAE implementation dependency */721 (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? 
722 (pPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD) ? iShw2 + (pPage->idx - PGMPOOL_IDX_PAE_PD_0) * X86_PG_PAE_ENTRIES : iShw2);
723 # endif
724 569 uShw.pPDPae->a[iShw2].u = 0;
725 570 }
726 #endif
727 571 }
728 572 break;
729 573 }
730 574
731 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
732 575 case PGMPOOLKIND_PAE_PDPT:
733 # else
734 case PGMPOOLKIND_ROOT_PDPT:
735 # endif
736 576 {
737 577 /*
738 578 * Hopefully this doesn't happen very often:
739 579 * - touching unused parts of the page
740 580 * - messing with the bits of pd pointers without changing the physical address
741 581 */
742 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
743 582 /* PDPT roots are not page aligned; 32 byte only! */
744 583 const unsigned offPdpt = GCPhysFault - pPage->GCPhys;
745 # else
746 const unsigned offPdpt = off;
747 # endif
748 584
749 585 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
750 586 const unsigned iShw = offPdpt / sizeof(X86PDPE);
…
760 597 }
761 598 # endif /* !IN_RING0 */
762 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
763 599 # ifndef IN_RING0
764 600 else
…
773 609 uShw.pPDPT->a[iShw].u = 0;
774 610 }
775 # endif
776 611
777 612 /* paranoia / a bit assumptive. */
…
793 628 }
794 629 # endif /* !IN_RING0 */
795 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
796 630 # ifndef IN_RING0
797 631 else
…
806 640 uShw.pPDPT->a[iShw2].u = 0;
807 641 }
808 # endif
809 642 }
810 643 }
…
856 689 * - messing with the bits of pd pointers without changing the physical address
857 690 */
858 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
859 691 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
860 # endif
861 692 {
862 693 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
…
891 722 * - messing with the bits of pd pointers without changing the physical address
892 723 */
893 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
894 724 if (!VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3))
895 # endif
896 725 {
897 726 uShw.pv = PGMPOOL_PAGE_2_LOCKED_PTR(pVM, pPage);
…
1282 1111 bool fReused = false;
1283 1112 if ( ( pPage->cModifications < 48 /** @todo #define */ /** @todo need to check that it's not mapping EIP. */ /** @todo adjust this! */
1284 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1285 1113 || pgmPoolIsPageLocked(&pVM->pgm.s, pPage)
1286 #else
1287 || pPage->fCR3Mix
1288 #endif
1289 1114 )
1290 1115 && !(fReused = pgmPoolMonitorIsReused(pVM, pPage, pRegFrame, &Cpu, pvFault))
…
1445 1270 * Reject any attempts at flushing the currently active shadow CR3 mapping
1446 1271 */
1447 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1448 1272 if (pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage))
1449 #else
1450 if (PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) == pPage->Core.Key)
1451 #endif
1452 1273 {
1453 1274 /* Refresh the cr3 mapping by putting it at the head of the age list. */
…
1490 1311 case PGMPOOLKIND_EPT_PD_FOR_PHYS:
1491 1312 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
1492 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1493 1313 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT: /* never reuse them for other types */
1494 1314 return false;
1495 #else
1496 return true;
1497 #endif
1498 1315
1499 1316 /*
…
1564 1381 * These cannot be flushed, and it's common to reuse the PDs as PTs.
1565 1382 */
1566 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1567 case PGMPOOLKIND_ROOT_32BIT_PD:
1568 case PGMPOOLKIND_ROOT_PAE_PD:
1569 case PGMPOOLKIND_ROOT_PDPT:
1570 #endif
1571 1383 case PGMPOOLKIND_ROOT_NESTED:
1572 1384 return false;
…
1770 1582 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
1771 1583 case PGMPOOLKIND_64BIT_PML4:
1772 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1773 1584 case PGMPOOLKIND_32BIT_PD:
1774 1585 case PGMPOOLKIND_PAE_PDPT:
1775 #else
1776 case PGMPOOLKIND_ROOT_32BIT_PD:
1777 case PGMPOOLKIND_ROOT_PAE_PD:
1778 case PGMPOOLKIND_ROOT_PDPT:
1779 #endif
1780 1586 {
1781 1587 /* find the head */
…
1803 1609 case PGMPOOLKIND_PAE_PDPT_PHYS:
1804 1610 case PGMPOOLKIND_32BIT_PD_PHYS:
1805 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1806 1611 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
1807 #endif
1808 1612 break;
1809 1613 default:
…
1845 1649 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
1846 1650 case PGMPOOLKIND_64BIT_PML4:
1847 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1848 1651 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
1849 1652 case PGMPOOLKIND_PAE_PD1_FOR_32BIT_PD:
…
1852 1655 case PGMPOOLKIND_32BIT_PD:
1853 1656 case PGMPOOLKIND_PAE_PDPT:
1854 #else
1855 case PGMPOOLKIND_ROOT_PDPT:
1856 #endif
1857 1657 break;
1858 1658
…
1871 1671 return VINF_SUCCESS;
1872 1672
1873 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1874 1673 case PGMPOOLKIND_32BIT_PD_PHYS:
1875 1674 case PGMPOOLKIND_PAE_PDPT_PHYS:
…
1878 1677 /* Nothing to monitor here. */
1879 1678 return VINF_SUCCESS;
1880 #else
1881 case PGMPOOLKIND_ROOT_32BIT_PD:
1882 case PGMPOOLKIND_ROOT_PAE_PD:
1883 #endif
1884 1679 #ifdef PGMPOOL_WITH_MIXED_PT_CR3
1885 1680 break;
…
1952 1747 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
1953 1748 case PGMPOOLKIND_64BIT_PML4:
1954 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
1955 1749 case PGMPOOLKIND_32BIT_PD:
1956 1750 case PGMPOOLKIND_PAE_PDPT:
…
1959 1753 case PGMPOOLKIND_PAE_PD2_FOR_32BIT_PD:
1960 1754 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
1961 #else
1962 case PGMPOOLKIND_ROOT_PDPT:
1963 #endif
1964 1755 break;
…
1981 1772 return VINF_SUCCESS;
1982 1773
1983 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1984 case PGMPOOLKIND_ROOT_32BIT_PD:
1985 case PGMPOOLKIND_ROOT_PAE_PD:
1986 #endif
1987 1774 #ifdef PGMPOOL_WITH_MIXED_PT_CR3
1988 1775 break;
1989 #endif
1990 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
1991 case PGMPOOLKIND_PAE_PD0_FOR_32BIT_PD:
1992 #endif
1993 1776 default:
…
2007 1791 PPGMPOOLPAGE pNewHead = &pPool->aPages[pPage->iMonitoredNext];
2008 1792 pNewHead->iMonitoredPrev = NIL_PGMPOOL_IDX;
2009 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
2010 pNewHead->fCR3Mix = pPage->fCR3Mix;
2011 #endif
2012 1793 rc = PGMHandlerPhysicalChangeCallbacks(pVM, pPage->GCPhys & ~(RTGCPHYS)(PAGE_SIZE - 1),
2013 1794 pPool->pfnAccessHandlerR3, MMHyperCCToR3(pVM, pNewHead),
…
2047 1828 }
2048 1829
2049 # if defined(PGMPOOL_WITH_MIXED_PT_CR3) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2050
2051 /**
2052 * Set or clear the fCR3Mix attribute in a chain of monitored pages.
2053 *
2054 * @param pPool The Pool.
2055 * @param pPage A page in the chain.
2056 * @param fCR3Mix The new fCR3Mix value.
2057 */
2058 static void pgmPoolMonitorChainChangeCR3Mix(PPGMPOOL pPool, PPGMPOOLPAGE pPage, bool fCR3Mix)
2059 {
2060 /* current */
2061 pPage->fCR3Mix = fCR3Mix;
2062
2063 /* before */
2064 int16_t idx = pPage->iMonitoredPrev;
2065 while (idx != NIL_PGMPOOL_IDX)
2066 {
2067 pPool->aPages[idx].fCR3Mix = fCR3Mix;
2068 idx = pPool->aPages[idx].iMonitoredPrev;
2069 }
2070
2071 /* after */
2072 idx = pPage->iMonitoredNext;
2073 while (idx != NIL_PGMPOOL_IDX)
2074 {
2075 pPool->aPages[idx].fCR3Mix = fCR3Mix;
2076 idx = pPool->aPages[idx].iMonitoredNext;
2077 }
2078 }
2079
2080
2081 /**
2082 * Installs or modifies monitoring of a CR3 page (special).
2083 *
2084 * We're pretending the CR3 page is shadowed by the pool so we can use the
2085 * generic mechanisms in detecting chained monitoring. (This also gives us a
2086 * taste of what code changes are required to really pool CR3 shadow pages.)
2087 *
2088 * @returns VBox status code.
2089 * @param pPool The pool.
2090 * @param idxRoot The CR3 (root) page index.
2091 * @param GCPhysCR3 The (new) CR3 value.
2092 */
2093 int pgmPoolMonitorMonitorCR3(PPGMPOOL pPool, uint16_t idxRoot, RTGCPHYS GCPhysCR3)
2094 {
2095 Assert(idxRoot != NIL_PGMPOOL_IDX && idxRoot < PGMPOOL_IDX_FIRST);
2096 PPGMPOOLPAGE pPage = &pPool->aPages[idxRoot];
2097 LogFlow(("pgmPoolMonitorMonitorCR3: idxRoot=%d pPage=%p:{.GCPhys=%RGp, .fMonitored=%d} GCPhysCR3=%RGp\n",
2098 idxRoot, pPage, pPage->GCPhys, pPage->fMonitored, GCPhysCR3));
2099
2100 /*
2101 * The unlikely case where it already matches.
2102 */
2103 if (pPage->GCPhys == GCPhysCR3)
2104 {
2105 Assert(pPage->fMonitored);
2106 return VINF_SUCCESS;
2107 }
2108
2109 /*
2110 * Flush the current monitoring and remove it from the hash.
2111 */
2112 int rc = VINF_SUCCESS;
2113 if (pPage->fMonitored)
2114 {
2115 pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, false);
2116 rc = pgmPoolMonitorFlush(pPool, pPage);
2117 if (rc == VERR_PGM_POOL_CLEARED)
2118 rc = VINF_SUCCESS;
2119 else
2120 AssertFatalRC(rc);
2121 pgmPoolHashRemove(pPool, pPage);
2122 }
2123
2124 /*
2125 * Monitor the page at the new location and insert it into the hash.
2126 */
2127 pPage->GCPhys = GCPhysCR3;
2128 int rc2 = pgmPoolMonitorInsert(pPool, pPage);
2129 if (rc2 != VERR_PGM_POOL_CLEARED)
2130 {
2131 AssertFatalRC(rc2);
2132 if (rc2 != VINF_SUCCESS && rc == VINF_SUCCESS)
2133 rc = rc2;
2134 }
2135 pgmPoolHashInsert(pPool, pPage);
2136 pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, true);
2137 return rc;
2138 }
2139
2140
2141 /**
2142 * Removes the monitoring of a CR3 page (special).
2143 *
2144 * @returns VBox status code.
2145 * @param pPool The pool.
2146 * @param idxRoot The CR3 (root) page index.
2147 */
2148 int pgmPoolMonitorUnmonitorCR3(PPGMPOOL pPool, uint16_t idxRoot)
2149 {
2150 Assert(idxRoot != NIL_PGMPOOL_IDX && idxRoot < PGMPOOL_IDX_FIRST);
2151 PPGMPOOLPAGE pPage = &pPool->aPages[idxRoot];
2152 LogFlow(("pgmPoolMonitorUnmonitorCR3: idxRoot=%d pPage=%p:{.GCPhys=%RGp, .fMonitored=%d}\n",
2153 idxRoot, pPage, pPage->GCPhys, pPage->fMonitored));
2154
2155 if (!pPage->fMonitored)
2156 return VINF_SUCCESS;
2157
2158 pgmPoolMonitorChainChangeCR3Mix(pPool, pPage, false);
2159 int rc = pgmPoolMonitorFlush(pPool, pPage);
2160 if (rc != VERR_PGM_POOL_CLEARED)
2161 AssertFatalRC(rc);
2162 else
2163 rc = VINF_SUCCESS;
2164 pgmPoolHashRemove(pPool, pPage);
2165 Assert(!pPage->fMonitored);
2166 pPage->GCPhys = NIL_RTGCPHYS;
2167 return rc;
2168 }
2169
2170 # endif /* PGMPOOL_WITH_MIXED_PT_CR3 && !VBOX_WITH_PGMPOOL_PAGING_ONLY*/
2171 1830
2172 1831 /**
…
2713 2372 case PGMPOOLKIND_32BIT_PT_FOR_PHYS:
2714 2373 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2715 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2716 2374 case PGMPOOLKIND_32BIT_PD:
2717 2375 case PGMPOOLKIND_32BIT_PD_PHYS:
2718 #else
2719 case PGMPOOLKIND_ROOT_32BIT_PD:
2720 #endif
2721 2376 return 4;
2722 2377
2723 2378 case PGMPOOLKIND_PAE_PT_FOR_PHYS:
…
2734 2389 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2735 2390 case PGMPOOLKIND_64BIT_PML4:
2736 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
2737 case PGMPOOLKIND_ROOT_PAE_PD:
2738 case PGMPOOLKIND_ROOT_PDPT:
2739 #endif
2740 2391 case PGMPOOLKIND_PAE_PDPT:
2741 2392 case PGMPOOLKIND_ROOT_NESTED:
…
2770 2421 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_PT:
2771 2422 case PGMPOOLKIND_32BIT_PT_FOR_32BIT_4MB:
2772 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2773 2423 case PGMPOOLKIND_32BIT_PD:
2774 #else
2775 case PGMPOOLKIND_ROOT_32BIT_PD:
2776 #endif
2777 2424 case PGMPOOLKIND_PAE_PT_FOR_32BIT_PT:
2778 2425 case PGMPOOLKIND_PAE_PT_FOR_32BIT_4MB:
…
2789 2436 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
2790 2437 case PGMPOOLKIND_64BIT_PML4:
2791 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
2792 2438 case PGMPOOLKIND_PAE_PDPT:
2793 #else
2794 case PGMPOOLKIND_ROOT_PAE_PD:
2795 case PGMPOOLKIND_ROOT_PDPT:
2796 #endif
2797 2439 return 8;
2798 2440
…
3201 2843 */
3202 2844 PPGMPOOLPAGE pUserPage = &pPool->aPages[pUser->iUser];
3203 #if defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
3204 if (pUserPage->enmKind == PGMPOOLKIND_ROOT_PAE_PD)
3205 {
3206 /* Must translate the fake 2048 entry PD to a 512 PD one since the R0 mapping is not linear. */
3207 Assert(pUser->iUser == PGMPOOL_IDX_PAE_PD);
3208 uint32_t iPdpt = iUserTable / X86_PG_PAE_ENTRIES;
3209 iUserTable %= X86_PG_PAE_ENTRIES;
3210 pUserPage = &pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + iPdpt];
3211 Assert(pUserPage->enmKind == PGMPOOLKIND_PAE_PD_FOR_PAE_PD);
3212 }
3213 #endif
3214 2845 union
3215 2846 {
…
3222 2853
3223 2854 /* Safety precaution in case we change the paging for other modes too in the future. */
3224 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3225 2855 Assert(!pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage));
3226 #else
3227 Assert(PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) != pPage->Core.Key);
3228 #endif
3229 2856
3230 2857 #ifdef VBOX_STRICT
…
3234 2861 switch (pUserPage->enmKind)
3235 2862 {
3236 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3237 2863 case PGMPOOLKIND_32BIT_PD:
3238 2864 case PGMPOOLKIND_32BIT_PD_PHYS:
3239 2865 Assert(iUserTable < X86_PG_ENTRIES);
3240 2866 break;
3241 # else
3242 case PGMPOOLKIND_ROOT_32BIT_PD:
3243 Assert(iUserTable < X86_PG_ENTRIES);
3244 Assert(!(u.pau32[iUserTable] & PGM_PDFLAGS_MAPPING));
3245 break;
3246 # endif
3247 # if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
3248 case PGMPOOLKIND_ROOT_PAE_PD:
3249 Assert(iUserTable < 2048 && pUser->iUser == PGMPOOL_IDX_PAE_PD);
3250 AssertMsg(!(u.pau64[iUserTable] & PGM_PDFLAGS_MAPPING), ("%llx %d\n", u.pau64[iUserTable], iUserTable));
3251 break;
3252 # endif
3253 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3254 2867 case PGMPOOLKIND_PAE_PDPT:
3255 2868 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3256 2869 case PGMPOOLKIND_PAE_PDPT_PHYS:
3257 # else
3258 case PGMPOOLKIND_ROOT_PDPT:
3259 # endif
3260 2870 Assert(iUserTable < 4);
3261 2871 Assert(!(u.pau64[iUserTable] & PGM_PLXFLAGS_PERMANENT));
…
3307 2917 {
3308 2918 /* 32-bit entries */
3309 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3310 2919 case PGMPOOLKIND_32BIT_PD:
3311 2920 case PGMPOOLKIND_32BIT_PD_PHYS:
3312 #else
3313 case PGMPOOLKIND_ROOT_32BIT_PD:
3314 #endif
3315 2921 u.pau32[iUserTable] = 0;
3316 2922 break;
…
3322 2928 case PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD:
3323 2929 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
3324 #if defined(IN_RC) && defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
2930 #if defined(IN_RC)
3325 2931 /* In 32 bits PAE mode we *must* invalidate the TLB when changing a PDPT entry; the CPU fetches them only during cr3 load, so any
3326 2932 * non-present PDPT will continue to cause page faults.
…
3336 2942 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
3337 2943 case PGMPOOLKIND_64BIT_PD_FOR_PHYS:
3338 #if !defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_PGMPOOL_PAGING_ONLY)
3339 case PGMPOOLKIND_ROOT_PAE_PD:
3340 #endif
3341 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3342 2944 case PGMPOOLKIND_PAE_PDPT:
3343 2945 case PGMPOOLKIND_PAE_PDPT_FOR_32BIT:
3344 #else
3345 case PGMPOOLKIND_ROOT_PDPT:
3346 #endif
3347 2946 case PGMPOOLKIND_ROOT_NESTED:
3348 2947 case PGMPOOLKIND_EPT_PDPT_FOR_PHYS:
…
3848 3447
3849 3448
3850 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3851 3449 /**
3852 3450 * Clear references to shadowed pages in a 32 bits page directory.
…
3872 3470 }
3873 3471 }
3874 #endif
3875 3472
3876 3473 /**
…
3886 3483 {
3887 3484 if ( pShwPD->a[i].n.u1Present
3888 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3889 3485 && !(pShwPD->a[i].u & PGM_PDFLAGS_MAPPING)
3890 #endif
3891 3486 )
3892 3487 {
…
3914 3509 {
3915 3510 if ( pShwPDPT->a[i].n.u1Present
3916 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
3917 3511 && !(pShwPDPT->a[i].u & PGM_PLXFLAGS_MAPPING)
3918 #endif
3919 3512 )
3920 3513 {
…
4111 3704 break;
4112 3705
4113 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
4114 3706 case PGMPOOLKIND_32BIT_PD_PHYS:
4115 3707 case PGMPOOLKIND_32BIT_PD:
…
4120 3712 case PGMPOOLKIND_PAE_PDPT:
4121 3713 case PGMPOOLKIND_PAE_PDPT_PHYS:
4122 #endif
4123 3714 case PGMPOOLKIND_64BIT_PDPT_FOR_PHYS:
4124 3715 case PGMPOOLKIND_64BIT_PDPT_FOR_64BIT_PDPT:
…
4175 3766 */
4176 3767 Assert(NIL_PGMPOOL_IDX == 0);
4177 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
4178 for (unsigned i = 1; i < PGMPOOL_IDX_FIRST; i++)
4179 {
4180 /*
4181 * Get the page address.
4182 */
4183 PPGMPOOLPAGE pPage = &pPool->aPages[i];
4184 union
4185 {
4186 uint64_t *pau64;
4187 uint32_t *pau32;
4188 } u;
4189
4190 /*
4191 * Mark stuff not present.
4192 */
4193 switch (pPage->enmKind)
4194 {
4195 case PGMPOOLKIND_ROOT_32BIT_PD:
4196 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
4197 for (unsigned iPage = 0; iPage < X86_PG_ENTRIES; iPage++)
4198 if ((u.pau32[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
4199 u.pau32[iPage] = 0;
4200 break;
4201
4202 case PGMPOOLKIND_PAE_PD_FOR_PAE_PD:
4203 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
4204 for (unsigned iPage = 0; iPage < X86_PG_PAE_ENTRIES; iPage++)
4205 if ((u.pau64[iPage] & (PGM_PDFLAGS_MAPPING | X86_PDE_P)) == X86_PDE_P)
4206 u.pau64[iPage] = 0;
4207 break;
4208
4209 case PGMPOOLKIND_ROOT_PDPT:
4210 /* Not root of shadowed pages currently, ignore it. */
4211 break;
4212
4213 case PGMPOOLKIND_ROOT_NESTED:
4214 u.pau64 = (uint64_t *)PGMPOOL_PAGE_2_PTR(pPool->CTX_SUFF(pVM), pPage);
4215 ASMMemZero32(u.pau64, PAGE_SIZE);
4216 break;
4217 }
4218 }
4219 #endif
4220 3768
4221 3769 /*
…
4255 3803 }
4256 3804
4257 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
4258 3805 /* Unmap the old CR3 value before flushing everything. */
4259 3806 int rc = PGM_BTH_PFN(UnmapCR3, pVM)(pVM);
…
4263 3810 rc = PGM_SHW_PFN(Exit, pVM)(pVM);
4264 3811 AssertRC(rc);
4265 #endif
4266 3812
4267 3813 /*
…
4302 3848 pPage->iAgePrev = NIL_PGMPOOL_IDX;
4303 3849 #endif
4304 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
4305 3850 pPage->fLocked = false;
4306 #endif
4307 3851 }
4308 3852 pPool->aPages[pPool->cCurPages - 1].iNext = NIL_PGMPOOL_IDX;
…
4409 3953 }
4410 3954
4411 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
4412 3955 /* Force a shadow mode reinit (necessary for nested paging and ept). */
4413 3956 pVM->pgm.s.enmShadowMode = PGMMODE_INVALID;
…
4416 3959 rc = PGMR3ChangeMode(pVM, PGMGetGuestMode(pVM));
4417 3960 AssertRC(rc);
4418 #endif
4419 3961
4420 3962 /*
…
4458 4000 * Quietly reject any attempts at flushing the currently active shadow CR3 mapping
4459 4001 */
4460 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
4461 4002 if (pgmPoolIsPageLocked(&pPool->CTX_SUFF(pVM)->pgm.s, pPage))
4462 4003 {
…
4471 4012 || pPage->enmKind == PGMPOOLKIND_PAE_PD3_FOR_32BIT_PD,
4472 4013 ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
4473 #else
4474 if (PGMGetHyperCR3(pPool->CTX_SUFF(pVM)) == pPage->Core.Key)
4475 {
4476 AssertMsg(pPage->enmKind == PGMPOOLKIND_64BIT_PML4,
4477 ("Can't free the shadow CR3! (%RHp vs %RHp kind=%d\n", PGMGetHyperCR3(pPool->CTX_SUFF(pVM)), pPage->Core.Key, pPage->enmKind));
4478 #endif
4479 4014 Log(("pgmPoolFlushPage: current active shadow CR3, rejected. enmKind=%s idx=%d\n", pgmPoolPoolKindToStr(pPage->enmKind), pPage->idx));
4480 4015 return VINF_SUCCESS;
…
4585 4120 */
4586 4121 if ( pPool->cCurPages < pPool->cMaxPages
4587 #if defined( VBOX_WITH_PGMPOOL_PAGING_ONLY) && defined(IN_RC)
4122 #if defined(IN_RC)
4588 4123 /* Hack alert: we can't deal with jumps to ring 3 when called from MapCR3 and allocating pages for PAE PDs. */
4589 4124 && enmKind != PGMPOOLKIND_PAE_PD_FOR_PAE_PD
…
4875 4410 case PGMPOOLKIND_EPT_PT_FOR_PHYS:
4876 4411 return "PGMPOOLKIND_EPT_PT_FOR_PHYS";
4877 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
4878 case PGMPOOLKIND_ROOT_32BIT_PD:
4879 return "PGMPOOLKIND_ROOT_32BIT_PD";
4880 case PGMPOOLKIND_ROOT_PAE_PD:
4881 return "PGMPOOLKIND_ROOT_PAE_PD";
4882 case PGMPOOLKIND_ROOT_PDPT:
4883 return "PGMPOOLKIND_ROOT_PDPT";
4884 #endif
4885 4412 case PGMPOOLKIND_ROOT_NESTED:
4886 4413 return "PGMPOOLKIND_ROOT_NESTED";
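The PGMAllPool.cpp hunks above retire two older mechanisms at once: the fCR3Mix flag that had to be propagated along whole monitoring chains, and the PGMGetHyperCR3() comparison used to spot the active root. With VBOX_WITH_PGMPOOL_PAGING_ONLY now the only code path, a pool page that currently backs the shadow CR3 simply carries a lock flag (pPage->fLocked, queried through pgmPoolIsPageLocked), and the flush paths refuse to touch it. Below is a minimal, self-contained C sketch of that pattern; the struct and helpers are stand-ins for illustration, not the real VMM types:

#include <stdbool.h>
#include <stdio.h>

typedef struct POOLPAGE
{
    unsigned idx;      /* index of the page within the pool */
    bool     fLocked;  /* set while the page backs the active shadow CR3 */
} POOLPAGE;

/* Simplified analogue of the pgmPoolIsPageLocked() check the diff adopts. */
static bool poolIsPageLocked(const POOLPAGE *pPage)
{
    return pPage->fLocked;
}

/* Returns true if the page was flushed, false if the flush was rejected. */
static bool poolFlushPage(POOLPAGE *pPage)
{
    if (poolIsPageLocked(pPage))
    {
        printf("page %u: active shadow CR3, flush rejected\n", pPage->idx);
        return false;
    }
    printf("page %u: flushed\n", pPage->idx);
    return true;
}

int main(void)
{
    POOLPAGE Root = { 1, true };  /* currently backing the shadow CR3 */
    poolFlushPage(&Root);         /* rejected while locked */
    Root.fLocked = false;         /* CR3 switched away, page unlocked */
    poolFlushPage(&Root);         /* now flushed */
    return 0;
}

The design point visible in the diff is that the flush path now asks a single question about the page itself, instead of maintaining a per-page fCR3Mix attribute across every member of a monitoring chain.
-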
trunk/src/VBox/VMM/VMMAll/PGMAllShw.h
r16465 r17586
110 110 # define SHW_PDPE_PG_MASK X86_PDPE_PG_MASK
111 111 # define SHW_TOTAL_PD_ENTRIES (X86_PG_PAE_ENTRIES*X86_PG_PAE_PDPE_ENTRIES)
112 # ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
113 112 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PDPT
114 # else
115 # define SHW_POOL_ROOT_IDX PGMPOOL_IDX_PAE_PD
116 # endif
117 113
118 114 # endif
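In PGMAllShw.h the PAE shadow root index now always refers to the pool's PDPT page. The SHW_TOTAL_PD_ENTRIES constant above is plain arithmetic, and a small check program makes the coverage explicit. The numeric values are the architectural PAE ones (4 PDPT entries, 512 PDEs per page directory); the macro names are merely borrowed from the header for readability, assuming they expand to these values:

#include <stdio.h>

#define X86_PG_PAE_ENTRIES      512 /* PDEs in one PAE page directory */
#define X86_PG_PAE_PDPE_ENTRIES 4   /* entries in the PAE PDPT */
#define SHW_TOTAL_PD_ENTRIES    (X86_PG_PAE_ENTRIES * X86_PG_PAE_PDPE_ENTRIES)

int main(void)
{
    /* 4 PDs x 512 entries = 2048 PDEs; each PDE can map 2MB. */
    printf("total PAE shadow PD entries: %d\n", SHW_TOTAL_PD_ENTRIES);
    printf("address space covered: %d MB\n", SHW_TOTAL_PD_ENTRIES * 2);
    return 0;
}

2048 page directory entries at 2MB apiece span the full 4GB address space of a 32-bit guest.
-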
trunk/src/VBox/VMM/VMMSwitcher.cpp
r16859 r17586
716 716 GCPtrGDT,
717 717 PGMGetInter32BitCR3(pVM), PGMGetInterPaeCR3(pVM), PGMGetInterAmd64CR3(pVM),
718 #ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
719 718 /* @todo No need for three GetHyper calls; one and the same base is used */
720 #endif
721 719 PGMGetHyper32BitCR3(pVM), PGMGetHyperPaeCR3(pVM), PGMGetHyperAmd64CR3(pVM),
722 720 SelCS, SelDS, SelCS64, SelTSS);
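The VMMSwitcher.cpp hunk keeps the @todo in place: the three PGMGetHyper*CR3() calls are said to return one and the same base. If that holds, the call site could fetch the value once and reuse it for all three fixup slots. A hypothetical sketch of that simplification (the accessor below is a stand-in, not the VMM API):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t RTHCPHYS;

/* Stand-in for the accessors that, per the @todo, share one base. */
static RTHCPHYS GetHyperCR3Base(void)
{
    return 0x00123000ULL; /* made-up host-physical address */
}

int main(void)
{
    /* One fetch serves the 32-bit, PAE and AMD64 switcher fixups alike. */
    const RTHCPHYS HCPhysHyperCR3 = GetHyperCR3Base();
    printf("32-bit CR3: %#llx\n", (unsigned long long)HCPhysHyperCR3);
    printf("PAE CR3:    %#llx\n", (unsigned long long)HCPhysHyperCR3);
    printf("AMD64 CR3:  %#llx\n", (unsigned long long)HCPhysHyperCR3);
    return 0;
}
-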
trunk/src/VBox/VMM/testcase/tstVMStructGC.cpp
r17537 r17586
416 416 GEN_CHECK_OFF(PGM, GCPhysCR3);
417 417 GEN_CHECK_OFF(PGM, GCPtrCR3Mapping);
418 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
419 GEN_CHECK_OFF(PGM, GCPhysGstCR3Monitored);
420 #endif
421 418 GEN_CHECK_OFF(PGM, pGst32BitPdR3);
422 419 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
…
436 433 GEN_CHECK_OFF(PGM, aGCPhysGstPaePDs);
437 434 GEN_CHECK_OFF(PGM, aGCPhysGstPaePDsMonitored);
438 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
439 GEN_CHECK_OFF(PGM, pShw32BitPdR3);
440 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
441 GEN_CHECK_OFF(PGM, pShw32BitPdR0);
442 # endif
443 GEN_CHECK_OFF(PGM, pShw32BitPdRC);
444 GEN_CHECK_OFF(PGM, HCPhysShw32BitPD);
445 GEN_CHECK_OFF(PGM, apShwPaePDsR3);
446 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
447 GEN_CHECK_OFF(PGM, apShwPaePDsR0);
448 # endif
449 GEN_CHECK_OFF(PGM, apShwPaePDsRC);
450 GEN_CHECK_OFF(PGM, aHCPhysPaePDs);
451 GEN_CHECK_OFF(PGM, pShwPaePdptR3);
452 GEN_CHECK_OFF(PGM, pShwPaePdptR0);
453 GEN_CHECK_OFF(PGM, pShwPaePdptRC);
454 GEN_CHECK_OFF(PGM, HCPhysShwPaePdpt);
455 #endif
456 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
457 GEN_CHECK_OFF(PGM, pShwRootR3);
458 # ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
459 GEN_CHECK_OFF(PGM, pShwRootR0);
460 # endif
461 GEN_CHECK_OFF(PGM, HCPhysShwCR3);
462 #endif
463 435 GEN_CHECK_OFF(PGM, pShwPageCR3R3);
464 436 GEN_CHECK_OFF(PGM, pShwPageCR3R0);
…
472 444 GEN_CHECK_OFF(PGM, pfnR3GstRelocate);
473 445 GEN_CHECK_OFF(PGM, pfnR3GstExit);
474 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
475 GEN_CHECK_OFF(PGM, pfnR3GstMonitorCR3);
476 GEN_CHECK_OFF(PGM, pfnR3GstUnmonitorCR3);
477 #endif
478 446 GEN_CHECK_OFF(PGM, pfnR3BthMapCR3);
479 447 GEN_CHECK_OFF(PGM, pfnR3BthUnmapCR3);
…
689 657 GEN_CHECK_OFF(PGMPOOLPAGE, fCached);
690 658 GEN_CHECK_OFF(PGMPOOLPAGE, fReusedFlushPending);
691 #ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
692 GEN_CHECK_OFF(PGMPOOLPAGE, fCR3Mix);
693 #endif
694 659 GEN_CHECK_SIZE(PGMPOOL);
695 660 GEN_CHECK_OFF(PGMPOOL, pVMR3);
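The tstVMStructGC.cpp changes simply drop the GEN_CHECK_OFF/GEN_CHECK_SIZE entries for the fields this changeset removed. The idea behind such testcases is to record structure sizes and member offsets so that the builds for the different contexts can be compared for layout agreement. A toy version with assumed macro bodies (the real testcase emits generated checking code rather than printing):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct SAMPLE
{
    uint32_t idx;
    uint8_t  fFlags;
    uint64_t u64Data;
} SAMPLE;

/* Assumed, simplified macro bodies; the real macros differ. */
#define GEN_CHECK_SIZE(type) \
    printf(#type ": sizeof=%u\n", (unsigned)sizeof(type))
#define GEN_CHECK_OFF(type, member) \
    printf(#type "::" #member ": offset=%u\n", (unsigned)offsetof(type, member))

int main(void)
{
    GEN_CHECK_SIZE(SAMPLE);
    GEN_CHECK_OFF(SAMPLE, idx);
    GEN_CHECK_OFF(SAMPLE, fFlags);
    GEN_CHECK_OFF(SAMPLE, u64Data);
    return 0;
}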