Changeset 14301 in vbox
Timestamp: Nov 18, 2008, 1:31:42 PM
Location:  trunk
Files:     7 edited
Legend:
  Lines prefixed with "+" were added, lines prefixed with "-" were removed,
  and unprefixed lines are unchanged context. "@@ -old +new @@" gives the
  approximate starting line numbers of each hunk in the old and new revisions.
trunk/include/VBox/pgm.h
(changed from r14115 to r14301)

@@ -535 +535 @@
 VMMR3DECL(bool) PGMR3MapHasConflicts(PVM pVM, uint64_t cr3, bool fRawR0);
 VMMR3DECL(int) PGMR3MapRead(PVM pVM, void *pvDst, RTGCPTR GCPtrSrc, size_t cb);
+VMMR3DECL(int) PGMR3MapActivate(PVM pVM);
+VMMR3DECL(int) PGMR3MapDeactivate(PVM pVM);
+
 VMMR3DECL(int) PGMR3HandlerPhysicalRegister(PVM pVM, PGMPHYSHANDLERTYPE enmType, RTGCPHYS GCPhys, RTGCPHYS GCPhysLast,
                                             PFNPGMR3PHYSHANDLER pfnHandlerR3, void *pvUserR3,
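The two entry points declared above are implemented in PGMMap.cpp further down in this changeset: PGMR3MapActivate applies the hypervisor mappings to the active shadow CR3 and PGMR3MapDeactivate removes them, both becoming no-ops once the mappings are fixed. As a rough usage sketch only (not code from this changeset), a caller switching the active shadow CR3 with VBOX_WITH_PGMPOOL_PAGING_ONLY in effect might bracket the switch with the new calls roughly like this; pgmR3ExampleSwitchShadowCr3 and the switching step in the middle are invented placeholders:

    /* Hypothetical illustration only -- not part of changeset 14301. */
    static int pgmR3ExampleSwitchShadowCr3(PVM pVM)
    {
        /* Take the hypervisor mappings out of the CR3 that is being left. */
        int rc = PGMR3MapDeactivate(pVM);
        AssertRCReturn(rc, rc);

        /* ... the actual shadow CR3 switch would happen here ... */

        /* Re-apply the hypervisor mappings to the newly active CR3. */
        return PGMR3MapActivate(pVM);
    }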
trunk/src/VBox/VMM/PGM.cpp
(changed from r14260 to r14301)

@@ -1413 +1413 @@
      * As with the intermediate context, AMD64 uses the PAE PDPT and PDs.
      */
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pShw32BitPdR3 = (PX86PD)MMR3PageAllocLow(pVM);
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     pVM->pgm.s.pShw32BitPdR0 = (uintptr_t)pVM->pgm.s.pShw32BitPdR3;
-#endif
+# endif
     pVM->pgm.s.apShwPaePDsR3[0] = (PX86PDPAE)MMR3PageAlloc(pVM);
     pVM->pgm.s.apShwPaePDsR3[1] = (PX86PDPAE)MMR3PageAlloc(pVM);
@@ -1424 +1425 @@
     pVM->pgm.s.apShwPaePDsR3[3] = (PX86PDPAE)MMR3PageAlloc(pVM);
     AssertRelease((uintptr_t)pVM->pgm.s.apShwPaePDsR3[2] + PAGE_SIZE == (uintptr_t)pVM->pgm.s.apShwPaePDsR3[3]);
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     pVM->pgm.s.apShwPaePDsR0[0] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[0];
     pVM->pgm.s.apShwPaePDsR0[1] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[1];
     pVM->pgm.s.apShwPaePDsR0[2] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[2];
     pVM->pgm.s.apShwPaePDsR0[3] = (uintptr_t)pVM->pgm.s.apShwPaePDsR3[3];
-#endif
+# endif
     pVM->pgm.s.pShwPaePdptR3 = (PX86PDPT)MMR3PageAllocLow(pVM);
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     pVM->pgm.s.pShwPaePdptR0 = (uintptr_t)pVM->pgm.s.pShwPaePdptR3;
-#endif
+# endif
+#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */
     pVM->pgm.s.pShwNestedRootR3 = MMR3PageAllocLow(pVM);
 #ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
@@ -1439 +1441 @@
 #endif

+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    if (!pVM->pgm.s.pShwNestedRootR3)
+#else
     if (    !pVM->pgm.s.pShw32BitPdR3
         ||  !pVM->pgm.s.apShwPaePDsR3[0]
@@ -1446 +1451 @@
         ||  !pVM->pgm.s.pShwPaePdptR3
         ||  !pVM->pgm.s.pShwNestedRootR3)
+#endif
     {
         AssertMsgFailed(("Failed to allocate pages for the intermediate context!\n"));
@@ -1452 +1458 @@

     /* get physical addresses. */
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.HCPhysShw32BitPD = MMPage2Phys(pVM, pVM->pgm.s.pShw32BitPdR3);
     Assert(MMPagePhys2Page(pVM, pVM->pgm.s.HCPhysShw32BitPD) == pVM->pgm.s.pShw32BitPdR3);
@@ -1459 +1466 @@
     pVM->pgm.s.aHCPhysPaePDs[3] = MMPage2Phys(pVM, pVM->pgm.s.apShwPaePDsR3[3]);
     pVM->pgm.s.HCPhysShwPaePdpt = MMPage2Phys(pVM, pVM->pgm.s.pShwPaePdptR3);
+#endif
     pVM->pgm.s.HCPhysShwNestedRoot = MMPage2Phys(pVM, pVM->pgm.s.pShwNestedRootR3);

@@ -1464 +1472 @@
      * Initialize the pages, setting up the PML4 and PDPT for action below 4GB.
      */
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     ASMMemZero32(pVM->pgm.s.pShw32BitPdR3, PAGE_SIZE);
     ASMMemZero32(pVM->pgm.s.pShwPaePdptR3, PAGE_SIZE);
+#endif
     ASMMemZero32(pVM->pgm.s.pShwNestedRootR3, PAGE_SIZE);
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apShwPaePDsR3); i++)
     {
@@ -1475 +1486 @@

     CPUMSetHyperCR3(pVM, (uint32_t)pVM->pgm.s.HCPhysShw32BitPD);
+#endif

     /*
@@ -1515 +1527 @@
     LogFlow(("pgmR3InitPaging: returns successfully\n"));
 #if HC_ARCH_BITS == 64
+# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     LogRel(("Debug: HCPhysShw32BitPD=%RHp aHCPhysPaePDs={%RHp,%RHp,%RHp,%RHp} HCPhysShwPaePdpt=%RHp HCPhysShwPaePml4=%RHp\n",
             pVM->pgm.s.HCPhysShw32BitPD,
@@ -1520 +1533 @@
             pVM->pgm.s.HCPhysShwPaePdpt,
             pVM->pgm.s.HCPhysShwPaePml4));
+# endif
     LogRel(("Debug: HCPhysInterPD=%RHp HCPhysInterPaePDPT=%RHp HCPhysInterPaePML4=%RHp\n",
             pVM->pgm.s.HCPhysInterPD, pVM->pgm.s.HCPhysInterPaePDPT, pVM->pgm.s.HCPhysInterPaePML4));
@@ -1772 +1786 @@
 {
     RTGCPTR GCPtr;
+    int rc;
+
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     /*
      * Reserve space for mapping the paging pages into guest context.
      */
-    int rc = MMR3HyperReserve(pVM, PAGE_SIZE * (2 + RT_ELEMENTS(pVM->pgm.s.apShwPaePDsR3) + 1 + 2 + 2), "Paging", &GCPtr);
+    rc = MMR3HyperReserve(pVM, PAGE_SIZE * (2 + RT_ELEMENTS(pVM->pgm.s.apShwPaePDsR3) + 1 + 2 + 2), "Paging", &GCPtr);
     AssertRCReturn(rc, rc);
     pVM->pgm.s.pShw32BitPdRC = GCPtr;
     MMR3HyperReserve(pVM, PAGE_SIZE, "fence", NULL);
+#endif

     /*
@@ -1811 +1829 @@
 VMMR3DECL(int) PGMR3InitFinalize(PVM pVM)
 {
+    int rc;
+
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     /*
      * Map the paging pages into the guest context.
@@ -1817 +1838 @@
     AssertReleaseReturn(GCPtr, VERR_INTERNAL_ERROR);

-    int rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysShw32BitPD, PAGE_SIZE, 0);
+    rc = PGMMap(pVM, GCPtr, pVM->pgm.s.HCPhysShw32BitPD, PAGE_SIZE, 0);
     AssertRCReturn(rc, rc);
     pVM->pgm.s.pShw32BitPdRC = GCPtr;
@@ -1841 +1862 @@
     GCPtr += PAGE_SIZE;
     GCPtr += PAGE_SIZE; /* reserved page */
+#endif

     /*
@@ -1905 +1926 @@
     /** @todo move this into shadow and guest specific relocation functions. */
     AssertMsg(pVM->pgm.s.pShw32BitPdR3, ("Init order, no relocation before paging is initialized!\n"));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pShw32BitPdRC += offDelta;
+#endif
     pVM->pgm.s.pGst32BitPdRC += offDelta;
     AssertCompile(RT_ELEMENTS(pVM->pgm.s.apShwPaePDsRC) == RT_ELEMENTS(pVM->pgm.s.apGstPaePDsRC));
     for (unsigned i = 0; i < RT_ELEMENTS(pVM->pgm.s.apShwPaePDsRC); i++)
     {
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
         pVM->pgm.s.apShwPaePDsRC[i] += offDelta;
+#endif
         pVM->pgm.s.apGstPaePDsRC[i] += offDelta;
     }
     pVM->pgm.s.pGstPaePdptRC += offDelta;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pShwPaePdptRC += offDelta;
+#endif

     pgmR3ModeDataInit(pVM, true /* resolve GC/R0 symbols */);
@@ -2965 +2992 @@
     pVM->pgm.s.pfnR3GstModifyPage = pModeData->pfnR3GstModifyPage;
     pVM->pgm.s.pfnR3GstGetPDE = pModeData->pfnR3GstGetPDE;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pfnR3GstMonitorCR3 = pModeData->pfnR3GstMonitorCR3;
     pVM->pgm.s.pfnR3GstUnmonitorCR3 = pModeData->pfnR3GstUnmonitorCR3;
+#endif
     pVM->pgm.s.pfnR3GstMapCR3 = pModeData->pfnR3GstMapCR3;
     pVM->pgm.s.pfnR3GstUnmapCR3 = pModeData->pfnR3GstUnmapCR3;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pfnR3GstWriteHandlerCR3 = pModeData->pfnR3GstWriteHandlerCR3;
     pVM->pgm.s.pszR3GstWriteHandlerCR3 = pModeData->pszR3GstWriteHandlerCR3;
     pVM->pgm.s.pfnR3GstPAEWriteHandlerCR3 = pModeData->pfnR3GstPAEWriteHandlerCR3;
     pVM->pgm.s.pszR3GstPAEWriteHandlerCR3 = pModeData->pszR3GstPAEWriteHandlerCR3;
+#endif
     pVM->pgm.s.pfnRCGstGetPage = pModeData->pfnRCGstGetPage;
     pVM->pgm.s.pfnRCGstModifyPage = pModeData->pfnRCGstModifyPage;
     pVM->pgm.s.pfnRCGstGetPDE = pModeData->pfnRCGstGetPDE;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pfnRCGstMonitorCR3 = pModeData->pfnRCGstMonitorCR3;
     pVM->pgm.s.pfnRCGstUnmonitorCR3 = pModeData->pfnRCGstUnmonitorCR3;
+#endif
     pVM->pgm.s.pfnRCGstMapCR3 = pModeData->pfnRCGstMapCR3;
     pVM->pgm.s.pfnRCGstUnmapCR3 = pModeData->pfnRCGstUnmapCR3;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pfnRCGstWriteHandlerCR3 = pModeData->pfnRCGstWriteHandlerCR3;
     pVM->pgm.s.pfnRCGstPAEWriteHandlerCR3 = pModeData->pfnRCGstPAEWriteHandlerCR3;
+#endif
     pVM->pgm.s.pfnR0GstGetPage = pModeData->pfnR0GstGetPage;
     pVM->pgm.s.pfnR0GstModifyPage = pModeData->pfnR0GstModifyPage;
     pVM->pgm.s.pfnR0GstGetPDE = pModeData->pfnR0GstGetPDE;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pfnR0GstMonitorCR3 = pModeData->pfnR0GstMonitorCR3;
     pVM->pgm.s.pfnR0GstUnmonitorCR3 = pModeData->pfnR0GstUnmonitorCR3;
+#endif
     pVM->pgm.s.pfnR0GstMapCR3 = pModeData->pfnR0GstMapCR3;
     pVM->pgm.s.pfnR0GstUnmapCR3 = pModeData->pfnR0GstUnmapCR3;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pVM->pgm.s.pfnR0GstWriteHandlerCR3 = pModeData->pfnR0GstWriteHandlerCR3;
     pVM->pgm.s.pfnR0GstPAEWriteHandlerCR3 = pModeData->pfnR0GstPAEWriteHandlerCR3;
+#endif

     /* both */
trunk/src/VBox/VMM/PGMGst.h
(changed from r14154 to r14301)

@@ -110 +110 @@
 PGM_GST_DECL(int, Exit)(PVM pVM);

+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
 static DECLCALLBACK(int) pgmR3Gst32BitWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
 static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerCR3(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
-#if 0
 static DECLCALLBACK(int) pgmR3GstPAEWriteHandlerPD(PVM pVM, RTGCPHYS GCPhys, void *pvPhys, void *pvBuf, size_t cbBuf, PGMACCESSTYPE enmAccessType, void *pvUser);
 #endif
@@ -122 +122 @@
 PGM_GST_DECL(int, MapCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
 PGM_GST_DECL(int, UnmapCR3)(PVM pVM);
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
 PGM_GST_DECL(int, MonitorCR3)(PVM pVM, RTGCPHYS GCPhysCR3);
 PGM_GST_DECL(int, UnmonitorCR3)(PVM pVM);
+#endif
 __END_DECLS
@@ -148 +150 @@
     pModeData->pfnR3GstMapCR3 = PGM_GST_NAME(MapCR3);
     pModeData->pfnR3GstUnmapCR3 = PGM_GST_NAME(UnmapCR3);
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     pModeData->pfnR3GstMonitorCR3 = PGM_GST_NAME(MonitorCR3);
     pModeData->pfnR3GstUnmonitorCR3 = PGM_GST_NAME(UnmonitorCR3);
-
-#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
+#endif
+
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
+# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
     pModeData->pfnR3GstWriteHandlerCR3 = PGM_GST_NAME(WriteHandlerCR3);
     pModeData->pszR3GstWriteHandlerCR3 = "Guest CR3 Write access handler";
     pModeData->pfnR3GstPAEWriteHandlerCR3 = PGM_GST_NAME(WriteHandlerCR3);
     pModeData->pszR3GstPAEWriteHandlerCR3 = "Guest CR3 Write access handler (PAE)";
-#else
+# else
     pModeData->pfnR3GstWriteHandlerCR3 = NULL;
     pModeData->pszR3GstWriteHandlerCR3 = NULL;
     pModeData->pfnR3GstPAEWriteHandlerCR3 = NULL;
     pModeData->pszR3GstPAEWriteHandlerCR3 = NULL;
+# endif
 #endif

@@ -175 +181 @@
     rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(GetPDE), &pModeData->pfnRCGstGetPDE);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(GetPDE), rc), rc);
+# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(MonitorCR3), &pModeData->pfnRCGstMonitorCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(MonitorCR3), rc), rc);
     rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(UnmonitorCR3), &pModeData->pfnRCGstUnmonitorCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(UnmonitorCR3), rc), rc);
+# endif
     rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(MapCR3), &pModeData->pfnRCGstMapCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(MapCR3), rc), rc);
     rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(UnmapCR3), &pModeData->pfnRCGstUnmapCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(UnmapCR3), rc), rc);
-# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
+# ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
+#  if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
     rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(WriteHandlerCR3), &pModeData->pfnRCGstWriteHandlerCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
     rc = PDMR3LdrGetSymbolRC(pVM, NULL, PGM_GST_NAME_RC_STR(WriteHandlerCR3), &pModeData->pfnRCGstPAEWriteHandlerCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_RC_STR(WriteHandlerCR3), rc), rc);
+#  endif
 # endif
 #endif /* Not AMD64 shadow paging. */
@@ -198 +208 @@
     rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(GetPDE), &pModeData->pfnR0GstGetPDE);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(GetPDE), rc), rc);
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MonitorCR3), &pModeData->pfnR0GstMonitorCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(MonitorCR3), rc), rc);
     rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmonitorCR3), &pModeData->pfnR0GstUnmonitorCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(UnmonitorCR3), rc), rc);
+#endif
     rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(MapCR3), &pModeData->pfnR0GstMapCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(MapCR3), rc), rc);
     rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(UnmapCR3), &pModeData->pfnR0GstUnmapCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(UnmapCR3), rc), rc);
-#if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
+# if PGM_GST_TYPE == PGM_TYPE_32BIT || PGM_GST_TYPE == PGM_TYPE_PAE
     rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstWriteHandlerCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
     rc = PDMR3LdrGetSymbolR0(pVM, NULL, PGM_GST_NAME_R0_STR(WriteHandlerCR3), &pModeData->pfnR0GstPAEWriteHandlerCR3);
     AssertMsgRCReturn(rc, ("%s -> rc=%Rrc\n", PGM_GST_NAME_R0_STR(WriteHandlerCR3), rc), rc);
+# endif
 #endif
 }
@@ -231 +245 @@
      */
     int rc = PGM_GST_NAME(MapCR3)(pVM, GCPhysCR3);
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     if (RT_SUCCESS(rc) && !pVM->pgm.s.fMappingsFixed)
         rc = PGM_GST_NAME(MonitorCR3)(pVM, GCPhysCR3);
+#endif
     return rc;
 }
@@ -259 +275 @@
 PGM_GST_DECL(int, Exit)(PVM pVM)
 {
-    int rc = PGM_GST_NAME(UnmonitorCR3)(pVM);
+    int rc;
+
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    rc = PGM_GST_NAME(UnmonitorCR3)(pVM);
     if (RT_SUCCESS(rc))
+#endif
     rc = PGM_GST_NAME(UnmapCR3)(pVM);
     return rc;
trunk/src/VBox/VMM/PGMInternal.h
(changed from r14244 to r14301)

@@ -1342 +1342 @@
 /** Page directory (32-bit root). */
 #define PGMPOOL_IDX_PD 1
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+/** Page directory (32-bit root). */
+#define PGMPOOL_IDX_PD 1
+/** Page Directory Pointer Table (PAE root). */
+#define PGMPOOL_IDX_PDPT 2
+/** AMD64 CR3 level index.*/
+#define PGMPOOL_IDX_AMD64_CR3 3
+/** Nested paging root.*/
+#define PGMPOOL_IDX_NESTED_ROOT 4
+/** The first normal index. */
+#define PGMPOOL_IDX_FIRST 5
+#else
 /** The extended PAE page directory (2048 entries, works as root currently). */
 #define PGMPOOL_IDX_PAE_PD 2
@@ -1360 +1372 @@
 /** The first normal index. */
 #define PGMPOOL_IDX_FIRST 10
+#endif
 /** The last valid index. (inclusive, 14 bits) */
 #define PGMPOOL_IDX_LAST 0x3fff
@@ -1429 +1442 @@
     PGMPOOLKIND_PAE_PT_FOR_PAE_2MB,

+    /** Shw: 32-bit page directory. Gst: 32-bit page directory. */
+    PGMPOOLKIND_32BIT_PD,
+    /** Shw: 32-bit page directory. Gst: real mode. */
+    PGMPOOLKIND_32BIT_PD_PHYS_REAL,
+    /** Shw: 32-bit page directory. Gst: protected mode without paging. */
+    PGMPOOLKIND_32BIT_PD_PHYS_PROT,
     /** Shw: PAE page directory; Gst: 32-bit page directory. */
     PGMPOOLKIND_PAE_PD_FOR_32BIT_PD,
     /** Shw: PAE page directory; Gst: PAE page directory. */
     PGMPOOLKIND_PAE_PD_FOR_PAE_PD,
+    /** Shw: PAE page directory; Gst: real mode. */
+    PGMPOOLKIND_PAE_PD_PHYS_REAL,
+    /** Shw: PAE page directory; Gst: protected mode without paging. */
+    PGMPOOLKIND_PAE_PD_PHYS_PROT,
+
+    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst 32 bits paging. */
+    PGMPOOLKIND_PAE_PDPT_FOR_32BIT,
+    /** Shw: PAE page directory pointer table (legacy, 4 entries); Gst PAE PDPT. */
+    PGMPOOLKIND_PAE_PDPT,

     /** Shw: 64-bit page directory pointer table; Gst: 64-bit page directory pointer table. */
@@ -1453 +1481 @@
     PGMPOOLKIND_EPT_PT_FOR_PHYS,

+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     /** Shw: Root 32-bit page directory. */
     PGMPOOLKIND_ROOT_32BIT_PD,
@@ -1459 +1488 @@
     /** Shw: Root PAE page directory pointer table (legacy, 4 entries). */
     PGMPOOLKIND_ROOT_PDPT,
+#endif
     /** Shw: Root Nested paging table. */
     PGMPOOLKIND_ROOT_NESTED,
@@ -1741 +1771 @@
 # define PGMPOOL_PAGE_2_PTR(pVM, pPage) pgmPoolMapPage((pVM), (pPage))
 #else
-# define PGMPOOL_PAGE_2_PTR(pVM, pPage) ((pPage)->pvPageR3)
+inline R3R0PTRTYPE(void *) PGMPOOL_PAGE_2_PTR(PVM pVM, PPGMPOOLPAGE pPage)
+{
+    Assert(pPage->pvPageR3);
+    return pPage->pvPageR3;
+}
 #endif
@@ -1915 +1949 @@
     DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
+#endif
     DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstWriteHandlerCR3;
     R3PTRTYPE(const char *) pszR3GstWriteHandlerCR3;
     R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstPAEWriteHandlerCR3;
     R3PTRTYPE(const char *) pszR3GstPAEWriteHandlerCR3;
+#endif
     DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
     DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     DECLRCCALLBACKMEMBER(int, pfnRCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLRCCALLBACKMEMBER(int, pfnRCGstUnmonitorCR3,(PVM pVM));
+#endif
     DECLRCCALLBACKMEMBER(int, pfnRCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLRCCALLBACKMEMBER(int, pfnRCGstUnmapCR3,(PVM pVM));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnRCGstWriteHandlerCR3;
     RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnRCGstPAEWriteHandlerCR3;
+#endif
     DECLR0CALLBACKMEMBER(int, pfnR0GstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
     DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
+#endif
     DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstWriteHandlerCR3;
     R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstPAEWriteHandlerCR3;
+#endif
     /** @} */

@@ -2086 +2130 @@
     /** @name 32-bit Shadow Paging
      * @{ */
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    /** The Physical Address (HC) of the current active shadow CR3. */
+    RTHCPHYS HCPhysShwCR3;
+    /** Pointer to the page of the current active CR3 - R3 Ptr. */
+    R3PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R3;
+    /** Pointer to the page of the current active CR3 - R0 Ptr. */
+    R0PTRTYPE(PPGMPOOLPAGE) pShwPageCR3R0;
+    /** Pointer to the page of the current active CR3 - RC Ptr. */
+    RCPTRTYPE(PPGMPOOLPAGE) pShwPageCR3RC;
+# if HC_ARCH_BITS == 64
+    RTRCPTR alignment6; /**< structure size alignment. */
+# endif
+#else
     /** The 32-Bit PD - R3 Ptr. */
     R3PTRTYPE(PX86PD) pShw32BitPdR3;
@@ -2092 +2149 @@
     /** The 32-Bit PD - RC Ptr. */
     RCPTRTYPE(PX86PD) pShw32BitPdRC;
-#if HC_ARCH_BITS == 64
+# if HC_ARCH_BITS == 64
     uint32_t u32Padding1; /**< alignment padding. */
-#endif
+# endif
     /** The Physical Address (HC) of the 32-Bit PD. */
     RTHCPHYS HCPhysShw32BitPD;
@@ -2105 +2162 @@
      * Thus, it's possible to walk the 2048 entries starting where apHCPaePDs[0] points. */
     R3PTRTYPE(PX86PDPAE) apShwPaePDsR3[4];
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     /** The four PDs for the low 4GB - R0 Ptr.
      * Same kind of mapping as apHCPaePDs. */
     R0PTRTYPE(PX86PDPAE) apShwPaePDsR0[4];
-#endif
+# endif
     /** The four PDs for the low 4GB - RC Ptr.
      * Same kind of mapping as apHCPaePDs. */
@@ -2125 +2182 @@
     RCPTRTYPE(PX86PDPT) pShwPaePdptRC;
     /** @} */
-#if HC_ARCH_BITS == 64
+# if HC_ARCH_BITS == 64
     RTRCPTR alignment5; /**< structure size alignment. */
-#endif
+# endif

     /** @name AMD64 Shadow Paging
@@ -2134 +2191 @@
     /** The Page Map Level 4 table - R3 Ptr. */
     R3PTRTYPE(PX86PML4) pShwPaePml4R3;
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     /** The Page Map Level 4 table - R0 Ptr. */
     R0PTRTYPE(PX86PML4) pShwPaePml4R0;
-#endif
+# endif
     /** The Physical Address (HC) of the Page Map Level 4 table. */
     RTHCPHYS HCPhysShwPaePml4;
@@ -2150 +2207 @@
     /** Root table; format depends on the host paging mode (AMD-V) or EPT - R3 pointer. */
     RTR3PTR pShwNestedRootR3;
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
     /** Root table; format depends on the host paging mode (AMD-V) or EPT - R0 pointer. */
     RTR0PTR pShwNestedRootR0;
-#endif
+# endif
     /** The Physical Address (HC) of the nested paging root. */
     RTHCPHYS HCPhysShwNestedRoot;
+#endif
     /** @} */

@@ -2182 +2240 @@
     DECLR3CALLBACKMEMBER(int, pfnR3GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     DECLR3CALLBACKMEMBER(int, pfnR3GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     DECLR3CALLBACKMEMBER(int, pfnR3GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR3CALLBACKMEMBER(int, pfnR3GstUnmonitorCR3,(PVM pVM));
+#endif
     DECLR3CALLBACKMEMBER(int, pfnR3GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR3CALLBACKMEMBER(int, pfnR3GstUnmapCR3,(PVM pVM));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstWriteHandlerCR3;
     R3PTRTYPE(const char *) pszR3GstWriteHandlerCR3;
     R3PTRTYPE(PFNPGMR3PHYSHANDLER) pfnR3GstPAEWriteHandlerCR3;
     R3PTRTYPE(const char *) pszR3GstPAEWriteHandlerCR3;
+#endif
     DECLRCCALLBACKMEMBER(int, pfnRCGstGetPage,(PVM pVM, RTGCPTR GCPtr, uint64_t *pfFlags, PRTGCPHYS pGCPhys));
     DECLRCCALLBACKMEMBER(int, pfnRCGstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     DECLRCCALLBACKMEMBER(int, pfnRCGstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     DECLRCCALLBACKMEMBER(int, pfnRCGstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLRCCALLBACKMEMBER(int, pfnRCGstUnmonitorCR3,(PVM pVM));
+#endif
     DECLRCCALLBACKMEMBER(int, pfnRCGstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLRCCALLBACKMEMBER(int, pfnRCGstUnmapCR3,(PVM pVM));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnRCGstWriteHandlerCR3;
     RCPTRTYPE(PFNPGMRCPHYSHANDLER) pfnRCGstPAEWriteHandlerCR3;
+#endif
 #if HC_ARCH_BITS == 64
     RTRCPTR alignment3; /**< structure size alignment. */
@@ -2207 +2272 @@
     DECLR0CALLBACKMEMBER(int, pfnR0GstModifyPage,(PVM pVM, RTGCPTR GCPtr, size_t cbPages, uint64_t fFlags, uint64_t fMask));
     DECLR0CALLBACKMEMBER(int, pfnR0GstGetPDE,(PVM pVM, RTGCPTR GCPtr, PX86PDEPAE pPde));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     DECLR0CALLBACKMEMBER(int, pfnR0GstMonitorCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR0CALLBACKMEMBER(int, pfnR0GstUnmonitorCR3,(PVM pVM));
+#endif
     DECLR0CALLBACKMEMBER(int, pfnR0GstMapCR3,(PVM pVM, RTGCPHYS GCPhysCR3));
     DECLR0CALLBACKMEMBER(int, pfnR0GstUnmapCR3,(PVM pVM));
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstWriteHandlerCR3;
     R0PTRTYPE(PFNPGMRCPHYSHANDLER) pfnR0GstPAEWriteHandlerCR3;
+#endif
     /** @} */

@@ -3857 +3926 @@
 DECLINLINE(PX86PD) pgmShwGet32BitPDPtr(PPGM pPGM)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    return (PX86PD)PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPGM->CTX_SUFF(pShwPageCR3));
+#else
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PD pShwPd;
     Assert(pPGM->HCPhysShw32BitPD != 0 && pPGM->HCPhysShw32BitPD != NIL_RTHCPHYS);
@@ -3863 +3935 @@
     AssertRCReturn(rc, NULL);
     return pShwPd;
-#else
+# else
     return pPGM->CTX_SUFF(pShw32BitPd);
+# endif
 #endif
 }
@@ -3878 +3951 @@
 DECLINLINE(X86PDE) pgmShwGet32BitPDE(PPGM pPGM, RTGCPTR GCPtr)
 {
     const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PCX86PD pShwPd;
-    Assert(pPGM->HCPhysShw32BitPD != 0 && pPGM->HCPhysShw32BitPD != NIL_RTHCPHYS);
-    int rc = PGM_HCPHYS_2_PTR(PGM2VM(pPGM), pPGM->HCPhysShw32BitPD, &pShwPd);
-    if (RT_FAILURE(rc))
+
+    PX86PD pShwPde = pgmShwGet32BitPDPtr(pPGM);
+    if (!pShwPde)
     {
         X86PDE ZeroPde = {0};
-        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
+        return ZeroPde;
     }
-    return pShwPd->a[iPd];
-#else
-    return pPGM->CTX_SUFF(pShw32BitPd)->a[iPd];
-#endif
+    return pShwPde->a[iPd];
 }
@@ -3905 +3973 @@
 DECLINLINE(PX86PDE) pgmShwGet32BitPDEPtr(PPGM pPGM, RTGCPTR GCPtr)
 {
     const unsigned iPd = (GCPtr >> X86_PD_SHIFT) & X86_PD_MASK;
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PD pShwPd;
-    Assert(pPGM->HCPhysShw32BitPD != 0 && pPGM->HCPhysShw32BitPD != NIL_RTHCPHYS);
-    int rc = PGM_HCPHYS_2_PTR(PGM2VM(pPGM), pPGM->HCPhysShw32BitPD, &pShwPd);
-    AssertRCReturn(rc, NULL);
-    return &pShwPd->a[iPd];
-#else
-    return &pPGM->CTX_SUFF(pShw32BitPd)->a[iPd];
-#endif
+
+    PX86PD pPde = pgmShwGet32BitPDPtr(pPGM);
+    AssertReturn(pPde, NULL);
+    return &pPde->a[iPd];
 }
@@ -3926 +3989 @@
 DECLINLINE(PX86PDPT) pgmShwGetPaePDPTPtr(PPGM pPGM)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    return (PX86PDPT)PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPGM->CTX_SUFF(pShwPageCR3));
+#else
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PDPT pShwPdpt;
     Assert(pPGM->HCPhysShwPaePdpt != 0 && pPGM->HCPhysShwPaePdpt != NIL_RTHCPHYS);
@@ -3932 +3998 @@
     AssertRCReturn(rc, 0);
     return pShwPdpt;
-#else
+# else
     return pPGM->CTX_SUFF(pShwPaePdpt);
+# endif
 #endif
 }
@@ -3943 +4010 @@
  * @returns Pointer to the shadow PD.
  * @param pPGM Pointer to the PGM instance data.
- * @param GCPtr Address.
+ * @param GCPtr The address.
  */
 DECLINLINE(PX86PDPAE) pgmShwGetPaePDPtr(PPGM pPGM, RTGCPTR GCPtr)
 {
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
     const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+    PX86PDPT pPdpt = pgmShwGetPaePDPTPtr(pPGM);
+
+    /* Fetch the pgm pool shadow descriptor. */
+    PPGMPOOLPAGE pShwPde = pgmPoolGetPageByHCPhys(PGM2VM(pPGM), pPdpt->a[iPdpt].u & X86_PDPE_PG_MASK);
+    AssertReturn(pShwPde, NULL);
+
+    return (PX86PDPAE)PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pShwPde);
+#else
+    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PDPAE pPD;
     int rc = PGM_HCPHYS_2_PTR(PGM2VM(pPGM), pPGM->aHCPhysPaePDs[iPdpt], &pPD);
     AssertRCReturn(rc, 0);
     return pPD;
-#else
+# else
     PX86PDPAE pPD = pPGM->CTX_SUFF(apShwPaePDs)[iPdpt];
     Assert(pPD);
     return pPD;
+# endif
 #endif
 }
@@ -3966 +4044 @@
  * @returns PDE.
  * @param pPGM Pointer to the PGM instance data.
- * @param GCPtr Address.
+ * @param GCPtr The address.
  */
 DECLINLINE(X86PDEPAE) pgmShwGetPaePDE(PPGM pPGM, RTGCPTR GCPtr)
 {
-    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
     const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PCX86PDPAE pPD;
-    int rc = PGM_HCPHYS_2_PTR(PGM2VM(pPGM), pPGM->aHCPhysPaePDs[iPdpt], &pPD);
-    if (RT_FAILURE(rc))
+
+    PX86PDPAE pShwPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
+    if (!pShwPde)
     {
         X86PDEPAE ZeroPde = {0};
-        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPde);
+        return ZeroPde;
     }
-    return pPD->a[iPd];
-#else
-    return pPGM->CTX_SUFF(apShwPaePDs)[iPdpt]->a[iPd];
-#endif
+    return pShwPde->a[iPd];
 }
@@ -3992 +4065 @@
  * @returns Pointer to the PDE.
  * @param pPGM Pointer to the PGM instance data.
- * @param GCPtr Address.
+ * @param GCPtr The address.
  */
 DECLINLINE(PX86PDEPAE) pgmShwGetPaePDEPtr(PPGM pPGM, RTGCPTR GCPtr)
 {
-    const unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
     const unsigned iPd = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PDPAE pPD;
-    int rc = PGM_HCPHYS_2_PTR(PGM2VM(pPGM), pPGM->aHCPhysPaePDs[iPdpt], &pPD);
-    AssertRCReturn(rc, 0);
-    return &pPD->a[iPd];
-#else
-    Assert(pPGM->CTX_SUFF(apShwPaePDs)[iPdpt]);
-    return &pPGM->CTX_SUFF(apShwPaePDs)[iPdpt]->a[iPd];
-#endif
+
+    PX86PDPAE pPde = pgmShwGetPaePDPtr(pPGM, GCPtr);
+    AssertReturn(pPde, NULL);
+    return &pPde->a[iPd];
 }
@@ -4019 +4086 @@
 DECLINLINE(PX86PML4) pgmShwGetLongModePML4Ptr(PPGM pPGM)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    return (PX86PML4)PGMPOOL_PAGE_2_PTR(PGM2VM(pPGM), pPGM->CTX_SUFF(pShwPageCR3));
+#else
+# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
     PX86PML4 pShwPml4;
     Assert(pPGM->HCPhysShwPaePml4 != 0 && pPGM->HCPhysShwPaePml4 != NIL_RTHCPHYS);
@@ -4025 +4095 @@
     AssertRCReturn(rc, 0);
     return pShwPml4;
-#else
+# else
     Assert(pPGM->CTX_SUFF(pShwPaePml4));
     return pPGM->CTX_SUFF(pShwPaePml4);
+# endif
 #endif
 }
@@ -4042 +4113 @@
 {
     const unsigned iPml4 = ((RTGCUINTPTR64)GCPtr >> X86_PML4_SHIFT) & X86_PML4_MASK;
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PCX86PML4 pShwPml4;
-    Assert(pPGM->HCPhysShwPaePml4 != 0 && pPGM->HCPhysShwPaePml4 != NIL_RTHCPHYS);
-    int rc = PGM_HCPHYS_2_PTR(PGM2VM(pPGM), pPGM->HCPhysShwPaePml4, &pShwPml4);
-    if (RT_FAILURE(rc))
+    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
+
+    if (!pShwPml4)
     {
         X86PML4E ZeroPml4e = {0};
-        AssertMsgFailedReturn(("%Rrc\n", rc), ZeroPml4e);
+        return ZeroPml4e;
     }
     return pShwPml4->a[iPml4];
-# else
-    Assert(pPGM->CTX_SUFF(pShwPaePml4));
-    return pPGM->CTX_SUFF(pShwPaePml4)->a[iPml4];
-# endif
 }
@@ -4068 +4133 @@
 DECLINLINE(PX86PML4E) pgmShwGetLongModePML4EPtr(PPGM pPGM, unsigned int iPml4)
 {
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PML4 pShwPml4;
-    Assert(pPGM->HCPhysShwPaePml4 != 0 && pPGM->HCPhysShwPaePml4 != NIL_RTHCPHYS);
-    int rc = PGM_HCPHYS_2_PTR(PGM2VM(pPGM), pPGM->HCPhysShwPaePml4, &pShwPml4);
-    AssertRCReturn(rc, 0);
+    PX86PML4 pShwPml4 = pgmShwGetLongModePML4Ptr(pPGM);
+
+    if (!pShwPml4)
+        return NULL;
+
     return &pShwPml4->a[iPml4];
-# else
-    Assert(pPGM->CTX_SUFF(pShwPaePml4));
-    return &pPGM->CTX_SUFF(pShwPaePml4)->a[iPml4];
-# endif
 }
trunk/src/VBox/VMM/PGMMap.cpp
(changed from r14154 to r14301)

@@ -430 +430 @@
     }

+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     /*
     * Turn off CR3 updating monitoring.
@@ -435 +436 @@
    int rc2 = PGM_GST_PFN(UnmonitorCR3, pVM)(pVM);
    AssertRC(rc2);
+#endif

    /*
@@ -482 +484 @@
    AssertRC(rc);

+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
    rc = PGM_GST_PFN(MonitorCR3, pVM)(pVM, pVM->pgm.s.GCPhysCR3);
    AssertRC(rc);
+#endif
    return VINF_SUCCESS;
 }
@@ -723 +726 @@
         */
        pPGM->pInterPD->a[iOldPDE].u = 0;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        pPGM->pShw32BitPdR3->a[iOldPDE].u = 0;
+#endif
        /*
         * PAE.
@@ -730 +735 @@
        unsigned iPDE = iOldPDE * 2 % 512;
        pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;
+#endif
        iPDE++;
        pPGM->apInterPaePDs[iPD]->a[iPDE].u = 0;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        pPGM->apShwPaePDsR3[iPD]->a[iPDE].u = 0;

        /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entry. (legacy PAE guest mode) */
        pPGM->pShwPaePdptR3->a[iPD].u &= ~PGM_PLXFLAGS_MAPPING;
+#endif
    }
 }
@@ -771 +779 @@
         * 32-bit.
         */
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        if (pPGM->pShw32BitPdR3->a[iNewPDE].n.u1Present)
            pgmPoolFree(pVM, pPGM->pShw32BitPdR3->a[iNewPDE].u & X86_PDE_PG_MASK, PGMPOOL_IDX_PD, iNewPDE);
+#endif
        X86PDE Pde;
        /* Default mapping page directory flags are read/write and supervisor; individual page attributes determine the final flags */
        Pde.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | (uint32_t)pMap->aPTs[i].HCPhysPT;
        pPGM->pInterPD->a[iNewPDE] = Pde;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        pPGM->pShw32BitPdR3->a[iNewPDE] = Pde;
+#endif
        /*
         * PAE.
@@ -784 +795 @@
        const unsigned iPD = iNewPDE / 256;
        unsigned iPDE = iNewPDE * 2 % 512;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        if (pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
            pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2);
+#endif
        X86PDEPAE PdePae0;
        PdePae0.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT0;
        pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae0;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae0;
+#endif
        iPDE++;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        if (pPGM->apShwPaePDsR3[iPD]->a[iPDE].n.u1Present)
            pgmPoolFree(pVM, pPGM->apShwPaePDsR3[iPD]->a[iPDE].u & X86_PDE_PAE_PG_MASK, PGMPOOL_IDX_PAE_PD, iNewPDE * 2 + 1);
+#endif
        X86PDEPAE PdePae1;
        PdePae1.u = PGM_PDFLAGS_MAPPING | X86_PDE_P | X86_PDE_A | X86_PDE_RW | X86_PDE_US | pMap->aPTs[i].HCPhysPaePT1;
        pPGM->apInterPaePDs[iPD]->a[iPDE] = PdePae1;
+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
        pPGM->apShwPaePDsR3[iPD]->a[iPDE] = PdePae1;

        /* Set the PGM_PDFLAGS_MAPPING flag in the page directory pointer entry. (legacy PAE guest mode) */
        pPGM->pShwPaePdptR3->a[iPD].u |= PGM_PLXFLAGS_MAPPING;
+#endif
    }
 }
@@ -1129 +1147 @@
 }

+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+/**
+ * Apply the hypervisor mappings to the active CR3.
+ *
+ * @returns VBox status.
+ * @param pVM The virtual machine.
+ */
+VMMR3DECL(int) PGMR3MapActivate(PVM pVM)
+{
+    /*
+     * Can skip this if mappings are safely fixed.
+     */
+    if (pVM->pgm.s.fMappingsFixed)
+        return VINF_SUCCESS;
+
+    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
+    Assert(enmGuestMode <= PGMMODE_PAE_NX);
+
+    /*
+     * Iterate mappings.
+     */
+    if (enmGuestMode == PGMMODE_32_BIT)
+    {
+        /*
+         * Resolve the page directory.
+         */
+        PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
+        Assert(pPD);
+
+        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+        {
+            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+            unsigned iPT = pCur->cPTs;
+            while (iPT-- > 0)
+                pPD->a[iPDE + iPT].u = 0;
+        }
+    }
+    else if (   enmGuestMode == PGMMODE_PAE
+             || enmGuestMode == PGMMODE_PAE_NX)
+    {
+        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+        {
+            RTGCPTR GCPtr = pCur->GCPtr;
+            unsigned iPdpt = (GCPtr >> X86_PDPT_SHIFT) & X86_PDPT_MASK_PAE;
+
+            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
+            while (iPT-- > 0)
+            {
+                PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
+                pPDE->u = 0;
+
+                GCPtr += (1 << X86_PD_PAE_SHIFT);
+            }
+        }
+    }
+    else
+        AssertFailed();
+
+    return VINF_SUCCESS;
+}
+
+/**
+ * Remove the hypervisor mappings from the active CR3
+ *
+ * @returns VBox status.
+ * @param pVM The virtual machine.
+ */
+VMMR3DECL(int) PGMR3MapDeactivate(PVM pVM)
+{
+    /*
+     * Can skip this if mappings are safely fixed.
+     */
+    if (pVM->pgm.s.fMappingsFixed)
+        return VINF_SUCCESS;
+
+    PGMMODE const enmGuestMode = PGMGetGuestMode(pVM);
+    Assert(enmGuestMode <= PGMMODE_PAE_NX);
+
+    /*
+     * Iterate mappings.
+     */
+    if (enmGuestMode == PGMMODE_32_BIT)
+    {
+        /*
+         * Resolve the page directory.
+         */
+        PX86PD pPD = (PX86PD)pVM->pgm.s.pShwPageCR3R3;
+        Assert(pPD);
+
+        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+        {
+            unsigned iPDE = pCur->GCPtr >> X86_PD_SHIFT;
+            unsigned iPT = pCur->cPTs;
+            while (iPT-- > 0)
+                pPD->a[iPDE + iPT].u = 0;
+        }
+    }
+    else if (   enmGuestMode == PGMMODE_PAE
+             || enmGuestMode == PGMMODE_PAE_NX)
+    {
+        for (PPGMMAPPING pCur = pVM->pgm.s.pMappingsR3; pCur; pCur = pCur->pNextR3)
+        {
+            RTGCPTR GCPtr = pCur->GCPtr;
+
+            unsigned iPT = pCur->cb >> X86_PD_PAE_SHIFT;
+            while (iPT-- > 0)
+            {
+                PX86PDEPAE pPDE = pgmShwGetPaePDEPtr(&pVM->pgm.s, GCPtr);
+                pPDE->u = 0;
+
+                GCPtr += (1 << X86_PD_PAE_SHIFT);
+            }
+        }
+
+        /* Clear the PGM_PDFLAGS_MAPPING flag for the page directory pointer entries. (legacy PAE guest mode) */
+        PX86PDPT pPdpt = (PX86PDPT)pVM->pgm.s.pShwPageCR3R3;
+        for (unsigned i = 0; i < X86_PG_PAE_PDPE_ENTRIES; i++)
+            pPdpt->a[i].u &= ~PGM_PLXFLAGS_MAPPING;
+    }
+    else
+        AssertFailed();
+
+    return VINF_SUCCESS;
+}
+#endif /* VBOX_WITH_PGMPOOL_PAGING_ONLY */

 /**
trunk/src/VBox/VMM/PGMPool.cpp
(changed from r14147 to r14301)

@@ -255 +255 @@
     pPool->aPages[PGMPOOL_IDX_PD].Core.Key = NIL_RTHCPHYS;
     pPool->aPages[PGMPOOL_IDX_PD].GCPhys = NIL_RTGCPHYS;
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    pPool->aPages[PGMPOOL_IDX_PD].pvPageR3 = 0;
+    pPool->aPages[PGMPOOL_IDX_PD].enmKind = PGMPOOLKIND_32BIT_PD;
+#else
     pPool->aPages[PGMPOOL_IDX_PD].pvPageR3 = pVM->pgm.s.pShw32BitPdR3;
     pPool->aPages[PGMPOOL_IDX_PD].enmKind = PGMPOOLKIND_ROOT_32BIT_PD;
+#endif
     pPool->aPages[PGMPOOL_IDX_PD].idx = PGMPOOL_IDX_PD;

+#ifndef VBOX_WITH_PGMPOOL_PAGING_ONLY
     /* The Shadow PAE PDs. This is actually 4 pages! (32 bits guest paging) */
     pPool->aPages[PGMPOOL_IDX_PAE_PD].Core.Key = NIL_RTHCPHYS;
@@ -275 +281 @@
         pPool->aPages[PGMPOOL_IDX_PAE_PD_0 + i].idx = PGMPOOL_IDX_PAE_PD_0 + i;
     }
+#endif

     /* The Shadow PDPT. */
     pPool->aPages[PGMPOOL_IDX_PDPT].Core.Key = NIL_RTHCPHYS;
     pPool->aPages[PGMPOOL_IDX_PDPT].GCPhys = NIL_RTGCPHYS;
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    pPool->aPages[PGMPOOL_IDX_PDPT].pvPageR3 = 0;
+    pPool->aPages[PGMPOOL_IDX_PDPT].enmKind = PGMPOOLKIND_PAE_PDPT;
+#else
     pPool->aPages[PGMPOOL_IDX_PDPT].pvPageR3 = pVM->pgm.s.pShwPaePdptR3;
     pPool->aPages[PGMPOOL_IDX_PDPT].enmKind = PGMPOOLKIND_ROOT_PDPT;
+#endif
     pPool->aPages[PGMPOOL_IDX_PDPT].idx = PGMPOOL_IDX_PDPT;

@@ -286 +298 @@
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].Core.Key = NIL_RTHCPHYS;
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].GCPhys = NIL_RTGCPHYS;
+#ifdef VBOX_WITH_PGMPOOL_PAGING_ONLY
+    pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3 = 0;
+#else
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].pvPageR3 = pVM->pgm.s.pShwPaePdptR3; /* not used - isn't it wrong as well? */
+#endif
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].enmKind = PGMPOOLKIND_64BIT_PML4_FOR_64BIT_PML4;
     pPool->aPages[PGMPOOL_IDX_AMD64_CR3].idx = PGMPOOL_IDX_AMD64_CR3;
trunk/src/VBox/VMM/VMMSwitcher/32BitToAMD64.asm
(changed from r14267 to r14301; the FIXUP line differs only in whitespace)

@@ -439 +439 @@
     lss esp, [edx + CPUMCPU.Host.esp]

-    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.
+    FIXUP FIX_NO_SYSENTER_JMP, 0, gth_sysenter_no - NAME(Start) ; this will insert a jmp gth_sysenter_no if host doesn't use sysenter.

+    ; restore MSR_IA32_SYSENTER_CS register.
     mov ecx, MSR_IA32_SYSENTER_CS