Changeset 87141 in vbox
Timestamp: Dec 29, 2020 7:12:45 PM
Location:  trunk
Files:     9 edited

Every change in this set deletes code guarded by the VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option (the ring-0 dynamic page-mapping support that one removed comment calls "sketchy GC/R0-darwin code"), leaving the remaining mapping paths unconditional.

Legend: in the diffs below, lines starting with "-" were removed, lines starting with "+" were added, unprefixed lines are unmodified context, and "…" marks lines elided by the changeset viewer.
trunk/include/VBox/types.h (r86666 → r87141)

@@ -1123 +1123 @@
 typedef struct PGMPAGEMAPLOCK
 {
-#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+#if defined(IN_RC)
     /** The locked page. */
     void *pvPage;
trunk/include/VBox/vmm/pgm.h (r85126 → r87141)

@@ -563 +563 @@
 VMM_INT_DECL(int) PGMPhysIemQueryAccess(PVMCC pVM, RTGCPHYS GCPhys, bool fWritable, bool fByPassHandlers);
 VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
-#if defined(IN_RC) || defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0)
+#if defined(IN_RC)
                                              R3PTRTYPE(uint8_t *) *ppb,
 #else
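The signature change above narrows the context-dependent pointer type: with the 2x4GB ring-0 option gone, only raw-mode (IN_RC) keeps the ring-3-only pointer, and ring-0 builds get a directly usable one. A minimal sketch of the idea behind these macros, using simplified stand-in definitions (the real macros live in include/VBox/types.h and differ in detail):

#include <stdint.h>

/* Simplified stand-ins, not the real VBox definitions: a ring-3-only
 * pointer degrades to an opaque value outside ring-3, while an R3R0
 * pointer stays a real, dereferenceable pointer in ring-3 and ring-0. */
#ifdef IN_RING3
# define R3PTRTYPE(type)    type
#else
# define R3PTRTYPE(type)    uint64_t     /* opaque outside ring-3 (sketch) */
#endif
#define R3R0PTRTYPE(type)   type

typedef struct EXAMPLE
{
    R3PTRTYPE(uint8_t *)   pbRing3Only;  /* only meaningful in ring-3       */
    R3R0PTRTYPE(uint8_t *) pbRing3And0;  /* meaningful in ring-3 and ring-0 */
} EXAMPLE;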
trunk/src/VBox/VMM/VMMAll/PGMAll.cpp (r86488 → r87141)

@@ -2186 +2186 @@
 
 
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-
 /**
  * Performs the lazy mapping of the 32-bit guest PD.
 …
@@ -2347 +2345 @@
 # else
         RTHCPTR HCPtr = NIL_RTHCPTR;
-# ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
         rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, &HCPtr);
         AssertRC(rc);
-# endif
         if (RT_SUCCESS(rc))
         {
 …
@@ -2433 +2429 @@
     return rc;
 }
-
-#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 
 
 …
@@ -3560 +3554 @@
 }
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-
-/**
- * Common worker for pgmRZDynMapGCPageOffInlined and pgmRZDynMapGCPageV2Inlined.
- *
- * @returns VBox status code.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure of the calling EMT.
- * @param   GCPhys      The guest physical address of the page to map.  The
- *                      offset bits are not ignored.
- * @param   ppv         Where to return the address corresponding to @a GCPhys.
- * @param   SRC_POS     The source position of the caller (RT_SRC_POS).
- */
-int pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
-{
-    pgmLock(pVM);
-
-    /*
-     * Convert it to a writable page and it on to the dynamic mapper.
-     */
-    int rc;
-    PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
-    if (RT_LIKELY(pPage))
-    {
-        rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
-        if (RT_SUCCESS(rc))
-        {
-            void *pv;
-            rc = pgmRZDynMapHCPageInlined(pVCpu, PGM_PAGE_GET_HCPHYS(pPage), &pv RTLOG_COMMA_SRC_POS_ARGS);
-            if (RT_SUCCESS(rc))
-                *ppv = (void *)((uintptr_t)pv | ((uintptr_t)GCPhys & PAGE_OFFSET_MASK));
-        }
-        else
-            AssertRC(rc);
-    }
-    else
-    {
-        AssertMsgFailed(("Invalid physical address %RGp!\n", GCPhys));
-        rc = VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS;
-    }
-
-    pgmUnlock(pVM);
-    return rc;
-}
-
-#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 
 #if !defined(IN_R0) || defined(LOG_ENABLED)
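Both the removed pgmRZDynMapGCPageCommon and the surviving TLB-based paths end with the same idiom: page mappings are page aligned, so the low offset bits of the guest-physical address are OR'ed back into the mapped address. A standalone toy illustration, not VBox code (the 4K page size and all names here are assumptions for the sketch):

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET_MASK 0xfffu   /* low 12 bits of a 4K page */

/* Merge the page offset of GCPhys into a page-aligned mapping address,
 * as both the removed and the surviving code paths do. */
static void *mergeOffset(void *pvPage, uint64_t GCPhys)
{
    return (void *)((uintptr_t)pvPage | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
}

int main(void)
{
    static _Alignas(4096) char aPage[4096]; /* stands in for a mapped page */
    uint64_t GCPhys = 0x1234567;            /* some guest-physical address */
    char    *pb     = (char *)mergeOffset(aPage, GCPhys);
    printf("offset into page: 0x%x\n", (unsigned)(pb - aPage)); /* 0x567 */
    return 0;
}

The OR (rather than an add) is safe only because the mapping base has its low 12 bits clear, which is why the toy forces 4096-byte alignment.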
trunk/src/VBox/VMM/VMMAll/PGMAllBth.h (r86554 → r87141)

@@ -4318 +4318 @@
     AssertReturn(pPageCR3, VERR_PGM_INVALID_CR3_ADDR);
     /** @todo this needs some reworking wrt. locking? */
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    HCPtrGuestCR3 = NIL_RTHCPTR;
-    int rc = VINF_SUCCESS;
-# else
    int rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPageCR3, GCPhysCR3 & GST_CR3_PAGE_MASK, (void **)&HCPtrGuestCR3); /** @todo r=bird: This GCPhysCR3 masking isn't necessary. */
-# endif
     pgmUnlock(pVM);
     if (RT_SUCCESS(rc))
 …
@@ -4373 +4368 @@
     PPGMPAGE pPage = pgmPhysGetPage(pVM, GCPhys);
     AssertReturn(pPage, VERR_PGM_INVALID_PDPE_ADDR);
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    HCPtr = NIL_RTHCPTR;
-    int rc2 = VINF_SUCCESS;
-# else
     int rc2 = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)&HCPtr);
-# endif
     pgmUnlock(pVM);
     if (RT_SUCCESS(rc2))
trunk/src/VBox/VMM/VMMAll/PGMAllPhys.cpp (r86473 → r87141)

@@ -1152 +1152 @@
     AssertReturn(idChunk != NIL_GMM_CHUNKID, VERR_INVALID_PARAMETER);
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /*
-     * Map it by HCPhys.
-     */
-    return pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
-
-#elif defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
+#if defined(IN_RING0) && defined(VBOX_WITH_RAM_IN_KERNEL)
 # ifdef VBOX_WITH_LINEAR_HOST_PHYS_MEM
     return SUPR0HCPhysToVirt(HCPhys & ~(RTHCPHYS)PAGE_OFFSET_MASK, ppv);
 …
@@ -1233 +1227 @@
     PGM_LOCK_ASSERT_OWNER(pVM);
     NOREF(GCPhys);
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /*
-     * Just some sketchy GC/R0-darwin code.
-     */
-    *ppMap = NULL;
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(pPage);
-    Assert(HCPhys != pVM->pgm.s.HCPhysZeroPg);
-    pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), HCPhys, ppv RTLOG_COMMA_SRC_POS);
-    return VINF_SUCCESS;
-
-#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
-
 
     /*
 …
@@ -1361 +1342 @@
     return VINF_SUCCESS;
 # endif /* !IN_RING0 || !VBOX_WITH_RAM_IN_KERNEL */
-#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 }
 
 …
@@ -1455 +1435 @@
 }
 
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
 
 /**
 …
@@ -1542 +1521 @@
 }
 
-#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 
 /**
 …
@@ -1583 +1561 @@
      * Get the mapping address.
      */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    void *pv;
-    rc = pgmRZDynMapHCPageInlined(VMMGetCpu(pVM),
-                                  PGM_PAGE_GET_HCPHYS(pPage),
-                                  &pv
-                                  RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-        return rc;
-    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-#else
     PPGMPAGEMAPTLBE pTlbe;
     rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
 …
         return rc;
     *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-#endif
     return VINF_SUCCESS;
 }
 
-#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
 
 /**
 …
@@ -1690 +1656 @@
 }
 
-#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
-
 
 /**
 …
@@ -1732 +1696 @@
      * Do the job.
      */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    void *pv;
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    rc = pgmRZDynMapHCPageInlined(pVCpu,
-                                  PGM_PAGE_GET_HCPHYS(pPage),
-                                  &pv
-                                  RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-        return rc;
-    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-    pLock->pvPage = pv;
-    pLock->pVCpu = pVCpu;
-
-#else
     PPGMPAGEMAPTLBE pTlbe;
     rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
 …
     pgmPhysPageMapLockForWriting(pVM, pPage, pTlbe, pLock);
     *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-#endif
     return VINF_SUCCESS;
 }
 …
@@ -1784 +1733 @@
      * Do the job.
      */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    void *pv;
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    int rc = pgmRZDynMapHCPageInlined(pVCpu,
-                                      PGM_PAGE_GET_HCPHYS(pPage),
-                                      &pv
-                                      RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
-    if (RT_FAILURE(rc))
-        return rc;
-    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-    pLock->pvPage = pv;
-    pLock->pVCpu = pVCpu;
-
-#else
     PPGMPAGEMAPTLBE pTlbe;
     int rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
 …
     pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
     *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-#endif
     return VINF_SUCCESS;
 }
 …
@@ -1847 +1781 @@
     AssertRCReturn(rc, rc);
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /*
-     * Find the page and make sure it's writable.
-     */
-    PPGMPAGE pPage;
-    rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
-    if (RT_SUCCESS(rc))
-    {
-        if (RT_UNLIKELY(PGM_PAGE_GET_STATE(pPage) != PGM_PAGE_STATE_ALLOCATED))
-            rc = pgmPhysPageMakeWritable(pVM, pPage, GCPhys);
-        if (RT_SUCCESS(rc))
-        {
-            AssertMsg(rc == VINF_SUCCESS || rc == VINF_PGM_SYNC_CR3 /* not returned */, ("%Rrc\n", rc));
-
-            PVMCPU pVCpu = VMMGetCpu(pVM);
-            void *pv;
-            rc = pgmRZDynMapHCPageInlined(pVCpu,
-                                          PGM_PAGE_GET_HCPHYS(pPage),
-                                          &pv
-                                          RTLOG_COMMA_SRC_POS);
-            if (RT_SUCCESS(rc))
-            {
-                AssertRCSuccess(rc);
-
-                pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-                *ppv = pv;
-                pLock->pvPage = pv;
-                pLock->pVCpu = pVCpu;
-            }
-        }
-    }
-
-#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     /*
      * Query the Physical TLB entry for the page (may fail).
 …
 }
 
-#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     pgmUnlock(pVM);
     return rc;
 …
@@ -1950 +1850 @@
     AssertRCReturn(rc, rc);
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /*
-     * Find the page and make sure it's readable.
-     */
-    PPGMPAGE pPage;
-    rc = pgmPhysGetPageEx(pVM, GCPhys, &pPage);
-    if (RT_SUCCESS(rc))
-    {
-        if (RT_UNLIKELY(PGM_PAGE_IS_MMIO_OR_SPECIAL_ALIAS(pPage)))
-            rc = VERR_PGM_PHYS_PAGE_RESERVED;
-        else
-        {
-            PVMCPU pVCpu = VMMGetCpu(pVM);
-            void *pv;
-            rc = pgmRZDynMapHCPageInlined(pVCpu,
-                                          PGM_PAGE_GET_HCPHYS(pPage),
-                                          &pv
-                                          RTLOG_COMMA_SRC_POS); /** @todo add a read only flag? */
-            if (RT_SUCCESS(rc))
-            {
-                AssertRCSuccess(rc);
-
-                pv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-                *ppv = pv;
-                pLock->pvPage = pv;
-                pLock->pVCpu = pVCpu;
-            }
-        }
-    }
-
-#else /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     /*
      * Query the Physical TLB entry for the page (may fail).
 …
 }
 
-#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
     pgmUnlock(pVM);
     return rc;
 …
@@ -2093 +1961 @@
 VMMDECL(void) PGMPhysReleasePageMappingLock(PVMCC pVM, PPGMPAGEMAPLOCK pLock)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    Assert(pLock->pvPage != NULL);
-    Assert(pLock->pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
-    PGM_DYNMAP_UNUSED_HINT(pLock->pVCpu, pLock->pvPage);
-    pLock->pVCpu = NULL;
-    pLock->pvPage = NULL;
-
-#else
 # if !defined(IN_RING0) || !defined(VBOX_WITH_RAM_IN_KERNEL)
     PPGMPAGEMAP pMap = (PPGMPAGEMAP)pLock->pvMap;
 …
 # endif
     pgmUnlock(pVM);
-#endif /* !VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 }
 …
@@ -2307 +2166 @@
 
     Log(("pgmPhysGCPhys2R3Ptr(,%RGp,): dont use this API!\n", GCPhys)); /** @todo eliminate this API! */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    NOREF(pVM); NOREF(pR3Ptr); RT_NOREF_PV(GCPhys);
-    AssertFailedReturn(VERR_NOT_IMPLEMENTED);
-#else
     pgmLock(pVM);
 
 …
     Assert(rc <= VINF_SUCCESS);
     return rc;
-#endif
-}
-
-#if 0 /*def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
-
-/**
- * Maps and locks a guest CR3 or PD (PAE) page.
- *
- * @returns VINF_SUCCESS on success.
- * @returns VERR_PGM_PHYS_PAGE_RESERVED it it's a valid GC physical
- *          page but has no physical backing.
- * @returns VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS if it's not a valid
- *          GC physical address.
- * @returns VERR_PGM_GCPHYS_RANGE_CROSSES_BOUNDARY if the range crosses
- *          a dynamic ram chunk boundary
- *
- * @param   pVM     The cross context VM structure.
- * @param   GCPhys  The GC physical address to convert.
- * @param   pR3Ptr  Where to store the R3 pointer on success.  This may or
- *                  may not be valid in ring-0 depending on the
- *                  VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 build option.
- *
- * @remarks The caller must own the PGM lock.
- */
-int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
-{
-
-    PPGMRAMRANGE pRam;
-    PPGMPAGE pPage;
-    int rc = pgmPhysGetPageAndRangeEx(pVM, GCPhys, &pPage, &pRam);
-    if (RT_SUCCESS(rc))
-        rc = pgmPhysGCPhys2CCPtrInternalDepr(pVM, pPage, GCPhys, (void **)pR3Ptr);
-    Assert(rc <= VINF_SUCCESS);
-    return rc;
-}
-
-
-int pgmPhysCr3ToHCPtr(PVM pVM, RTGCPHYS GCPhys, PRTR3PTR pR3Ptr)
-{
-
-}
-
-#endif
+}
+
 
 
 /**
 …
@@ -4287 +4101 @@
  */
 VMM_INT_DECL(int) PGMPhysIemGCPhys2PtrNoLock(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, uint64_t const volatile *puTlbPhysRev,
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-                                             R3PTRTYPE(uint8_t *) *ppb,
-#else
                                              R3R0PTRTYPE(uint8_t *) *ppb,
-#endif
                                              uint64_t *pfTlb)
 {
 …
@@ -4327 +4137 @@
             break;
         }
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
-        *ppb = NULL;
-#else
+
         PPGMPAGEMAPTLBE pTlbe;
         rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
         AssertLogRelRCReturn(rc, rc);
         *ppb = (uint8_t *)pTlbe->pv;
-#endif
     }
     else if (PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(pPage))
 …
@@ -4368 +4174 @@
             break;
         }
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        *pfTlb |= PGMIEMGCPHYS2PTR_F_NO_MAPPINGR3;
-        *ppb = NULL;
-#else
+
        PPGMPAGEMAPTLBE pTlbe;
         rc = pgmPhysPageQueryTlbeWithPage(pVM, pPage, GCPhys, &pTlbe);
         AssertLogRelRCReturn(rc, rc);
         *ppb = (uint8_t *)pTlbe->pv;
-#endif
     }
 }
 …
@@ -4486 +4288 @@
     }
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    void *pv;
-    rc = pgmRZDynMapHCPageInlined(pVCpu,
-                                  PGM_PAGE_GET_HCPHYS(pPage),
-                                  &pv
-                                  RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-        return rc;
-    *ppv = (void *)((uintptr_t)pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-    pLock->pvPage = pv;
-    pLock->pVCpu = pVCpu;
-
-#else
     /* Get a ring-3 mapping of the address. */
     PPGMPAGEMAPTLBE pTlbe;
 …
     pgmPhysPageMapLockForReading(pVM, pPage, pTlbe, pLock);
     *ppv = (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & PAGE_OFFSET_MASK));
-#endif
 
     Log6(("PGMPhysIemGCPhys2Ptr: GCPhys=%RGp rc=%Rrc pPage=%R[pgmpage] *ppv=%p\n", GCPhys, rc, pPage, *ppv));
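The surviving path in the lock/unlock functions above queries a per-VM physical-page mapping TLB entry and pins it through the lock structure until PGMPhysReleasePageMappingLock runs. A standalone toy model of that query-lock-use-release flow; all types, sizes, and names are simplified stand-ins, not the VBox ones:

#include <stdint.h>
#include <stdio.h>

#define TLB_ENTRIES 8
#define PAGE_SIZE   4096u

typedef struct TLBE
{
    uint64_t GCPhysPage;  /* page-aligned guest-physical address cached here */
    void    *pv;          /* where that page is "mapped" in this toy         */
    unsigned cLocks;      /* outstanding mapping locks pinning the entry     */
} TLBE;

static TLBE g_aTlb[TLB_ENTRIES];
static _Alignas(4096) char g_aBacking[TLB_ENTRIES][PAGE_SIZE]; /* fake RAM */

/* Query the TLB entry for GCPhys (refilling it on a miss), take a lock on
 * it, and return the mapping with the page offset merged back in. */
static void *tlbQueryAndLock(uint64_t GCPhys, TLBE **ppLock)
{
    unsigned i     = (unsigned)((GCPhys / PAGE_SIZE) % TLB_ENTRIES);
    TLBE    *pTlbe = &g_aTlb[i];
    if (pTlbe->GCPhysPage != (GCPhys & ~(uint64_t)(PAGE_SIZE - 1)))
    {   /* miss: (re)fill the entry */
        pTlbe->GCPhysPage = GCPhys & ~(uint64_t)(PAGE_SIZE - 1);
        pTlbe->pv         = g_aBacking[i];
    }
    pTlbe->cLocks++;
    *ppLock = pTlbe;
    return (void *)((uintptr_t)pTlbe->pv | (uintptr_t)(GCPhys & (PAGE_SIZE - 1)));
}

static void tlbUnlock(TLBE *pTlbe) { pTlbe->cLocks--; }

int main(void)
{
    TLBE *pLock;
    char *pb = (char *)tlbQueryAndLock(0x2123, &pLock);
    *pb = 42;                  /* use the mapping only while the lock is held */
    tlbUnlock(pLock);          /* like PGMPhysReleasePageMappingLock           */
    printf("locks outstanding: %u\n", pLock->cLocks);
    return 0;
}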
trunk/src/VBox/VMM/VMMAll/PGMAllPool.cpp (r86489 → r87141)

@@ -830 +830 @@
     while (pRegFrame->rcx)
     {
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
         pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
-        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
-# else
-        pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault, NULL, uIncrement);
-# endif
         PGMPhysSimpleWriteGCPhys(pVM, GCPhysFault, &pRegFrame->rax, uIncrement);
         pu32 += uIncrement;
 …
@@ -881 +875 @@
      * Clear all the pages. ASSUMES that pvFault is readable.
      */
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
-# endif
-
     uint32_t cbWrite = DISGetParamSize(pDis, &pDis->Param1);
     if (cbWrite <= 8)
 …
         pgmPoolMonitorChainChanging(pVCpu, pPool, pPage, GCPhysFault + off, NULL, RT_MIN(8, cbWrite - off));
     }
-
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
-# endif
 
     /*
 …
@@ -1658 +1644 @@
     Log(("Flush dirty page %RGp cMods=%d\n", pPage->GCPhys, pPage->cModifications));
 
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
-# endif
-
     /* First write protect the page again to catch all write accesses. (before checking for changes -> SMP) */
     int rc = PGMHandlerPhysicalReset(pVM, pPage->GCPhys & PAGE_BASE_GC_MASK);
 …
     else
         Log(("Removed dirty page %RGp cMods=%d cChanges=%d\n", pPage->GCPhys, pPage->cModifications, cChanges));
-
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
-# endif
 }
 …
@@ -3538 +3515 @@
     else
     {
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 /** @todo we can drop this now. */
-        /* Start a subset here because pgmPoolTrackFlushGCPhysPTsSlow and
-           pgmPoolTrackFlushGCPhysPTs will/may kill the pool otherwise. */
-        uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
-# endif
-
         if (PGMPOOL_TD_GET_CREFS(u16) != PGMPOOL_TD_CREFS_PHYSEXT)
         {
 …
             rc = pgmPoolTrackFlushGCPhysPTsSlow(pVM, pPhysPage);
         *pfFlushTLBs = true;
-
-# ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
-# endif
     }
 …
@@ -4864 +4831 @@
     }
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /* Start a subset so we won't run out of mapping space. */
-    PVMCPU pVCpu = VMMGetCpu(pVM);
-    uint32_t iPrevSubset = PGMRZDynMapPushAutoSubset(pVCpu);
-#endif
-
     /*
      * Mark the page as being in need of an ASMMemZeroPage().
 …
      */
     pgmPoolCacheFlushPage(pPool, pPage);
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    /* Heavy stuff done. */
-    PGMRZDynMapPopAutoSubset(pVCpu, iPrevSubset);
-#endif
 
     /*
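Each hunk above removes the same bracket pattern: PGMRZDynMapPushAutoSubset saved a mark before work that could create many temporary mappings, and PGMRZDynMapPopAutoSubset released everything added after that mark. A standalone toy model of that save/restore discipline, not VBox code (the counter stands in for the real mapping set):

#include <stdio.h>

static unsigned g_cMappings;   /* mappings currently in the per-CPU set */

/* "Push" remembers how many mappings exist right now. */
static unsigned subsetPush(void) { return g_cMappings; }

/* "Pop" releases every mapping added after the saved mark. */
static void subsetPop(unsigned iPrev)
{
    printf("releasing %u temporary mappings\n", g_cMappings - iPrev);
    g_cMappings = iPrev;
}

static void heavyWork(void) { g_cMappings += 5; /* pretend we mapped pages */ }

int main(void)
{
    unsigned iPrevSubset = subsetPush();   /* like PGMRZDynMapPushAutoSubset */
    heavyWork();
    subsetPop(iPrevSubset);                /* like PGMRZDynMapPopAutoSubset  */
    return 0;
}

With the 2x4GB mapping set gone there is nothing left to overflow, so the brackets disappear (one removed comment already carried a "@todo we can drop this now").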
trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp (r87040 → r87141)

@@ -10587 +10587 @@
         return VINF_EM_RESCHEDULE_REM;
     }
-#endif
-
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PGMRZDynMapFlushAutoSet(pVCpu);
 #endif
 
trunk/src/VBox/VMM/include/PGMInline.h (r86488 → r87141)

@@ -219 +219 @@
 }
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-
-/**
- * Inlined version of the ring-0 version of the host page mapping code
- * that optimizes access to pages already in the set.
- *
- * @returns VINF_SUCCESS. Will bail out to ring-3 on failure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   HCPhys      The physical address of the page.
- * @param   ppv         Where to store the mapping address.
- * @param   SRC_POS     The source location of the caller.
- */
-DECLINLINE(int) pgmRZDynMapHCPageInlined(PVMCPUCC pVCpu, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
-{
-    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
-
-    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
-    Assert(!(HCPhys & PAGE_OFFSET_MASK));
-    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
-
-    unsigned iHash  = PGMMAPSET_HASH(HCPhys);
-    unsigned iEntry = pSet->aiHashTable[iHash];
-    if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
-        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
-    {
-        pSet->aEntries[iEntry].cInlinedRefs++;
-        *ppv = pSet->aEntries[iEntry].pvPage;
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlHits);
-    }
-    else
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInlMisses);
-        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapHCPageInl, a);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Inlined version of the guest page mapping code that optimizes access to pages
- * already in the set.
- *
- * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   GCPhys      The guest physical address of the page.
- * @param   ppv         Where to store the mapping address.
- * @param   SRC_POS     The source location of the caller.
- */
-DECLINLINE(int) pgmRZDynMapGCPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
-{
-    STAM_PROFILE_START(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
-    AssertMsg(!(GCPhys & PAGE_OFFSET_MASK), ("%RGp\n", GCPhys));
-
-    /*
-     * Get the ram range.
-     */
-    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
-    RTGCPHYS off;
-    if (   !pRam
-        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
-        /** @todo   || page state stuff */
-       )
-    {
-        /* This case is not counted into StatRZDynMapGCPageInl. */
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
-        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
-    }
-
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
-
-    /*
-     * pgmRZDynMapHCPageInlined with out stats.
-     */
-    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
-    Assert(!(HCPhys & PAGE_OFFSET_MASK));
-    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
-
-    unsigned iHash  = PGMMAPSET_HASH(HCPhys);
-    unsigned iEntry = pSet->aiHashTable[iHash];
-    if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
-        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
-    {
-        pSet->aEntries[iEntry].cInlinedRefs++;
-        *ppv = pSet->aEntries[iEntry].pvPage;
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
-    }
-    else
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
-        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Inlined version of the ring-0 version of guest page mapping that optimizes
- * access to pages already in the set.
- *
- * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   GCPhys      The guest physical address of the page.
- * @param   ppv         Where to store the mapping address.
- * @param   SRC_POS     The source location of the caller.
- */
-DECLINLINE(int) pgmRZDynMapGCPageInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
-{
-    return pgmRZDynMapGCPageV2Inlined(pVCpu->CTX_SUFF(pVM), pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
-}
-
-
-/**
- * Inlined version of the ring-0 version of the guest byte mapping code
- * that optimizes access to pages already in the set.
- *
- * @returns VBox status code, see pgmRZDynMapGCPageCommon for details.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   GCPhys      The guest physical address of the page.
- * @param   ppv         Where to store the mapping address.  The offset is
- *                      preserved.
- * @param   SRC_POS     The source location of the caller.
- */
-DECLINLINE(int) pgmRZDynMapGCPageOffInlined(PVMCPUCC pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL)
-{
-    STAM_PROFILE_START(&pVCpu->pgm.s.StatRZDynMapGCPageInl, a);
-
-    /*
-     * Get the ram range.
-     */
-    PVMCC        pVM  = pVCpu->CTX_SUFF(pVM);
-    PPGMRAMRANGE pRam = pVM->pgm.s.CTX_SUFF(apRamRangesTlb)[PGM_RAMRANGE_TLB_IDX(GCPhys)];
-    RTGCPHYS off;
-    if (   !pRam
-        || (off = GCPhys - pRam->GCPhys) >= pRam->cb
-        /** @todo   || page state stuff */
-       )
-    {
-        /* This case is not counted into StatRZDynMapGCPageInl. */
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamMisses);
-        return pgmRZDynMapGCPageCommon(pVM, pVCpu, GCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
-    }
-
-    RTHCPHYS HCPhys = PGM_PAGE_GET_HCPHYS(&pRam->aPages[off >> PAGE_SHIFT]);
-    STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlRamHits);
-
-    /*
-     * pgmRZDynMapHCPageInlined with out stats.
-     */
-    PPGMMAPSET pSet = &pVCpu->pgm.s.AutoSet;
-    Assert(!(HCPhys & PAGE_OFFSET_MASK));
-    Assert(pSet->cEntries <= RT_ELEMENTS(pSet->aEntries));
-
-    unsigned iHash  = PGMMAPSET_HASH(HCPhys);
-    unsigned iEntry = pSet->aiHashTable[iHash];
-    if (    iEntry < pSet->cEntries
-        &&  pSet->aEntries[iEntry].HCPhys == HCPhys
-        &&  pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlHits);
-        pSet->aEntries[iEntry].cInlinedRefs++;
-        *ppv = (void *)((uintptr_t)pSet->aEntries[iEntry].pvPage | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
-    }
-    else
-    {
-        STAM_COUNTER_INC(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInlMisses);
-        pgmRZDynMapHCPageCommon(pSet, HCPhys, ppv RTLOG_COMMA_SRC_POS_ARGS);
-        *ppv = (void *)((uintptr_t)*ppv | (PAGE_OFFSET_MASK & (uintptr_t)GCPhys));
-    }
-
-    STAM_PROFILE_STOP(&pVCpu->pgm.s.CTX_SUFF(pStats)->StatRZDynMapGCPageInl, a);
-    return VINF_SUCCESS;
-}
-
-
-/**
- * Maps the page into current context (RC and maybe R0).
- *
- * @returns pointer to the mapping.
- * @param   pVM         The cross context VM structure.
- * @param   pPage       The page.
- * @param   SRC_POS     The source location of the caller.
- */
-DECLINLINE(void *) pgmPoolMapPageInlined(PVMCC pVM, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
-{
-    if (pPage->idx >= PGMPOOL_IDX_FIRST)
-    {
-        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
-        void *pv;
-        pgmRZDynMapHCPageInlined(VMMGetCpu(pVM), pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
-        return pv;
-    }
-    AssertFatalMsgFailed(("pgmPoolMapPageInlined invalid page index %x\n", pPage->idx));
-}
-
-
-/**
- * Maps the page into current context (RC and maybe R0).
- *
- * @returns pointer to the mapping.
- * @param   pVM         The cross context VM structure.
- * @param   pVCpu       The cross context virtual CPU structure.
- * @param   pPage       The page.
- * @param   SRC_POS     The source location of the caller.
- */
-DECLINLINE(void *) pgmPoolMapPageV2Inlined(PVMCC pVM, PVMCPUCC pVCpu, PPGMPOOLPAGE pPage RTLOG_COMMA_SRC_POS_DECL)
-{
-    if (pPage->idx >= PGMPOOL_IDX_FIRST)
-    {
-        Assert(pPage->idx < pVM->pgm.s.CTX_SUFF(pPool)->cCurPages);
-        void *pv;
-        Assert(pVCpu == VMMGetCpu(pVM)); RT_NOREF_PV(pVM);
-        pgmRZDynMapHCPageInlined(pVCpu, pPage->Core.Key, &pv RTLOG_COMMA_SRC_POS_ARGS);
-        return pv;
-    }
-    AssertFatalMsgFailed(("pgmPoolMapPageV2Inlined invalid page index %x\n", pPage->idx));
-}
-
-#endif /* VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0 */
 
 /**
 …
@@ -496 +270 @@
         STAM_COUNTER_INC(&pVM->pgm.s.CTX_SUFF(pStats)->CTX_MID_Z(Stat,PageMapTlbHits));
         rc = VINF_SUCCESS;
-#if 0 //def VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# ifdef IN_RING3
-        if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR0)
-# else
-        if (pTlbe->pv == (void *)pVM->pgm.s.pvZeroPgR3)
-# endif
-            pTlbe->pv = pVM->pgm.s.CTX_SUFF(pvZeroPg);
-#endif
         AssertPtr(pTlbe->pv);
-#if defined(IN_RING3) || (!defined(VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0) && !defined(VBOX_WITH_RAM_IN_KERNEL))
+#if defined(IN_RING3) || !defined(VBOX_WITH_RAM_IN_KERNEL)
         Assert(!pTlbe->pMap || RT_VALID_PTR(pTlbe->pMap->pv));
 #endif
 …
@@ -653 +419 @@
 DECLINLINE(int) pgmGstGet32bitPDPtrEx(PVMCPUCC pVCpu, PX86PD *ppPd)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPd RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-    {
-        *ppPd = NULL;
-        return rc;
-    }
-#else
     *ppPd = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
     if (RT_UNLIKELY(!*ppPd))
         return pgmGstLazyMap32BitPD(pVCpu, ppPd);
-#endif
     return VINF_SUCCESS;
 }
 …
@@ -677 +434 @@
 DECLINLINE(PX86PD) pgmGstGet32bitPDPtr(PVMCPUCC pVCpu)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PD pGuestPD = NULL;
-    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPD RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-    {
-        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
-        return NULL;
-    }
-#else
     PX86PD pGuestPD = pVCpu->pgm.s.CTX_SUFF(pGst32BitPd);
     if (RT_UNLIKELY(!pGuestPD))
 …
         return NULL;
     }
-#endif
     return pGuestPD;
 }
 …
@@ -707 +454 @@
 DECLINLINE(int) pgmGstGetPaePDPTPtrEx(PVMCPUCC pVCpu, PX86PDPT *ppPdpt)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPdpt RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-    {
-        *ppPdpt = NULL;
-        return rc;
-    }
-#else
     *ppPdpt = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
     if (RT_UNLIKELY(!*ppPdpt))
         return pgmGstLazyMapPaePDPT(pVCpu, ppPdpt);
-#endif
     return VINF_SUCCESS;
 }
 …
@@ -751 +489 @@
     AssertGCPtr32(GCPtr);
 
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PDPT pGuestPDPT = NULL;
-    int rc = pgmRZDynMapGCPageOffInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPDPT RTLOG_COMMA_SRC_POS);
-    AssertRCReturn(rc, NULL);
-#else
     PX86PDPT pGuestPDPT = pVCpu->pgm.s.CTX_SUFF(pGstPaePdpt);
     if (RT_UNLIKELY(!pGuestPDPT))
 …
         return NULL;
     }
-#endif
     return &pGuestPDPT->a[(uint32_t)GCPtr >> X86_PDPT_SHIFT];
 }
 …
@@ -786 +518 @@
     {
         const unsigned iPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-        PX86PDPAE pGuestPD = NULL;
-        int rc = pgmRZDynMapGCPageInlined(pVCpu,
-                                          pGuestPDPT->a[iPdpt].u & X86_PDPE_PG_MASK,
-                                          (void **)&pGuestPD
-                                          RTLOG_COMMA_SRC_POS);
-        if (RT_SUCCESS(rc))
-            return pGuestPD->a[iPD];
-        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
-#else
         PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
         if (   !pGuestPD
 …
         if (pGuestPD)
             return pGuestPD->a[iPD];
-#endif
     }
 }
 …
@@ -838 +559 @@
 
     /* The PDE. */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PDPAE pGuestPD = NULL;
-    int rc = pgmRZDynMapGCPageInlined(pVCpu,
-                                      uPdpe & X86_PDPE_PG_MASK,
-                                      (void **)&pGuestPD
-                                      RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-    {
-        AssertMsg(rc == VERR_PGM_INVALID_GC_PHYSICAL_ADDRESS, ("%Rrc\n", rc));
-        return NULL;
-    }
-#else
     PX86PDPAE pGuestPD = pVCpu->pgm.s.CTX_SUFF(apGstPaePDs)[iPdpt];
     if (   !pGuestPD
         || (uPdpe & X86_PDPE_PG_MASK) != pVCpu->pgm.s.aGCPhysGstPaePDs[iPdpt])
         pgmGstLazyMapPaePD(pVCpu, iPdpt, &pGuestPD);
-#endif
     *piPD = (GCPtr >> X86_PD_PAE_SHIFT) & X86_PD_PAE_MASK;
     return pGuestPD;
 …
@@ -872 +580 @@
 DECLINLINE(int) pgmGstGetLongModePML4PtrEx(PVMCPUCC pVCpu, PX86PML4 *ppPml4)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)ppPml4 RTLOG_COMMA_SRC_POS);
-    if (RT_FAILURE(rc))
-    {
-        *ppPml4 = NULL;
-        return rc;
-    }
-#else
     *ppPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
     if (RT_UNLIKELY(!*ppPml4))
         return pgmGstLazyMapPml4(pVCpu, ppPml4);
-#endif
     return VINF_SUCCESS;
 }
 …
@@ -913 +612 @@
 DECLINLINE(PX86PML4E) pgmGstGetLongModePML4EPtr(PVMCPUCC pVCpu, unsigned int iPml4)
 {
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-    PX86PML4 pGuestPml4;
-    int rc = pgmRZDynMapGCPageInlined(pVCpu, pVCpu->pgm.s.GCPhysCR3, (void **)&pGuestPml4 RTLOG_COMMA_SRC_POS);
-    AssertRCReturn(rc, NULL);
-#else
     PX86PML4 pGuestPml4 = pVCpu->pgm.s.CTX_SUFF(pGstAmd64Pml4);
     if (pGuestPml4)
 …
         AssertRCReturn(rc, NULL);
     }
-#endif
     return &pGuestPml4->a[iPml4];
 }
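The removed pgmRZDynMapHCPageInlined fast path (visible above) did a single hash-table probe into the per-CPU mapping set: on a hit it bumped an inline reference count and reused the cached mapping, otherwise it fell back to the out-of-line pgmRZDynMapHCPageCommon. A standalone toy model of that probe, using simplified types and sizes rather than the VBox ones:

#include <stdint.h>
#include <stddef.h>

#define SET_SIZE 64
#define MAPSET_HASH(HCPhys) ((unsigned)(((HCPhys) >> 12) % SET_SIZE))

typedef struct SETENTRY
{
    uint64_t HCPhys;        /* page-aligned host-physical address */
    void    *pvPage;        /* where the page is currently mapped */
    uint16_t cInlinedRefs;  /* references taken via the fast path */
} SETENTRY;

typedef struct MAPSET
{
    unsigned cEntries;                /* number of valid aEntries[] */
    unsigned aiHashTable[SET_SIZE];   /* hash -> candidate entry    */
    SETENTRY aEntries[SET_SIZE];
} MAPSET;

/* Fast path: one hash probe; returns the cached mapping on a hit, NULL on
 * a miss (where the real code called pgmRZDynMapHCPageCommon instead). */
static void *mapSetTryInlined(MAPSET *pSet, uint64_t HCPhys)
{
    unsigned iEntry = pSet->aiHashTable[MAPSET_HASH(HCPhys)];
    if (   iEntry < pSet->cEntries
        && pSet->aEntries[iEntry].HCPhys == HCPhys
        && pSet->aEntries[iEntry].cInlinedRefs < UINT16_MAX - 1)
    {
        pSet->aEntries[iEntry].cInlinedRefs++;   /* hit: take a reference */
        return pSet->aEntries[iEntry].pvPage;
    }
    return NULL;                                 /* miss: slow path */
}

int main(void)
{
    static MAPSET Set = { 0 };
    /* Seed one entry so the probe below hits. */
    Set.cEntries                         = 1;
    Set.aEntries[0].HCPhys               = 0x5000;
    Set.aEntries[0].pvPage               = (void *)&Set; /* any pointer for the toy */
    Set.aiHashTable[MAPSET_HASH(0x5000)] = 0;
    return mapSetTryInlined(&Set, 0x5000) ? 0 : 1;
}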
trunk/src/VBox/VMM/include/PGMInternal.h (r86466 → r87141)

@@ -296 +296 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
-    pgmRZDynMapGCPageV2Inlined(pVM, pVCpu, GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
-#else
-# define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
-    pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
-#endif
+#define PGM_GCPHYS_2_PTR_V2(pVM, pVCpu, GCPhys, ppv) \
+    pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
 
 /** @def PGM_GCPHYS_2_PTR
 …
@@ -344 +339 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
-    pgmRZDynMapGCPageOffInlined(VMMGetCpu(pVM), GCPhys, (void **)(ppv) RTLOG_COMMA_SRC_POS)
-#else
-# define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
-    pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
-#endif
+#define PGM_GCPHYS_2_PTR_EX(pVM, GCPhys, ppv) \
+    pgmPhysGCPhys2R3Ptr(pVM, GCPhys, (PRTR3PTR)(ppv)) /** @todo this isn't asserting! */
 
 /** @def PGM_DYNMAP_UNUSED_HINT
 …
 * @param   pvPage     The pool page.
 */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# ifdef LOG_ENABLED
-#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage, RT_SRC_POS)
-# else
-#  define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)  pgmRZDynMapUnusedHint(pVCpu, pvPage)
-# endif
-#else
-# define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)   do {} while (0)
-#endif
+#define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)    do {} while (0)
 
 /** @def PGM_DYNMAP_UNUSED_HINT_VM
 …
@@ -2433 +2415 @@
  * @remark  There is no need to assert on the result.
  */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# define PGMPOOL_PAGE_2_PTR(a_pVM, a_pPage)     pgmPoolMapPageInlined((a_pVM), (a_pPage) RTLOG_COMMA_SRC_POS)
-#elif defined(VBOX_STRICT) || 1 /* temporarily going strict here */
+#if defined(VBOX_STRICT) || 1 /* temporarily going strict here */
 # define PGMPOOL_PAGE_2_PTR(a_pVM, a_pPage)     pgmPoolMapPageStrict(a_pPage, __FUNCTION__)
 DECLINLINE(void *) pgmPoolMapPageStrict(PPGMPOOLPAGE a_pPage, const char *pszCaller)
 …
  * @remark  There is no need to assert on the result.
  */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-# define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage)     pgmPoolMapPageV2Inlined((a_pVM), (a_pVCpu), (a_pPage) RTLOG_COMMA_SRC_POS)
-#else
-# define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage)     PGMPOOL_PAGE_2_PTR((a_pVM), (a_pPage))
-#endif
+#define PGMPOOL_PAGE_2_PTR_V2(a_pVM, a_pVCpu, a_pPage)      PGMPOOL_PAGE_2_PTR((a_pVM), (a_pPage))
 
 …
@@ -4014 +3990 @@
 
 #endif /* IN_RING3 */
-#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
-int  pgmRZDynMapHCPageCommon(PPGMMAPSET pSet, RTHCPHYS HCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
-int  pgmRZDynMapGCPageCommon(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys, void **ppv RTLOG_COMMA_SRC_POS_DECL);
-# ifdef LOG_ENABLED
-void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint, RT_SRC_POS_DECL);
-# else
-void pgmRZDynMapUnusedHint(PVMCPU pVCpu, void *pvHint);
-# endif
-#endif
 int  pgmPoolAlloc(PVMCC pVM, RTGCPHYS GCPhys, PGMPOOLKIND enmKind, PGMPOOLACCESS enmAccess, bool fA20Enabled,
                   uint16_t iUser, uint32_t iUserTable, bool fLockPage, PPPGMPOOLPAGE ppPage);
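With the option gone, PGM_DYNMAP_UNUSED_HINT collapses to the classic empty-statement macro, do {} while (0). That form, rather than an empty expansion, keeps a trailing semicolon valid in every statement position; a small demonstration of why:

#include <stdio.h>

/* An empty macro must still expand to exactly one statement, otherwise a
 * braceless if/else around it breaks.  "do {} while (0)" guarantees that. */
#define PGM_DYNMAP_UNUSED_HINT(pVCpu, pvPage)   do { } while (0)

int main(void)
{
    int fCond = 1;
    if (fCond)
        PGM_DYNMAP_UNUSED_HINT(NULL, NULL); /* one statement; the else binds correctly */
    else
        printf("never reached\n");
    return 0;
}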